import os
import sys

import h5py
import numpy as np
from scipy import stats

from brutus import filters, fitting, pdf, utils
from brutus.utils import inv_magnitude
from zero_point import zpt

# number of posterior draws to save for generating the 2D posterior
Ndraws = 1000
# factor by which to thin the samples before saving them to disk
thin = 20

os.environ['model_dir'] = '/n/holylfs06/LABS/finkbeiner_lab/Everyone/fink2/czucker/plane_run/perstar/'
sys.path.append("/n/holylfs06/LABS/finkbeiner_lab/Everyone/fink2/czucker/plane_run/perstar/")

pix_index = int(sys.argv[1])
input_dir_path = str(sys.argv[2])
output_dir_path = str(sys.argv[3])

nside = 1024
input_filename = input_dir_path + 'full.{}.{}.h5'.format(nside, pix_index)
output_filename = output_dir_path + 'full.{}.{}'.format(nside, pix_index)

################# RUN PER-STAR FITS ########################

# load filters (DECam grizY + VISTA JHKs + 2MASS JHKs + unWISE W1/W2)
decam_vvv_tmass_unwise_filt = (filters.decam[1:] + filters.vista[2:]
                               + filters.tmass + filters.wise[0:2])

# load MIST model grid
(models_mist, labels_mist, lmask_mist) = utils.load_models(
    '{}/grid_mist_v10.h5'.format(os.environ['model_dir']),
    filters=decam_vvv_tmass_unwise_filt,
    include_ms=True, include_postms=True, include_binaries=False)

BF_mist = fitting.BruteForce(models_mist, labels_mist, lmask_mist)

f_in = h5py.File(input_filename, 'r', swmr=True)

# load photometric zero-point offsets
zp_mist = utils.load_offsets(
    '{}/offsets_mist_v9.txt'.format(os.environ['model_dir']),
    filters=decam_vvv_tmass_unwise_filt)

# load tables for the Gaia parallax zero-point correction
zpt.load_tables()

# run brutus fitting
dataset = 'pixel {}-{}'.format(nside, pix_index)
fpix = f_in['photometry/{}'.format(dataset)]
Nobjs = fpix.attrs['N_stars']

# DECam-VVV-2MASS-unWISE magnitudes; read into memory so the error
# array can be modified in place (f_in is open read-only)
mag = fpix['decam_vvv_tmass_unwise_mag'][:]
magerr = fpix['decam_vvv_tmass_unwise_err'][:]

# add systematic error corrections in quadrature:
# 0.02 mag uncertainty for DECaPS
magerr[:, 0:5] = np.sqrt(magerr[:, 0:5]**2 + 0.02**2)
# 0.03 mag uncertainty for VVV/2MASS
magerr[:, 5:11] = np.sqrt(magerr[:, 5:11]**2 + 0.03**2)
# 0.04 mag uncertainty for unWISE
magerr[:, 11:] = np.sqrt(magerr[:, 11:]**2 + 0.04**2)

mask = np.isfinite(magerr)  # create boolean band mask
phot, err = inv_magnitude(mag, magerr)  # convert magnitudes to fluxes

objid = fpix['obj_id'][:]
parallax = fpix['parallax'][:]
parallax_err = fpix['parallax_error'][:]
coords = np.c_[fpix['l'][:], fpix['b'][:]]

# apply the Gaia parallax zero-point correction
correct_parallax_mask = np.isfinite(parallax)
parallax_correction = zpt.get_zpt(
    fpix['phot_g_mean_mag'][correct_parallax_mask],
    fpix['nu_eff_used_in_astrometry'][correct_parallax_mask],
    fpix['pseudocolour'][correct_parallax_mask],
    fpix['ecl_lat'][correct_parallax_mask],
    fpix['astrometric_params_solved'][correct_parallax_mask],
    _warnings=False)
parallax_correction[~np.isfinite(parallax_correction)] = 0
parallax[correct_parallax_mask] = (parallax[correct_parallax_mask]
                                   - parallax_correction)
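# Note on the sign convention (as implemented in the zero_point package,
# following Lindegren et al. 2021): zpt.get_zpt returns the parallax
# zero-point in mas, and the correction is
# corrected_parallax = parallax - zero_point, matching the subtraction
# above. Sources outside the validity range of the tables come back NaN
# (warnings suppressed via _warnings=False) and are zeroed out, so they
# pass through uncorrected.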
# remove a potentially corrupted output file left over from a previous
# OOM-killed run; if the existing file is readable, skip this pixel
if os.path.isfile(output_filename + ".h5"):
    sys.stderr.write('A file for {} already exists; checking if '
                     'corrupted.\n'.format(dataset))
    try:
        f_out = h5py.File(output_filename + '.h5', 'r')
        samps_logp = f_out["/stellar_samples/{}/samps_logp".format(dataset)]
    except (OSError, KeyError):
        sys.stderr.write('{} is corrupted, rerunning pixel.\n'.format(dataset))
        os.remove(output_filename + ".h5")
    else:
        f_in.close()
        f_out.close()
        sys.stderr.write('{} already ran successfully, not rerunning '
                         'pixel.\n'.format(dataset))
        sys.exit(0)

BF_mist.fit(phot, err, mask, objid,
            output_filename,
            parallax=parallax, parallax_err=parallax_err,
            data_coords=coords,
            avlim=(0, 24),
            av_gauss=(12, 1e+6),
            phot_offsets=zp_mist,
            Ndraws=Ndraws,        # number of samples to save
            Nmc_prior=50,         # number of Monte Carlo draws used to integrate priors
            logl_dim_prior=True,  # use chi2 distribution instead of Gaussian
            save_dar_draws=True,  # save (dist, Av, Rv) samples
            running_io=True,
            mem_lim=3000)

############## set up for removal of nearby high-reddening stars #########

# bin centers in distance modulus (mu) and E(B-V)
mubins = np.arange(4, 19, 0.125) + (0.125 / 2.)
ebins = np.arange(0, 7, 0.01) + (0.01 / 2.)

ebv_max_samp = np.load("/n/holylfs06/LABS/finkbeiner_lab/Everyone/fink2/czucker/plane_run/perstar/EBV_max_samples.npy")
dbins_max_samp = np.load("/n/holylfs06/LABS/finkbeiner_lab/Everyone/fink2/czucker/plane_run/perstar/dbins_mu.npy")

ebins_mesh, mubins_mesh = np.meshgrid(ebins, mubins)
ebins_mesh = ebins_mesh.T
mubins_mesh = mubins_mesh.T

# interpolate the maximum allowed E(B-V) as a function of distance modulus
ebvcutline = np.interp(mubins_mesh.ravel(), dbins_max_samp, ebv_max_samp)
ebvcutline = ebvcutline.reshape(ebins_mesh.shape)

mask_nearby_highred = (ebins_mesh >= ebvcutline) & (mubins_mesh < 10.4375)
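# Interpretation of the cut above: mask_nearby_highred flags
# (E(B-V), mu) bins that lie above the interpolated reddening ceiling at
# distance modulus mu < 10.4375 (roughly d < 1.2 kpc). Stars whose
# binned posteriors put more than half their probability mass in this
# region are flagged below as spurious nearby high-reddening solutions.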
####################### SAVE PDFS ##############################

# reopen the per-star output file
f_out = h5py.File(output_filename + '.h5', 'r+')

# load the flat datasets written by BruteForce.fit
labels = f_out['labels'][:]
model_idx = f_out['model_idx'][:]
ml_scale = f_out['ml_scale'][:]
ml_av = f_out['ml_av'][:]
ml_rv = f_out['ml_rv'][:]
ml_cov_sar = f_out['ml_cov_sar'][:]
obj_log_post = f_out['obj_log_post'][:]
obj_log_evid = f_out['obj_log_evid'][:]
obj_chi2min = f_out['obj_chi2min'][:]
obj_Nbands = f_out['obj_Nbands'][:]
samps_dist = f_out['samps_dist'][:]
samps_red = f_out['samps_red'][:]
samps_dred = f_out['samps_dred'][:]
samps_logp = f_out['samps_logp'][:]

# remove stars whose fits rely on unreliable low-mass models
gridmask = np.percentile(labels_mist['mini'][model_idx], 50, axis=1) > 0.5
# remove a failure mode where stars pile up against a narrow wall at 30 kpc
nearmask = np.percentile(samps_dist, 2.5, axis=1) < 30
# impose a chi2 cut alongside the mass (> 0.5 Msun) and distance (< 30 kpc) cuts
good = (stats.chi2.sf(obj_chi2min, obj_Nbands) > 0.01) & gridmask & nearmask

# regrid the samples onto binned bayestar-style (mu, E(B-V)) PDFs
pdfbin, xedges, yedges = pdf.bin_pdfs_distred(
    (samps_dist, samps_red, samps_dred),
    parallaxes=parallax, parallax_errors=parallax_err,
    coord=coords, ebv=True, bins=(120, 700), avlim=(0, 7))
pdfbin = np.transpose(pdfbin, axes=(0, 2, 1))

# flag stars dominated by the nearby high-reddening mode that cripples the fit
nearby_highred = np.sum(pdfbin[:, mask_nearby_highred], axis=1) > 0.5
# for the remaining stars, zero out the disallowed parameter space in the PDFs
pdfbin[:, mask_nearby_highred] = 0

# remove stray NaNs and empty PDFs
pdfmask = (np.isfinite(np.sum(pdfbin, axis=(1, 2)))
           & (np.sum(pdfbin, axis=(1, 2)) > 0))
good = good & pdfmask & ~nearby_highred
pdfbin = pdfbin[good, :, :]

# normalize the PDFs
pdf_sum = np.sum(pdfbin, axis=(1, 2))
pdfbin = pdfbin / pdf_sum[:, np.newaxis, np.newaxis]

dname = '/stellar_pdfs/{}/stellar_pdfs'.format(dataset)
dname2 = '/stellar_pdfs/{}'.format(dataset)

dset = f_out.create_dataset(dname, data=pdfbin, chunks=True,
                            compression='gzip', compression_opts=3)

# add attributes recording the grid shape and the (E(B-V), mu) extent
dset.attrs['nPix'] = np.array(list(dset.shape[1:]), dtype='u4')
dset.attrs['min'] = np.array([0., 4.], dtype='f8')
dset.attrs['max'] = np.array([7., 19.], dtype='f8')

# copy pixel metadata over from the input file
g = f_out[dname2]
g.attrs['l'] = fpix.attrs['l']
g.attrs['b'] = fpix.attrs['b']
g.attrs['nside'] = fpix.attrs['nside']
g.attrs['healpix_index'] = fpix.attrs['healpix_index']
g.attrs['EBV'] = fpix.attrs['EBV']

###################### SAVE MASK #########################

dname_mask = '/stellar_mask/{}/stellar_mask'.format(dataset)
dset_mask = f_out.create_dataset(dname_mask, data=good, chunks=True,
                                 compression='gzip', compression_opts=9,
                                 shuffle=True)

############# SAVE PERCENTILES ###########

# save the [2.5, 16, 50, 84, 97.5] percentiles of the stellar properties
props = ['mini', 'feh', 'eep', 'loga', 'logl', 'logt', 'logg']
for prop in props:
    f_out.create_dataset(
        "/stellar_percentiles/{}/{}".format(dataset, prop),
        data=np.percentile(labels_mist[prop][model_idx],
                           [2.5, 16, 50, 84, 97.5], axis=1).T,
        chunks=True, compression='gzip', compression_opts=9, shuffle=True)

# same percentiles for the distance and reddening samples
for name, samps in [('dist', samps_dist), ('red', samps_red),
                    ('dred', samps_dred)]:
    f_out.create_dataset(
        "/stellar_percentiles/{}/{}".format(dataset, name),
        data=np.percentile(samps, [2.5, 16, 50, 84, 97.5], axis=1).T,
        chunks=True, compression='gzip', compression_opts=9, shuffle=True)

###### SAVE THINNED SAMPLES AND DELETE FULL SAMPLE DATASETS ######

thinned = {
    'labels': labels,
    'model_idx': model_idx[:, ::thin],
    'ml_scale': ml_scale[:, ::thin],
    'ml_av': ml_av[:, ::thin],
    'ml_rv': ml_rv[:, ::thin],
    'ml_cov_sar': ml_cov_sar[:, ::thin, :, :],
    'obj_log_post': obj_log_post[:, ::thin],
    'obj_log_evid': obj_log_evid,
    'obj_chi2min': obj_chi2min,
    'obj_Nbands': obj_Nbands,
    'samps_dist': samps_dist[:, ::thin],
    'samps_red': samps_red[:, ::thin],
    'samps_dred': samps_dred[:, ::thin],
    'samps_logp': samps_logp[:, ::thin],
}
for name, data in thinned.items():
    f_out.create_dataset("/stellar_samples/{}/{}".format(dataset, name),
                         data=data, chunks=True, compression='gzip',
                         compression_opts=9, shuffle=True)
    del f_out[name]  # drop the uncompressed flat dataset
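# With Ndraws = 1000 and thin = 20, 50 posterior draws per star survive
# to disk. Note that deleting the flat datasets does not shrink the file
# by itself: HDF5 only reclaims the freed space once the file is
# repacked (e.g., with the h5repack command-line tool).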
f_out.create_dataset("/stellar_samples/{}/samps_dist".format(dataset), data=samps_dist[:,::thin],chunks=True, compression='gzip',compression_opts=9,shuffle=True) del f_out['samps_dist'] f_out.create_dataset("/stellar_samples/{}/samps_red".format(dataset), data=samps_red[:,::thin],chunks=True, compression='gzip',compression_opts=9,shuffle=True) del f_out['samps_red'] f_out.create_dataset("/stellar_samples/{}/samps_dred".format(dataset), data=samps_dred[:,::thin],chunks=True, compression='gzip',compression_opts=9,shuffle=True) del f_out['samps_dred'] f_out.create_dataset("/stellar_samples/{}/samps_logp".format(dataset), data=samps_logp[:,::thin],chunks=True, compression='gzip',compression_opts=9,shuffle=True) del f_out['samps_logp'] f_out.close() f_in.close()