Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
582 changes: 582 additions & 0 deletions examples/DC2/DC2_redMaPPer_clusterensemble_object.ipynb

Large diffs are not rendered by default.

140 changes: 140 additions & 0 deletions examples/DC2/extract_redmapper_data/_utils_cosmoDC2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
import numpy as np
from astropy.table import QTable, Table, vstack, join
import pickle
import pandas as pd
import clmm
import cmath
import healpy
import GCRCatalogs
GCRCatalogs.set_root_dir_by_site('in2p3')
import mysql
from mysql.connector import Error

def _fix_axis_ratio(q_bad):

#from https://github.com/LSSTDESC/gcr-catalogs/blob/ellipticity_bug_fix/GCRCatalogs/cosmodc2.py
# back out incorrect computation of q using Johnsonb function
e_jb = np.sqrt((1 - q_bad**2)/(1 + q_bad**2))
q_new = np.sqrt((1 - e_jb)/(1 + e_jb)) # use correct relationship to compute q from e_jb
return q_new

def _fix_ellipticity_disk_or_bulge(ellipticity):
    """Correct a disk/bulge ellipticity affected by the cosmoDC2 Johnson SB bug.

    The buggy pipeline derived the axis ratio as
    q_bad = sqrt((1 - e_jb^2)/(1 + e_jb^2)); here we back that out via
    _fix_axis_ratio and recompute e with the default (1 - q)/(1 + q) definition.
    From https://github.com/LSSTDESC/gcr-catalogs/blob/ellipticity_bug_fix/GCRCatalogs/cosmodc2.py
    """
    # axis ratio implied by the default ellipticity definition
    q_bad = (1.0 - ellipticity) / (1.0 + ellipticity)
    corrected_q = _fix_axis_ratio(q_bad)
    # recompute e using the default (1-q)/(1+q) definition
    return (1.0 - corrected_q) / (1.0 + corrected_q)

def correct_shear_ellipticity(ellipticity_uncorr_e1, ellipticity_uncorr_e2):
    """Correct both ellipticity components for the cosmoDC2 Johnson SB bug.

    The position angle of each shape is preserved; only the modulus of the
    complex ellipticity is corrected via _fix_ellipticity_disk_or_bulge.
    From https://github.com/LSSTDESC/gcr-catalogs/blob/ellipticity_bug_fix/GCRCatalogs/cosmodc2.py

    Returns the corrected (e1, e2) pair.
    """
    # modulus and phase of the uncorrected complex ellipticity e1 + i*e2
    norm_uncorr = (ellipticity_uncorr_e1 ** 2 + ellipticity_uncorr_e2 ** 2) ** 0.5
    phase = np.arctan2(ellipticity_uncorr_e2, ellipticity_uncorr_e1)
    # fix the modulus, keep the phase
    norm_corr = _fix_ellipticity_disk_or_bulge(norm_uncorr)
    corrected = norm_corr * np.exp(1j * phase)
    return corrected.real, corrected.imag

def extract_cosmoDC2_galaxy(lens_z, lens_distance, ra, dec, rmax=10, method='qserv'):
    r"""Extract background galaxies around a lens from the cosmoDC2 catalog.

    Parameters
    ----------
    lens_z : float
        Lens redshift; only galaxies with redshift >= lens_z are returned.
    lens_distance : float
        Distance to the lens, in the same length unit as ``rmax``
        (rmax/lens_distance is used as an angle in radians).
    ra : float
        Lens right ascension [deg].
    dec : float
        Lens declination [deg].
    rmax : float, optional
        Maximum projected radius around the lens (default 10).
    method : {'qserv', 'gcrcatalogs'}, optional
        Extraction backend (default 'qserv').

    Returns
    -------
    astropy.table.QTable or astropy.table.Table
        Source catalog with columns ra, dec, z, galaxy_id, halo_id,
        mag_i/mag_r/mag_y, shear1, shear2, kappa and the uncorrected true
        ellipticities e1_true_uncorr / e2_true_uncorr.
    """
    if method == 'qserv':

        def qserv_query(lens_z, lens_distance, ra, dec, rmax=10):
            r"""Build the qserv SQL query: quantities wanted + cuts.

            Attributes:
            -----------
            lens_z: float
                lens redshift
            lens_distance: float
                distance to the cluster
            ra: float
                lens right ascension
            dec: float
                lens declination
            rmax: float
                maximum radius
            """
            zmax = 3.
            zmin = lens_z
            # angular aperture [deg] corresponding to the physical radius rmax
            theta_max = (rmax/lens_distance) * (180./np.pi)
            query = "SELECT data.coord_ra as ra, data.coord_dec as dec, data.redshift as z, "
            query += "data.galaxy_id as galaxy_id, data.halo_id as halo_id, "
            query += "data.mag_i, data.mag_r, data.mag_y, "
            query += "data.shear_1 as shear1, data.shear_2 as shear2, data.convergence as kappa, "
            query += "data.ellipticity_1_true as e1_true_uncorr, data.ellipticity_2_true as e2_true_uncorr "
            query += "FROM cosmoDC2_v1_1_4_image.data as data "
            query += f"WHERE data.redshift >= {zmin} AND data.redshift < {zmax} "
            query += f"AND scisql_s2PtInCircle(coord_ra, coord_dec, {ra}, {dec}, {theta_max}) = 1 "
            query += "AND data.mag_i <= 24.6 "
            query += "AND data.mag_r <= 28.0 "
            query += ";"
            return query

        conn_qserv = mysql.connector.connect(host='ccqserv201', user='qsmaster', port=30040)
        try:
            # BUG FIX: the original passed the undefined name `z` here
            # (NameError at runtime); the parameter is `lens_z`.
            qserv_query_extract = qserv_query(lens_z, lens_distance, ra, dec, rmax=rmax)
            tab = pd.read_sql_query(qserv_query_extract, conn_qserv)
        finally:
            # always release the database connection, even if the query fails
            conn_qserv.close()
        return QTable.from_pandas(tab)

    if method == 'gcrcatalogs':

        def gcrcatalogs_query():
            """Column names to read from the GCR catalog."""
            return ["galaxy_id", "ra", "dec", "shear_1", "shear_2",
                    "mag_i", "mag_r", "mag_y",
                    "redshift", "convergence", "halo_id",
                    "ellipticity_1_true", "ellipticity_2_true"]

        theta_apperture_deg = (180/np.pi)*(rmax/lens_distance)
        # Randomly sample the square of half-width theta_apperture_deg around
        # the lens to determine which healpix pixels (nside=32, ring ordering)
        # the aperture may overlap.
        n = 1000
        dx = np.random.random(n)*(-2) + 1
        dy = np.random.random(n)*(-2) + 1
        x_rand, y_rand = ra + theta_apperture_deg*dx, dec + theta_apperture_deg*dy
        healpix = np.unique(healpy.ang2pix(32, x_rand, y_rand, nest=False, lonlat=True))
        # load the catalog once and reuse it (the original loaded it twice)
        cosmoDC2 = GCRCatalogs.load_catalog("cosmoDC2_v1.1.4_image")
        healpix_dc2 = cosmoDC2.get_catalog_info()['healpix_pixels']
        # keep only pixels actually covered by cosmoDC2
        healpix_list = healpix[np.isin(healpix, healpix_dc2)]

        # NOTE(review): this box filter uses half-width theta/2 while the
        # qserv branch selects a full circle of radius theta — confirm the
        # asymmetry between the two backends is intended.
        coord_filters = [
            "ra >= {}".format(ra - theta_apperture_deg/2),
            "ra < {}".format(ra + theta_apperture_deg/2),
            "dec >= {}".format(dec - theta_apperture_deg/2),
            "dec < {}".format(dec + theta_apperture_deg/2),]

        z_filters = ["redshift >= {}".format(lens_z)]
        mag_filters = ["mag_i < {}".format(24.6), "mag_r < {}".format(28.0)]

        tab = Table(names=gcrcatalogs_query())

        for i, hp in enumerate(healpix_list):
            print('-----> healpix pixel = ' + str(hp))
            chunk = cosmoDC2.get_quantities(gcrcatalogs_query(),
                                            filters=(coord_filters + z_filters + mag_filters),
                                            native_filters=[f'healpix_pixel=={hp}'],
                                            return_iterator=True)
            # at most the first 3 chunks per pixel are read, as in the original
            for j in range(3):
                print('chunk = ' + str(j))
                try:
                    dat_extract_chunk = Table(next(chunk))
                except StopIteration:
                    # iterator exhausted: fewer than 3 chunks in this pixel.
                    # (The original bare `except: continue` silently swallowed
                    # *every* error, hiding real read failures.)
                    break
                tab = vstack([tab, dat_extract_chunk])

        # rename columns to match the qserv branch's schema
        tab.rename_column('ellipticity_1_true', 'e1_true_uncorr')
        tab.rename_column('ellipticity_2_true', 'e2_true_uncorr')
        tab.rename_column('shear_1', 'shear1')
        tab.rename_column('shear_2', 'shear2')
        tab.rename_column('convergence', 'kappa')
        tab.rename_column('redshift', 'z')

        return tab





Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import GCRCatalogs
GCRCatalogs.set_root_dir_by_site('in2p3')
import matplotlib.pyplot as plt
import pickle
import sys
import numpy as np
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
from astropy.table import Table
def load(filename, **kwargs):
    """Load a pickled object (e.g. a GalaxyCluster) from *filename*.

    Extra keyword arguments are forwarded to :func:`pickle.load`.
    """
    with open(filename, 'rb') as stream:
        return pickle.load(stream, **kwargs)
def save_pickle(dat, filename, **kwargs):
    """Pickle *dat* to *filename*.

    Improvements over the original: the file is opened with a context
    manager so the handle is closed even if pickling raises, and **kwargs
    are forwarded to :func:`pickle.dump` (they were silently ignored).
    """
    with open(filename, 'wb') as fout:
        pickle.dump(dat, fout, **kwargs)
# Build the redMaPPer cluster catalog: one row per cluster, with the
# member-galaxy quantities attached as per-cluster arrays, then pickle it.
catalog = GCRCatalogs.load_catalog('cosmoDC2_v1.1.4_redmapper_v0.8.1')
quantity = ['cluster_id', 'ra', 'dec',
            'redshift', 'redshift_err',
            'richness', 'richness_err',
            'ra_member', 'dec_member',
            'id_member', 'cluster_id_member']
dat = catalog.get_quantities(quantity)

# group the flat member-level columns by their parent cluster
line_ra_member = []
line_dec_member = []
line_id_member = []
line_cluster_id_member = []
for cl_id in dat['cluster_id']:
    is_member = dat['cluster_id_member'] == cl_id
    line_ra_member.append(dat['ra_member'][is_member])
    line_dec_member.append(dat['dec_member'][is_member])
    line_id_member.append(dat['id_member'][is_member])
    line_cluster_id_member.append(dat['cluster_id_member'][is_member])

columns = {'cluster_id': dat['cluster_id'],
           'ra': dat['ra'],
           'dec': dat['dec'],
           'redshift': dat['redshift'],
           'redshift_err': dat['redshift_err'],
           'richness': dat['richness'],
           'richness_err': dat['richness_err'],
           'ra_member': line_ra_member,
           'dec_member': line_dec_member,
           'id_member': line_id_member,
           'cluster_id_member': line_cluster_id_member}

t = Table(columns)

save_pickle(t, './DC2_data/cosmoDC2_v1.1.4_redmapper_v0.8.1_catalog.pkl')
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
# -*- coding: utf-8 -*-
import numpy as np
import GCRCatalogs
#GCRCatalogs.set_root_dir_by_site('in2p3')
import healpy
import glob, sys
import astropy.units as u
from astropy.io import fits as fits
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy
import clmm
import pandas as pd

from astropy.table import QTable, Table, vstack, join, hstack
import pickle, sys

def load(filename, **kwargs):
    """Unpickle and return the object stored in *filename*.

    Any extra keyword arguments are passed through to :func:`pickle.load`.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle, **kwargs)

def save_pickle(dat, filename, **kwargs):
    """Pickle *dat* to *filename*.

    Improvements over the original: the file is opened with a context
    manager so the handle is closed even if pickling raises, and **kwargs
    are forwarded to :func:`pickle.dump` (they were silently ignored).
    """
    with open(filename, 'wb') as fout:
        pickle.dump(dat, fout, **kwargs)

from clmm import Cosmology
from scipy.integrate import simps
#cosmoDC2 cosmology
cosmo = Cosmology(H0 = 71.0, Omega_dm0 = 0.265 - 0.0448, Omega_b0 = 0.0448, Omega_k0 = 0.0)
#connection with qserv
import mysql
from mysql.connector import Error
import argparse
import _utils_cosmoDC2

# magnitude limits applied to the background-source sample
mag_i_max = 24.25
mag_r_max = 28


def source_selection_magnitude(table):
    """Return only the rows of *table* brighter than the i- and r-band limits.

    A source is kept when mag_i < mag_i_max AND mag_r < mag_r_max.
    """
    keep = (table['mag_i'] < mag_i_max) & (table['mag_r'] < mag_r_max)
    return table[keep]

def collect_argparser():
    """Parse the command-line configuration for the source-extraction run.

    Required arguments select which slice of the cluster list this job
    processes; the optional ones carry the cluster selection (richness and
    redshift windows), the extraction backend and the aperture radius.
    """
    p = argparse.ArgumentParser(description="Let's extract source catalogs and estimate individual lensing profiles")
    # job splitting
    p.add_argument("--which_split", type=int, required=True)
    p.add_argument("--number_of_splits", type=int, required=True)
    # input lens catalog and cluster selection windows
    p.add_argument("--lens_catalog_name", type=str, required=False,
                   default='./DC2_data/cosmoDC2_v1.1.4_redmapper_v0.8.1_catalog.pkl')
    p.add_argument("--lambda_min", type=float, required=False, default=20)
    p.add_argument("--lambda_max", type=float, required=False, default=40)
    p.add_argument("--redshift_min", type=float, required=False, default=0.2)
    p.add_argument("--redshift_max", type=float, required=False, default=0.4)
    # extraction backend and aperture
    p.add_argument("--method", type=str, required=False, default='qserv')
    p.add_argument("--rmax", type=float, required=False, default=30)
    return p.parse_args()

# --- select galaxy clusters and pick this job's split ---------------------
_config_extract_sources_in_cosmoDC2 = collect_argparser()
cfg = _config_extract_sources_in_cosmoDC2  # short local alias

lens_catalog_name = cfg.lens_catalog_name
lens_catalog = np.load(lens_catalog_name, allow_pickle=True)
# richness and redshift windows from the command line
selection = (lens_catalog['richness'] > cfg.lambda_min)
selection &= (lens_catalog['richness'] < cfg.lambda_max)
selection &= (lens_catalog['redshift'] > cfg.redshift_min)
selection &= (lens_catalog['redshift'] < cfg.redshift_max)
lens_catalog = lens_catalog[selection]
lens_catalog_truncated = lens_catalog

# split the selected clusters into number_of_splits chunks and keep one
n_cl = len(lens_catalog_truncated)
index_cl = np.arange(n_cl)
split_lists = np.array_split(index_cl, int(cfg.number_of_splits))
lens_catalog_truncated = lens_catalog_truncated[np.array(split_lists[int(cfg.which_split)])]
n_cl_to_extract = len(lens_catalog_truncated)

# output file name encodes the split configuration
path_where_to_save_profiles = f'./DC2_data/individual_redMaPPer_cluster_lensing_profiles/'
name_save = (path_where_to_save_profiles
             + f'individual_profiles'
             + f'_split={cfg.which_split}'
             + f'_number_of_splits={cfg.number_of_splits}_ncl={n_cl_to_extract}.pkl')

print(f'[cluster catalog]: {cfg.lambda_min} < richness < {cfg.lambda_max}')
print(f'[cluster catalog]: {cfg.redshift_min} < redshift < {cfg.redshift_max}')
print(f'[cluster catalog]: number of clusters = {n_cl}')
print(f'[cluster catalog]: number of clusters in split {cfg.which_split} = {n_cl_to_extract}')
print(f'[individual lensing profiles]: individual profiles ' + name_save)

# Loop over the clusters of this split: extract the background sources,
# correct their shapes, and accumulate one radial DeltaSigma profile per
# cluster into `ind_profile`, saved after each cluster.
for n, lens in enumerate(lens_catalog_truncated):

    # lens properties
    z, ra, dec = lens['redshift'], lens['ra'], lens['dec']
    cluster_id = lens['cluster_id']
    richness = lens['richness']
    print(f'[load background source catalog]: for redMaPPer cluster {cluster_id}')
    # cluster member galaxies.
    # BUG FIX: read the members from the current split row `lens` — the
    # original indexed the *full* `lens_catalog` with the split-local index
    # n, which picks the wrong cluster whenever which_split > 0.
    id_member_galaxy = lens['id_member']
    ra_member_galaxy = lens['ra_member']
    dec_member_galaxy = lens['dec_member']
    lens_distance = cosmo.eval_da(z)  # angular-diameter distance to the lens

    tab = _utils_cosmoDC2.extract_cosmoDC2_galaxy(z, lens_distance, ra, dec,
                                                  rmax=_config_extract_sources_in_cosmoDC2.rmax,
                                                  method=_config_extract_sources_in_cosmoDC2.method)
    # compute reduced shear from the true shear and convergence
    tab['g1'], tab['g2'] = clmm.utils.convert_shapes_to_epsilon(tab['shear1'], tab['shear2'],
                                                                shape_definition='shear',
                                                                kappa=tab['kappa'])
    # correct the cosmoDC2 intrinsic-ellipticity bug, then lens the shapes
    ellipticity_uncorr_e1 = tab['e1_true_uncorr']
    ellipticity_uncorr_e2 = tab['e2_true_uncorr']
    ellipticity_corr_e1, ellipticity_corr_e2 = _utils_cosmoDC2.correct_shear_ellipticity(ellipticity_uncorr_e1, ellipticity_uncorr_e2)
    tab['e1_true'] = ellipticity_corr_e1
    tab['e2_true'] = ellipticity_corr_e2
    tab['e1'], tab['e2'] = clmm.utils.compute_lensed_ellipticity(tab['e1_true'], tab['e2_true'],
                                                                 tab['shear1'], tab['shear2'],
                                                                 tab['kappa'])

    # source selection: magnitude cuts + sources well behind the lens
    dat_extract_mag_cut = source_selection_magnitude(tab)
    mask_z = dat_extract_mag_cut['z'] > z + 0.2
    dat_extract_mag_cut_z_cut = dat_extract_mag_cut[mask_z]

    dat = clmm.GCData(
        [
            dat_extract_mag_cut_z_cut["ra"],
            dat_extract_mag_cut_z_cut["dec"],
            dat_extract_mag_cut_z_cut["e1"],
            dat_extract_mag_cut_z_cut["e2"],
            dat_extract_mag_cut_z_cut["z"],
            dat_extract_mag_cut_z_cut["galaxy_id"],
        ],
        names=("ra", "dec", "e1", "e2", "z", "id"),
        masked=True,)

    cl = clmm.GalaxyCluster("redmapper_cluster", ra, dec, z, dat)

    # 15 log-spaced radial bins between 0.5 and rmax [Mpc]
    bin_edges = clmm.dataops.make_bins(0.5, _config_extract_sources_in_cosmoDC2.rmax, 15, method='evenlog10width')

    # per-galaxy tangential/cross DeltaSigma components
    cl.compute_tangential_and_cross_components(
        shape_component1="e1",
        shape_component2="e2",
        tan_component="DS_t",
        cross_component="DS_x",
        cosmo=cosmo,
        is_deltasigma=True,
        use_pdz=False,)

    # lens-source weights (geometric only: no pdz, no shape noise/error)
    cl.compute_galaxy_weights(
        use_pdz=False,
        use_shape_noise=False,
        shape_component1="e1",
        shape_component2="e2",
        use_shape_error=False,
        shape_component1_err="e_err",
        shape_component2_err="e_err",
        weight_name="w_ls",
        cosmo=cosmo,
        is_deltasigma=True,
        add=True,)

    # weighted radial profile stored on the cluster as `cl.profile`
    cl.make_radial_profile("Mpc",
                           bins=bin_edges,
                           error_model="ste",
                           cosmo=cosmo,
                           tan_component_in="DS_t",
                           cross_component_in="DS_x",
                           tan_component_out="binned_DS_t",
                           cross_component_out="binned_DS_x",
                           tan_component_in_err=None,
                           cross_component_in_err=None,
                           include_empty_bins=False,
                           gal_ids_in_bins=False,
                           add=True,
                           table_name="profile",
                           overwrite=True,
                           use_weights=True,
                           weights_in="w_ls",
                           weights_out="W_l",)

    binned_DS_t = cl.profile['binned_DS_t']
    binned_DS_x = cl.profile['binned_DS_x']
    W_l = cl.profile['W_l']
    radius = cl.profile['radius']
    cluster_data = [cluster_id, ra, dec, z, lens['richness']]
    cluster_data_colnames = ['cluster_id', 'cluster_ra', 'cluster_dec', 'cluster_redshift', 'cluster_richness']
    # initialize the accumulator on the first cluster of the split
    if n == 0:
        ind_profile = {k: [] for k in list(cl.profile.colnames) + cluster_data_colnames}

    # append this cluster's binned profile columns and metadata
    for index, name in enumerate(cl.profile.colnames):
        ind_profile[name].append(cl.profile[name])
    for index, name in enumerate(cluster_data_colnames):
        ind_profile[name].append(cluster_data[index])

    # checkpoint after every cluster so partial runs are not lost
    save_pickle(Table(ind_profile), name_save)


















Loading
Loading