implemented debiasing at theory level in soliket/ccl binned lkl. Many thanks to @inigozubeldia

matches class_sz
borisbolliet committed Sep 13, 2023
1 parent b7814bd commit 540f523
Showing 3 changed files with 52 additions and 42 deletions.
86 changes: 47 additions & 39 deletions soliket/clusters/clusters.py
@@ -233,10 +233,10 @@ def _get_completeness(self, marr, zarr, y0, qbin, marr_500c=None, **params):
         for i in range(Npatches):
 
             if compl_mode == 'erf_prod':
-                arg.append(get_erf_prod(y0[i], noise[i], qmin, qmax, qcut, kk, Nq))
+                arg.append(get_erf_prod(y0[i], noise[i], qmin, qmax, qcut, kk, Nq, dof = self.debiasDOF))
                 #arg.append(get_stf_prod(y0[i], noise[i], qmin, qmax, qcut, kk, Nq))
             elif compl_mode == 'erf_diff':
-                arg.append(get_erf_diff(y0[i], noise[i], qmin, qmax, qcut))
+                arg.append(get_erf_diff(y0[i], noise[i], qmin, qmax, qcut, dof = self.debiasDOF))
                 #arg.append(get_stf_diff(y0[i], noise[i], qmin, qmax, qcut))
 
         comp = np.einsum('ijk,i->jk', np.nan_to_num(arg), skyfracs)
@@ -251,16 +251,23 @@ def _get_completeness(self, marr, zarr, y0, qbin, marr_500c=None, **params):
             comp = 0.
             for i in range(Npatches):
                 if compl_mode == 'erf_prod':
-                    arg = get_erf_prod(yy0, noise[i], qmin, qmax, qcut, kk, Nq)
+                    arg = get_erf_prod(yy0, noise[i], qmin, qmax, qcut, kk, Nq, dof = self.debiasDOF)
                     #arg = get_stf_prod(yy0, noise[i], qmin, qmax, qcut, kk, Nq)
                 elif compl_mode == 'erf_diff':
-                    arg = get_erf_diff(yy0, noise[i], qmin, qmax, qcut)
+                    arg = get_erf_diff(yy0, noise[i], qmin, qmax, qcut, dof = self.debiasDOF)
                     #arg = get_stf_diff(yy0, noise[i], qmin, qmax, qcut)
 
                 cc = np.float32(arg * skyfracs[i])
                 arg0 = np.float32((lnyy[:, None,None] - mu[i])/(np.sqrt(2.)*scatter))
                 args = fac * np.exp(np.float32(-arg0**2.)) * cc[:, None,None]
-                comp += np.trapz(np.float32(args), x=lnyy, axis=0)
+
+                # truncation for debiasing:
+                qtab_filter = yy0/noise[i]
+                qtab_filter[qtab_filter<self.debias_cutoff] = 0.
+                qtab_filter[qtab_filter>=self.debias_cutoff] = 1.
+                # end truncation for debiasing.
+
+                comp += np.trapz(np.float32(args)*qtab_filter[:,None,None], x=lnyy, axis=0)
 
             comp[comp < 0.] = 0.
             comp[comp > 1.] = 1.
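Note on the new qtab_filter: it is a hard step in q = yy0/noise[i] that zeroes the integrand below debias_cutoff before the trapezoidal integration over ln y, presumably so that the sqrt(q^2 + dof) debiasing does not generate spurious completeness at very low significance. A standalone sketch of the mask, with illustrative array values (not part of the diff):

import numpy as np

debias_cutoff = 2.                    # as in the updated yaml defaults
noise_i = 1.0e-4                      # illustrative per-patch noise
yy0 = np.geomspace(1e-5, 1e-2, 8)     # illustrative tabulated y0 grid

# hard truncation in q = y0/noise: 0 below the cutoff, 1 at or above it
qtab_filter = yy0 / noise_i
qtab_filter[qtab_filter < debias_cutoff] = 0.
qtab_filter[qtab_filter >= debias_cutoff] = 1.

# the mask then multiplies the integrand, as in
# np.trapz(np.float32(args) * qtab_filter[:, None, None], x=lnyy, axis=0)
print(qtab_filter)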
@@ -675,8 +682,9 @@ def initialize_common(self):
         # to print all columns: print(catf[1].columns)
 
         # SPT-style SNR bias correction, this should be applied before applying SNR cut
-        debiasDOF = self.selfunc['debiasDOF']
-        qcat = np.sqrt(np.power(qcat, 2) - debiasDOF)
+        self.debiasDOF = self.selfunc['debiasDOF']
+        self.debias_cutoff = self.selfunc['debias_cutoff']
+        #qcat = np.sqrt(np.power(qcat, 2) - debiasDOF) # dont debias catalog q!
 
         # only above given SNR cut
         ind = np.where(qcat >= self.qcut)[0]
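With the catalogue-level correction commented out, the SNR cut above is applied to the raw catalogue q, and the debiasing lives entirely on the theory side (previous hunks). A minimal numeric sketch of the two conventions, with illustrative numbers and dof = 3 as in the updated defaults (not part of the diff):

import numpy as np

dof = 3.

# old convention (now commented out): deflate the observed catalogue SNR
q_obs = 5.2
q_debiased = np.sqrt(q_obs**2 - dof)       # ~4.9, what used to be compared to the cut

# new convention: keep q_obs as is; instead inflate the predicted SNR that
# enters the completeness, sqrt((y0/noise)**2 + dof)
y0_over_noise = 4.9
q_pred = np.sqrt(y0_over_noise**2 + dof)   # ~5.2

print(q_debiased, q_pred)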
@@ -1195,57 +1203,57 @@ def tinker(sgm, z):
 # return self.get_dndlnM_at_z_and_M(z,marr)
 
 
-def get_erf(y, noise, cut):
+def get_erf(y, noise, cut, dof = 0):
     #arg = (y - cut*noise)/np.sqrt(2.)/noise
-    arg = (y/noise - cut)/np.sqrt(2.)
+    arg = (np.sqrt((y/noise)**2.+dof) - cut)/np.sqrt(2.)
     erfc = (special.erf(arg) + 1.)/2.
     return erfc
 
-def get_stf(y, noise, qcut):
-    ans = y * 0.0
-    ans[y - qcut*noise > 0] = 1.0
-    return ans
+# def get_stf(y, noise, qcut):
+#     ans = y * 0.0
+#     ans[y - qcut*noise > 0] = 1.0
+#     return ans
 
-def get_erf_diff(y, noise, qmin, qmax, qcut):
-    arg1 = (y/noise - qmax)/np.sqrt(2.)
+def get_erf_diff(y, noise, qmin, qmax, qcut, dof = 0):
+    arg1 = (np.sqrt((y/noise)**2.+dof) - qmax)/np.sqrt(2.)
     if qmin > qcut:
         qlim = qmin
     else:
         qlim = qcut
-    arg2 = (y/noise - qlim)/np.sqrt(2.)
+    arg2 = (np.sqrt((y/noise)**2.+dof) - qlim)/np.sqrt(2.)
     erf_compl = (special.erf(arg2) - special.erf(arg1)) / 2.
     return erf_compl
 
-def get_stf_diff(y, noise, qmin, qmax, qcut):
-    if qmin > qcut:
-        qmin = qmin
-    else:
-        qmin = qcut
-    ans = y * 0.0
-    ans[(y - qmin*noise > 0) & (y - qmax*noise < 0)] = 1.0
-    return ans
-
-def get_erf_prod(y, noise, qmin, qmax, qcut, k, Nq):
-
-    arg0 = get_erf(y, noise, qcut)
-    arg1 = get_erf(y, noise, qmin)
-    arg2 = 1. - get_erf(y, noise, qmax)
-
-    if k == 0: arg1 = 1
-    if k == Nq-1: arg2 = 1
-
-    return arg0 * arg1 * arg2
+# def get_stf_diff(y, noise, qmin, qmax, qcut):
+#     if qmin > qcut:
+#         qmin = qmin
+#     else:
+#         qmin = qcut
+#     ans = y * 0.0
+#     ans[(y - qmin*noise > 0) & (y - qmax*noise < 0)] = 1.0
+#     return ans
 
-def get_stf_prod(y, noise, qmin, qmax, qcut, k, Nq):
+def get_erf_prod(y, noise, qmin, qmax, qcut, k, Nq, dof = 0):
 
-    arg0 = get_stf(y, noise, qcut)
-    arg1 = get_stf(y, noise, qmin)
-    arg2 = 1. - get_stf(y, noise, qmax)
+    arg0 = get_erf(y, noise, qcut, dof)
+    arg1 = get_erf(y, noise, qmin, dof)
+    arg2 = 1. - get_erf(y, noise, qmax, dof)
 
     if k == 0: arg1 = 1
     if k == Nq-1: arg2 = 1
 
     return arg0 * arg1 * arg2
+#
+# def get_stf_prod(y, noise, qmin, qmax, qcut, k, Nq):
+#
+#     arg0 = get_stf(y, noise, qcut)
+#     arg1 = get_stf(y, noise, qmin)
+#     arg2 = 1. - get_stf(y, noise, qmax)
+#
+#     if k == 0: arg1 = 1
+#     if k == Nq-1: arg2 = 1
+#
+#     return arg0 * arg1 * arg2
 
 def gaussian(xx, mu, sig, noNorm=False):
     if noNorm:
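For a quick check of the modified selection functions, the debiased erf completeness can be evaluated on its own; a standalone sketch using the same expression as get_erf above (the SNR cut value is illustrative):

import numpy as np
from scipy import special

def get_erf(y, noise, cut, dof=0):
    # same expression as in the patched clusters.py
    arg = (np.sqrt((y / noise)**2. + dof) - cut) / np.sqrt(2.)
    return (special.erf(arg) + 1.) / 2.

noise = 1.
qcut = 5.                       # illustrative SNR cut
y = np.array([3., 5., 7.])

print(get_erf(y, noise, qcut))          # dof = 0: unbiased argument
print(get_erf(y, noise, qcut, dof=3.))  # debiasDOF = 3 shifts the completeness up slightly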
(second changed file: likelihood yaml defaults)
@@ -55,7 +55,8 @@ likelihood:
       dwnsmpl_bins : 30 # If resolution=downsample, number of bins to use
       save_dwsmpld : False # Save downsampled Q and rms to npz file and once it exists read those
 
-      debiasDOF : 0.
+      debiasDOF : 3.
+      debias_cutoff : 2.
     binning:
       # redshift bins for number counts
       z:
(third changed file: likelihood yaml defaults)
@@ -57,6 +57,7 @@ likelihood:
       save_dwsmpld : False # Save downsampled Q and rms to npz file and once it exists read those
 
       debiasDOF : 0.
+      debias_cutoff: 0.
     binning:
       # redshift bins for number counts
       z:
@@ -191,8 +192,8 @@ theory:
     use_planck_binned_proba : 0 #use diff of erfs
     use_skyaveraged_noise: 0 # this will speed-up everything
 
-    szcc_dof : 0.
-    szcc_qtrunc : 0.
+    szcc_dof : 3.
+    szcc_qtrunc : 2.
 
 
     stop_at_error: True
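The likelihood-side options appear to mirror the class_sz theory settings ("matches class_sz" in the commit message): debiasDOF alongside szcc_dof and debias_cutoff alongside szcc_qtrunc, both set to 3 and 2 here. A hypothetical guard for a run script to catch mismatched settings (the dictionaries stand in for the parsed yaml blocks; this is not SOLikeT API):

# hypothetical consistency check between likelihood and theory settings
selfunc_opts = {"debiasDOF": 3., "debias_cutoff": 2.}   # likelihood yaml block
classsz_opts = {"szcc_dof": 3., "szcc_qtrunc": 2.}      # theory yaml block

assert selfunc_opts["debiasDOF"] == classsz_opts["szcc_dof"], \
    "debiasDOF should match szcc_dof"
assert selfunc_opts["debias_cutoff"] == classsz_opts["szcc_qtrunc"], \
    "debias_cutoff should match szcc_qtrunc"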
