Merge pull request #347 from johannbrehmer/develop
Fixes for SCANDAL (and Python 2)
johannbrehmer authored Jun 10, 2019
2 parents 097e369 + 673b451 commit e70521b
Showing 10 changed files with 593 additions and 45 deletions.
505 changes: 505 additions & 0 deletions examples/tutorial_particle_physics/3a_likelihood.ipynb

Large diffs are not rendered by default.

79 changes: 59 additions & 20 deletions examples/tutorial_particle_physics/4a_limits.ipynb

Large diffs are not rendered by default.

6 changes: 2 additions & 4 deletions madminer/fisherinformation.py
@@ -933,9 +933,7 @@ def histogram_of_fisher_information(

# Rescale for test_split
if test_split is not None:
-correction = np.array(
-[1./test_split for obs_event in observations]
-)
+correction = np.array([1.0 / test_split for obs_event in observations])
weights_benchmarks *= correction[:, np.newaxis]

weights_theta = mdot(theta_matrix, weights_benchmarks)
@@ -954,7 +952,7 @@ def histogram_of_fisher_information(
fisher_info_events = model.calculate_fisher_information(
x=observations,
weights=weights_theta,
-n_events=luminosity * np.sum(weights_theta) ,
+n_events=luminosity * np.sum(weights_theta),
sum_events=False,
)

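For reference, the rescaling in the hunk above multiplies every benchmark weight of an event by 1 / test_split, so that weighted sums over the evaluation subsample approximate the full-sample totals. A minimal standalone sketch of the same broadcast, with illustrative shapes and values only (not taken from MadMiner data):

import numpy as np

test_split = 0.2                            # fraction of events kept for evaluation
weights_benchmarks = np.array([[0.1, 0.2],  # shape (n_events, n_benchmarks)
                               [0.3, 0.4]])

# One factor of 1 / test_split per event, mirroring the list comprehension in the diff
correction = np.array([1.0 / test_split for _ in weights_benchmarks])

# correction[:, np.newaxis] has shape (n_events, 1) and broadcasts over the benchmark axis
weights_benchmarks = weights_benchmarks * correction[:, np.newaxis]

Since the factor is identical for every event, a scalar would work just as well; the per-event array simply mirrors the structure of the original code.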
19 changes: 12 additions & 7 deletions madminer/limits.py
@@ -158,7 +158,7 @@ def _analyse(
model = load_estimator(model_file)

logger.info("Calculating kinematic log likelihood ratio with estimator")
-log_r_kin = self._calculate_log_likelihood_ratio_kinematics(x, theta_grid, model,theta_true)
+log_r_kin = self._calculate_log_likelihood_ratio_kinematics(x, theta_grid, model, theta_true)
log_r_kin = log_r_kin.astype(np.float64)
log_r_kin = self._clean_nans(log_r_kin)
logger.debug("Raw mean -2 log r: %s", np.mean(-2.0 * log_r_kin, axis=1))
@@ -368,11 +368,8 @@ def _calculate_log_likelihood_ratio_kinematics(self, x_observed, theta_grid, mod
x=x_observed, theta=theta_grid, test_all_combinations=True, evaluate_score=False
)
elif isinstance(model, LikelihoodEstimator):
-raise NotImplementedError(
-"LikelihoodEstimator is not implemented!"
-)
-log_r, _ = model.evaluate_log_likelihood_ratio(
-x=x_observed, theta0=theta_grid, theta1=np.array(theta1), test_all_combinations=True, evaluate_score=False
+log_r, _ = model.evaluate_log_likelihood(
+x=x_observed, theta=theta_grid, test_all_combinations=True, evaluate_score=False
)
elif isinstance(model, Ensemble) and model.estimator_type == "parameterized_ratio":
log_r, _ = model.evaluate_log_likelihood_ratio(
@@ -382,10 +379,18 @@
evaluate_score=False,
calculate_covariance=False,
)
+elif isinstance(model, Ensemble) and model.estimator_type == "likelihood":
+log_r, _ = model.evaluate_log_likelihood(
+x=x_observed,
+theta=theta_grid,
+test_all_combinations=True,
+evaluate_score=False,
+calculate_covariance=False,
+)
else:
raise NotImplementedError(
"Likelihood ratio estimation is currently only implemented for "
-"ParameterizedRatioEstimator instancees"
+"ParameterizedRatioEstimator and LikelihoodEstimator instancees"
)
return log_r

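Taken together, the limits.py changes above mean the kinematic term no longer requires a ratio estimator: a LikelihoodEstimator, or an Ensemble of them, is now evaluated via evaluate_log_likelihood on the same (x, theta) grid instead of raising NotImplementedError. A condensed sketch of the resulting dispatch, using only the calls and keyword arguments visible in this diff (the import path is an assumption about the MadMiner layout at the time, and the Ensemble branches follow the same pattern):

from madminer.ml import LikelihoodEstimator, ParameterizedRatioEstimator  # assumed import path

def kinematic_log_r(model, x_observed, theta_grid):
    if isinstance(model, ParameterizedRatioEstimator):
        # Ratio estimators return an estimate of log r(x | theta) directly
        log_r, _ = model.evaluate_log_likelihood_ratio(
            x=x_observed, theta=theta_grid, test_all_combinations=True, evaluate_score=False
        )
    elif isinstance(model, LikelihoodEstimator):
        # Density estimators such as SCANDAL-trained flows return log p(x | theta),
        # which differs from log r only by a theta-independent term that cancels
        # in the likelihood-ratio test statistic used for the limit scan
        log_r, _ = model.evaluate_log_likelihood(
            x=x_observed, theta=theta_grid, test_all_combinations=True, evaluate_score=False
        )
    else:
        raise NotImplementedError("Only ratio and likelihood estimators are handled in this sketch")
    return log_r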
15 changes: 8 additions & 7 deletions madminer/plotting.py
@@ -991,10 +991,10 @@ def plot_fisher_information_contours_2d(
logger.debug("Std: %s", uncertainties)

# Plot results
-do_fig=False
+do_fig = False
if ax is None:
-do_fig=True
-fig,ax = plt.figure(figsize=(5.0, 5.0))
+do_fig = True
+fig, ax = plt.figure(figsize=(5.0, 5.0))

# Error bands
for i in range(n_matrices):
@@ -1036,6 +1036,7 @@
else:
return ax


def plot_fisherinfo_barplot(
fisher_information_matrices, labels, determinant_indices=None, eigenvalue_colors=None, bar_colors=None
):
@@ -1246,10 +1247,10 @@ def plot_distribution_of_information(
Plot as Matplotlib Figure instance.
"""
-#prepare Plot
+# prepare Plot
if fontsize is not None:
-matplotlib.rcParams.update({'font.size': fontsize})
+matplotlib.rcParams.update({"font.size": fontsize})

# Prepare data
n_entries = len(fisher_information_matrices)
size = len(fisher_information_matrices[1])
@@ -1292,7 +1293,7 @@ def plot_distribution_of_information(
# xsec plot
fig = plt.figure(figsize=figsize)
ax1 = plt.subplot(111)
-#fig.subplots_adjust(left=0.1667, right=0.8333, bottom=0.17, top=0.97)
+# fig.subplots_adjust(left=0.1667, right=0.8333, bottom=0.17, top=0.97)

if log_xsec:
ax1.set_yscale("log")
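The plotting.py hunks above are black-style reformatting; the do_fig bookkeeping they touch is the usual "create a figure only if the caller did not pass an axis" pattern, returning the figure when it was created here and the bare axis otherwise (the else: return ax branch in the -1036 hunk). A minimal sketch of that pattern, assuming plt.subplots as the call that yields the (figure, axes) pair the fig, ax unpacking expects:

import matplotlib.pyplot as plt

def plot_on(ax=None):
    # Only create a new figure if the caller did not supply an axis
    do_fig = ax is None
    if do_fig:
        fig, ax = plt.subplots(figsize=(5.0, 5.0))

    ax.plot([0.0, 1.0], [0.0, 1.0])  # placeholder for the actual contour drawing

    # Return the figure when this function owns it, otherwise hand back the axis
    if do_fig:
        return fig
    return ax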
2 changes: 1 addition & 1 deletion madminer/utils/ml/models/batch_norm.py
@@ -74,7 +74,7 @@ def generate_samples(self, n_samples=1, u=None, **kwargs):
def to(self, *args, **kwargs):
logger.debug("Transforming BatchNorm to %s", args)

-self = super().to(*args, **kwargs)
+self = super(BatchNorm, self).to(*args, **kwargs)

self.running_mean = self.running_mean.to(*args, **kwargs)
self.running_var = self.running_var.to(*args, **kwargs)
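This one-line change, repeated for the MADE and MAF model classes in the files that follow, is the Python 2 part of the fix: calling super() with no arguments only works on Python 3, whereas the explicit super(ClassName, self) form runs on both interpreters. A self-contained illustration with hypothetical class names (not from the repository):

class Flow(object):  # explicit object base keeps this a new-style class under Python 2
    def to(self, device):
        return "moved to {0}".format(device)

class BatchNormLike(Flow):
    def to(self, device):
        # Zero-argument super() raises "TypeError: super() takes at least 1 argument (0 given)"
        # under Python 2; the two-argument form below behaves identically on Python 2 and 3.
        return super(BatchNormLike, self).to(device)

print(BatchNormLike().to("cpu"))  # prints: moved to cpu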
4 changes: 2 additions & 2 deletions madminer/utils/ml/models/made.py
@@ -139,7 +139,7 @@ def to(self, *args, **kwargs):
self.to_args = args
self.to_kwargs = kwargs

-self = super().to(*args, **kwargs)
+self = super(GaussianMADE, self).to(*args, **kwargs)

for i, (M, W, b) in enumerate(zip(self.Ms, self.Ws, self.bs)):
self.Ms[i] = M.to(*args, **kwargs)
@@ -314,7 +314,7 @@ def to(self, *args, **kwargs):
self.to_args = args
self.to_kwargs = kwargs

-self = super().to(*args, **kwargs)
+self = super(ConditionalGaussianMADE, self).to(*args, **kwargs)

for i, (M, W, b) in enumerate(zip(self.Ms, self.Ws, self.bs)):
self.Ms[i] = M.to(*args, **kwargs)
2 changes: 1 addition & 1 deletion madminer/utils/ml/models/made_mog.py
@@ -201,7 +201,7 @@ def to(self, *args, **kwargs):
self.to_args = args
self.to_kwargs = kwargs

-self = super().to(*args, **kwargs)
+self = super(ConditionalMixtureMADE, self).to(*args, **kwargs)

for i, M in enumerate(self.Ms):
self.Ms[i] = M.to(*args, **kwargs)
4 changes: 2 additions & 2 deletions madminer/utils/ml/models/maf.py
@@ -103,7 +103,7 @@ def to(self, *args, **kwargs):
self.to_args = args
self.to_kwargs = kwargs

-self = super().to(*args, **kwargs)
+self = super(MaskedAutoregressiveFlow, self).to(*args, **kwargs)

for i, (made) in enumerate(self.mades):
self.mades[i] = made.to(*args, **kwargs)
@@ -260,7 +260,7 @@ def to(self, *args, **kwargs):
self.to_args = args
self.to_kwargs = kwargs

-self = super().to(*args, **kwargs)
+self = super(ConditionalMaskedAutoregressiveFlow, self).to(*args, **kwargs)

for i, (made) in enumerate(self.mades):
self.mades[i] = made.to(*args, **kwargs)
2 changes: 1 addition & 1 deletion madminer/utils/ml/models/maf_mog.py
@@ -137,7 +137,7 @@ def to(self, *args, **kwargs):
self.to_args = args
self.to_kwargs = kwargs

-self = super().to(*args, **kwargs)
+self = super(ConditionalMixtureMaskedAutoregressiveFlow, self).to(*args, **kwargs)

for i, (made) in enumerate(self.mades):
self.mades[i] = made.to(*args, **kwargs)

