
Commit 40084d7

WIP
1 parent f6cac43 commit 40084d7

File tree

2 files changed: +13 −7 lines


econml/iv/nnet/_deepiv.py (+6, −3)
@@ -94,11 +94,14 @@ def mog_loss_model(n_components, d_t):
     # Use logsumexp for numeric stability:
     # LL = C - log(sum(exp(-d2/(2*sig^2) + log(pi_i/sig^d))))
     def make_logloss(d2, sig, pi):
+        # values = pi / K.pow(sig, d_t) * K.exp(-d2 / (2 * K.square(sig)))
+        # return -K.log(K.sum(values, axis=-1))
+
         # logsumexp doesn't exist in keras 2.4; simulate it
         values = - d2 / (2 * K.square(sig)) + K.log(pi / K.pow(sig, d_t))
         # logsumexp(a,b,c) = log(exp(a)+exp(b)+exp(c)) = log((exp(a-k)+exp(b-k)+exp(c-k))*exp(k))
         #                  = log((exp(a-k)+exp(b-k)+exp(c-k))) + k
-        mx = K.max(values, axis=-1)
+        mx = K.stop_gradient(K.max(values, axis=-1))
         return -K.log(K.sum(K.exp(values - L.Reshape((-1, 1))(mx)), axis=-1)) - mx

     ll = L.Lambda(lambda dsp: make_logloss(*dsp), output_shape=(1,))([d2, sig, pi])
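The hunk above is the standard numerically stable logsumexp evaluation of the mixture-of-Gaussians negative log-likelihood. A minimal NumPy sketch of the same computation (the function name and shapes are illustrative assumptions; the constant C from the comment is dropped, as in the original):

import numpy as np

def mog_neg_log_likelihood(pi, mu, sig, t):
    # Spherical MoG negative log-likelihood, up to the constant C,
    # mirroring make_logloss. Assumed shapes (for illustration only):
    #   pi: (n, k), mu: (n, k, d), sig: (n, k), t: (n, d)
    d = t.shape[-1]
    d2 = np.sum((t[:, None, :] - mu) ** 2, axis=-1)         # (n, k) squared distances
    values = -d2 / (2 * sig ** 2) + np.log(pi / sig ** d)   # per-component log terms
    # logsumexp(v) = log(sum(exp(v - max(v)))) + max(v); shifting by the max
    # keeps exp() from overflowing. The Keras version additionally wraps the
    # max in K.stop_gradient so no gradient flows through the non-smooth max;
    # the shift cancels algebraically, so the value is unchanged either way.
    mx = np.max(values, axis=-1, keepdims=True)
    return -(np.log(np.sum(np.exp(values - mx), axis=-1)) + mx[:, 0])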
@@ -350,7 +353,7 @@ def fit(self, Y, T, X, Z, *, inference=None):

         ll = mog_loss_model(n_components, d_t)([pi, mu, sig, t_in])

-        model = Model([z_in, x_in, t_in], [ll])
+        model = Model([z_in, x_in, t_in], [])
         model.add_loss(L.Lambda(K.mean)(ll))
         model.compile(self._optimizer)
         # TODO: do we need to give the user more control over other arguments to fit?
@@ -365,7 +368,7 @@ def fit(self, Y, T, X, Z, *, inference=None):
                    self._n_samples, self._use_upper_bound_loss, self._n_gradient_samples)

         rl = lm([z_in, x_in, y_in])
-        response_model = Model([z_in, x_in, y_in], [rl])
+        response_model = Model([z_in, x_in, y_in], [])
         response_model.add_loss(L.Lambda(K.mean)(rl))
         response_model.compile(self._optimizer)
         # TODO: do we need to give the user more control over other arguments to fit?
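Both fit-time models now declare no outputs and carry their objective entirely through add_loss, so fit can be called without target arrays. A minimal sketch of that pattern (toy inputs and loss; the names are illustrative, not from the library):

import numpy as np
import keras
import keras.layers as L
import keras.backend as K

z_in = L.Input(shape=(1,))
t_in = L.Input(shape=(1,))
# Stand-in for the MoG log-loss tensor: per-sample squared difference.
ll = L.Lambda(lambda zt: K.square(zt[0] - zt[1]))([z_in, t_in])

# As in the diff above: an output-free model trained purely via add_loss.
model = keras.engine.Model([z_in, t_in], [])
model.add_loss(L.Lambda(K.mean)(ll))
model.compile('nadam')
model.fit([np.random.normal(size=(64, 1)), np.random.normal(size=(64, 1))])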

econml/tests/test_deepiv.py (+7, −4)
@@ -10,6 +10,7 @@

 import keras
 import keras.backend as K
+import tensorflow

 import pytest

@@ -34,7 +35,7 @@ def test_stop_grad(self):
         model.compile('nadam')
         model.fit([np.array([[1]]), np.array([[2]]), np.array([[0]])])

-    def test_mog_loss(self):
+    def test_aaaamog_loss(self):
         inputs = [keras.layers.Input(shape=s) for s in [(3,), (3, 2), (3,), (2,)]]
         ll_model = keras.engine.Model(inputs, mog_loss_model(3, 2)(inputs))

@@ -489,7 +490,7 @@ def datafunction(n, s, images=False, test=False):
         print("losses: {}".format(losses))

     @pytest.mark.slow
-    def test_mog_models(self):
+    def test_aamog_models(self):
         d = 2
         n = 5

@@ -518,9 +519,10 @@ def norm(lr):
         # pi,mu,sig = MixtureOfGaussians(n, d)(x_network)
         # ll = MixtureOfGaussiansLogLoss(n, d)([pi,mu,sig,t_input])
         model = keras.engine.Model([x_input, t_input], [ll])
-        model.add_loss(K.mean(ll))
+        model.add_loss(ll)
         model.compile('nadam')
-        model.fit([x, t], epochs=5)
+        model.fit([x, t], np.zeros((5000, 1)), epochs=5)
+        llm = model.predict([x, t])

         # For some reason this doesn't work at all when run against the CNTK backend...
         # model.compile('nadam', loss=lambda _,l:l)
@@ -542,6 +544,7 @@ def norm(lr):
         t = 10 * np.sin(theta) + np.random.normal(size=(5000, d))
         pi, mu, sig = model2.predict([x])
         sampled_t = model3.predict([x])
+        model.summary()
         llm = model.predict([x, t])

         pi_o = np.tile([[0.25, 0.25, 0.25, 0.25, 0]], (5000, 1))
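The test keeps the loss tensor as a model output instead, so fit still expects one target array per output; the zeros are dummies that the add_loss objective never reads, and predict can then return the per-sample loss for inspection. A compact self-contained sketch of that variant (toy loss and shapes are assumptions, not from the commit):

import numpy as np
import keras
import keras.layers as L
import keras.backend as K

x_in = L.Input(shape=(1,))
t_in = L.Input(shape=(1,))
ll = L.Lambda(lambda xt: K.square(xt[0] - xt[1]))([x_in, t_in])  # toy loss tensor

model = keras.engine.Model([x_in, t_in], [ll])  # loss tensor kept as an output
model.add_loss(ll)       # as in the updated test, the unreduced tensor is added
model.compile('nadam')   # no per-output loss is configured

x = np.random.normal(size=(100, 1))
t = np.random.normal(size=(100, 1))
model.fit([x, t], np.zeros((100, 1)), epochs=1)  # dummy targets for the output
llm = model.predict([x, t])  # per-sample loss values, as inspected in the test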
