+# Import required packages
+import torch
+import numpy as np
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from tqdm import tqdm
+
# Set up model
+
+# Define flows
+K = 32
+torch.manual_seed(0)
+
+latent_size = 4
+b = torch.Tensor([1] * (latent_size // 2) + [0] * (latent_size // 2))
+flows = []
+for i in range(K):
+ s = nf.nets.MLP([latent_size, 4 * latent_size, latent_size], init_zeros=True)
+ t = nf.nets.MLP([latent_size, 4 * latent_size, latent_size], init_zeros=True)
+ if i % 2 == 0:
+ flows += [nf.flows.MaskedAffineFlow(b, t, s)]
+ else:
+ flows += [nf.flows.MaskedAffineFlow(1 - b, t, s)]
+ flows += [nf.flows.ActNorm(latent_size)]
+
+# Set augmented target
+target = nf.distributions.TwoIndependent(nf.distributions.TwoMoons(),
+ nf.distributions.DiagGaussian(2))
+# Set base distribution
+q0 = nf.distributions.DiagGaussian(4)
+
+# Construct flow model
+nfm = nf.NormalizingFlow(q0=q0, flows=flows, p=target)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+nfm = nfm.to(device)
+nfm = nfm.double()
+
+# Initialize ActNorm
+z, _ = nfm.sample(num_samples=2 ** 7)
+z_np = z.to('cpu').data.numpy()
+
+plt.figure(figsize=(15, 15))
+plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+plt.gca().set_aspect('equal', 'box')
+plt.title("Standard coordinates")
+plt.show()
+
+plt.figure(figsize=(15, 15))
+plt.hist2d(z_np[:, 2].flatten(), z_np[:, 3].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+plt.gca().set_aspect('equal', 'box')
+plt.title("Augmented coordinates")
+plt.show()
+
# Plot augmented target
+z = target.sample(num_samples=2 ** 16)
+z_np = z.to('cpu').data.numpy()
+
+plt.figure(figsize=(15, 15))
+plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+plt.gca().set_aspect('equal', 'box')
+plt.title("Standard coordinates")
+plt.show()
+
+plt.figure(figsize=(15, 15))
+plt.hist2d(z_np[:, 2].flatten(), z_np[:, 3].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+plt.gca().set_aspect('equal', 'box')
+plt.title("Augmented coordinates")
+plt.show()
+
# Train model
+max_iter = 20000
+num_samples = 2 ** 10
+anneal_iter = 10000
+show_iter = 1000
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(nfm.parameters(), lr=1e-4, weight_decay=1e-6)
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+ loss = nfm.reverse_kld(num_samples, beta=np.min([1., 0.01 + it / anneal_iter]))
+
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned posterior
+ if (it + 1) % show_iter == 0:
+ z, _ = nfm.sample(num_samples=2 ** 14)
+ z_np = z.to('cpu').data.numpy()
+
+ plt.figure(figsize=(15, 15))
+ plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+ plt.gca().set_aspect('equal', 'box')
+ plt.title("Standard coordinates")
+ plt.show()
+
+ plt.figure(figsize=(15, 15))
+ plt.hist2d(z_np[:, 2].flatten(), z_np[:, 3].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+ plt.gca().set_aspect('equal', 'box')
+ plt.title("Augmented coordinates")
+ plt.show()
+
# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot learned distribution
+z, _ = nfm.sample(num_samples=2 ** 16)
+z_np = z.to('cpu').data.numpy()
+
+plt.figure(figsize=(15, 15))
+plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+plt.gca().set_aspect('equal', 'box')
+plt.title("Standard coordinates")
+plt.show()
+
+plt.figure(figsize=(15, 15))
+plt.hist2d(z_np[:, 2].flatten(), z_np[:, 3].flatten(), (50, 50), range=[[-3, 3], [-3, 3]])
+plt.gca().set_aspect('equal', 'box')
+plt.title("Augmented coordinates")
+plt.show()
+
This example shows how one can easily change the base distribution with our API.
+First, let's look at how a normalizing flow with a single Gaussian base distribution learns the two moons target distribution.
+# Import packages
+import torch
+import numpy as np
+
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+from matplotlib import cm
+
+from tqdm import tqdm
+
# Set up model
+
+# Define 2D Gaussian base distribution
+base = nf.distributions.base.DiagGaussian(2)
+
+# Define list of flows
+num_layers = 32
+flows = []
+for i in range(num_layers):
+ # Neural network with two hidden layers having 64 units each
+ # Last layer is initialized by zeros making training more stable
+ param_map = nf.nets.MLP([1, 64, 64, 2], init_zeros=True)
+ # Add flow layer
+ flows.append(nf.flows.AffineCouplingBlock(param_map))
+ # Swap dimensions
+ flows.append(nf.flows.Permute(2, mode='swap'))
+
+# Construct flow model
+model = nf.NormalizingFlow(base, flows)
+
# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+model = model.to(device)
+
# Define target distribution
+target = nf.distributions.TwoMoons()
+
# Plot target distribution
+grid_size = 200
+xx, yy = torch.meshgrid(torch.linspace(-3, 3, grid_size), torch.linspace(-3, 3, grid_size))
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+zz = zz.to(device)
+
+log_prob = target.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Plot initial flow distribution
+model.eval()
+log_prob = model.log_prob(zz).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 4000
+num_samples = 2 ** 9
+show_iter = 500
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=5e-4, weight_decay=1e-5)
+
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ x = target.sample(num_samples).to(device)
+
+ # Compute loss
+ loss = model.forward_kld(x)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned distribution
+ if (it + 1) % show_iter == 0:
+ model.eval()
+ log_prob = model.log_prob(zz)
+ model.train()
+ prob = torch.exp(log_prob.to('cpu').view(*xx.shape))
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(15, 15))
+ plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+ plt.gca().set_aspect('equal', 'box')
+ plt.show()
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot target distribution
+f, ax = plt.subplots(1, 2, sharey=True, figsize=(15, 7))
+
+log_prob = target.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+ax[0].pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+
+ax[0].set_aspect('equal', 'box')
+ax[0].set_axis_off()
+ax[0].set_title('Target', fontsize=24)
+
+# Plot learned distribution
+model.eval()
+log_prob = model.log_prob(zz).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+ax[1].pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+
+ax[1].set_aspect('equal', 'box')
+ax[1].set_axis_off()
+ax[1].set_title('Real NVP', fontsize=24)
+
+plt.subplots_adjust(wspace=0.1)
+
+plt.show()
+
Notice that there is a bridge between the two modes of the learned target.
+This is usually not a big deal, since the bridge is very thin and going to a higher-dimensional space makes it exponentially unlikely to draw samples within it.
+However, the shape of each mode is also a bit distorted, so it would be nice to get rid of the bridge; a quick way to quantify it is sketched below.
+Now let's try a Gaussian mixture distribution as our base distribution, instead of a single Gaussian.
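One way to quantify the bridge is to check how much of the model's probability mass lands in low-density regions of the target. The following is a minimal sketch, assuming the trained model and target objects from the cells above; the log-density threshold is an arbitrary choice for illustration.
# Estimate the fraction of samples falling far from the target support
+with torch.no_grad():
+    x, _ = model.sample(2 ** 16)
+    log_p = target.log_prob(x)
+# Threshold chosen by eye; samples below it lie in the bridge or the tails
+frac = (log_p < -6.).float().mean().item()
+print('Fraction of off-target samples: %.4f' % frac)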
+# Set up model
+
+# Define a mixture of Gaussians with 2 modes.
+base = nf.distributions.base.GaussianMixture(2, 2, loc=[[-2, 0], [2, 0]], scale=[[0.3, 0.3], [0.3, 0.3]])
+
+# Define list of flows
+num_layers = 32
+flows = []
+for i in range(num_layers):
+ # Neural network with two hidden layers having 64 units each
+ # Last layer is initialized by zeros making training more stable
+ param_map = nf.nets.MLP([1, 64, 64, 2], init_zeros=True)
+ # Add flow layer
+ flows.append(nf.flows.AffineCouplingBlock(param_map))
+ # Swap dimensions
+ flows.append(nf.flows.Permute(2, mode='swap'))
+
+# Construct flow model
+model = nf.NormalizingFlow(base, flows).to(device)  # use the device selected above so this also runs on CPU
+
# Plot initial flow distribution
+model.eval()
+log_prob = model.log_prob(zz).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 4000
+num_samples = 2 ** 9
+show_iter = 500
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=5e-4, weight_decay=1e-5)
+
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ x = target.sample(num_samples).to(device)
+
+ # Compute loss
+ loss = model.forward_kld(x)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned distribution
+ if (it + 1) % show_iter == 0:
+ model.eval()
+ log_prob = model.log_prob(zz)
+ model.train()
+ prob = torch.exp(log_prob.to('cpu').view(*xx.shape))
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(15, 15))
+ plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+ plt.gca().set_aspect('equal', 'box')
+ plt.show()
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
Now the modes are in much better shape, and there is no bridge between them!
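To see how the mixture base removes the bridge, one can color flow samples by their base component: each Gaussian mode should map onto exactly one moon. This is a minimal sketch, assuming the trained mixture-base model from above; the component is read off from the sign of the first base coordinate, which works here because the two modes sit at $(\pm 2, 0)$.
# Color flow samples by the mixture component they originate from
+with torch.no_grad():
+    z, _ = model.q0(10000)              # sample the mixture base
+    comp = (z[:, 0] > 0).cpu().numpy()  # component indicator
+    x = z
+    for flow in model.flows:            # push samples through the flow
+        x, _ = flow(x)
+    x = x.cpu().numpy()
+plt.figure(figsize=(10, 10))
+plt.scatter(x[:, 0], x[:, 1], c=comp, s=1, cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()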
+This is a Neural Spline Flow model which combines circular and unbounded random variables in one random vector.
+# Import packages
+import torch
+import numpy as np
+
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from tqdm import tqdm
+
# Set up target
+class Target:
+ def __init__(self, ndim, ind_circ):
+ self.ndim = ndim
+ self.ind_circ = ind_circ
+
+    def sample(self, n):
+        # Unbounded coordinates: mixture of a squeezed and a shifted Gaussian
+        s = torch.randn(n, self.ndim)
+        c = torch.rand(n, self.ndim) > 0.6
+        s = c * (0.3 * s - 0.5) + (1 - 1. * c) * (s + 1.3)
+        # Circular coordinates: asymmetric bimodal density, wrapped to [-pi, pi)
+        u = torch.rand(n, len(self.ind_circ))
+        s_ = torch.acos(2 * u - 1)
+        c = torch.rand(n, len(self.ind_circ)) > 0.3
+        s_[c] = -s_[c]
+        s[:, self.ind_circ] = (s_ + 1) % (2 * np.pi) - np.pi
+        return s
+
+# Visualize target
+target = Target(2, [1])
+s = target.sample(1000000)
+plt.hist(s[:, 0].data.numpy(), bins=200)
+plt.show()
+plt.hist(s[:, 1].data.numpy(), bins=200)
+plt.show()
+
base = nf.distributions.UniformGaussian(2, [1], torch.tensor([1., 2 * np.pi]))
+
+# Visualize base
+s = base.sample(1000000)
+plt.hist(s[:, 0].data.numpy(), bins=200)
+plt.show()
+plt.hist(s[:, 1].data.numpy(), bins=200)
+plt.show()
+
# Create normalizing flow
+K = 20
+
+flow_layers = []
+for i in range(K):
+ flow_layers += [nf.flows.CircularAutoregressiveRationalQuadraticSpline(2, 1, 128, [1],
+ tail_bound=torch.tensor([5., np.pi]),
+ permute_mask=True)]
+
+model = nf.NormalizingFlow(base, flow_layers)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+model = model.to(device)
+
model.eval()
+with torch.no_grad():
+ s, _ = model.sample(50000)
+model.train()
+plt.hist(s[:, 0].cpu().data.numpy(), bins=100)
+plt.show()
+plt.hist(s[:, 1].cpu().data.numpy(), bins=100)
+plt.show()
+
# Train model
+max_iter = 20000
+num_samples = 2 ** 10
+show_iter = 5000
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ x = target.sample(num_samples)
+
+ # Compute loss
+ loss = model.forward_kld(x.to(device))
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned density
+ if (it + 1) % show_iter == 0:
+ model.eval()
+ with torch.no_grad():
+ s, _ = model.sample(50000)
+ model.train()
+ plt.hist(s[:, 0].cpu().data.numpy(), bins=100)
+ plt.show()
+ plt.hist((s[:, 1].cpu().data.numpy() - 1) % (2 * np.pi), bins=100)
+ plt.show()
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
In this notebook, we train normalizing flows to fit predefined prior distributions in order to test their expressivity. Plots visualize the learned distributions for a given number of flow layers $K$, and the training loss is recorded to compare the expressivity of the different flows.
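A brief note on the objective used below: the models are trained with an annealed reverse KL divergence. With annealing coefficient $\beta_t = \min(1, 0.01 + t / T_{\text{anneal}})$, the loss at iteration $t$ is
+$$ \mathcal{L}_t = \mathbb{E}_{z \sim q}[\log q(z)] - \beta_t \, \mathbb{E}_{z \sim q}[\log p(z)], $$
+which reduces to the reverse KL divergence up to an additive constant once $\beta_t = 1$. This is the quantity estimated by the nfm.reverse_kld(batch_size, beta) calls in the training loop below.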
+%load_ext autoreload
+%autoreload 2
+
+# Import required packages
+import torch
+import numpy as np
+
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from tqdm import tqdm
+
+print("PyTorch version: %s" % torch.__version__)
+dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+print("Using device: %s" % dev)
+
+#z shape is (batch_size, num_samples, dim)
+
priors = []
+priors.append(nf.distributions.TwoModes(2.0, 0.2))
+priors.append(nf.distributions.Sinusoidal(0.4, 4))
+priors.append(nf.distributions.Sinusoidal_gap(0.4, 4))
+priors.append(nf.distributions.Sinusoidal_split(0.4, 4))
+priors.append(nf.distributions.Smiley(0.15))
+
+
+# Plot prior distributions
+grid_size = 200
+grid_length = 4.0
+grid_shape = ([-grid_length, grid_length], [-grid_length, grid_length])
+
+space_mesh = torch.linspace(-grid_length, grid_length, grid_size)
+xx, yy = torch.meshgrid(space_mesh, space_mesh)
+z = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2)
+z = z.reshape(-1, 2)
+
+K_arr = [2, 8, 32]
+max_iter = 30000
+batch_size = 512
+num_samples = 256
+save_iter = 1000
+
+for k in range(len(priors)):
+ log_prob = priors[k].log_prob(z)
+ prob = torch.exp(log_prob)
+
+ plt.figure(figsize=(10, 10))
+ plt.pcolormesh(xx, yy, prob.reshape(grid_size, grid_size))
+ plt.show()
+
flow_types = ("Planar", "Radial", "NICE", "RealNVP")
+max_iter = 20000
+batch_size = 1024
+plot_batches = 10 ** 2
+plot_samples = 10 ** 4
+save_iter = 50
+
+for name in flow_types:
+ K_arr = [2, 8, 32]
+ for K in K_arr:
+ print("Flow type {} with K = {}".format(name, K))
+ for k in range(len(priors)):
+ if k == 0 or k == 4:
+ anneal_iter = 10000
+ else: # turn annealing off when fitting to sinusoidal distributions
+ anneal_iter = 1
+
+ flows = []
+ b = torch.tensor([0,1])
+ for i in range(K):
+ if name == "Planar":
+ flows += [nf.flows.Planar((2,))]
+ elif name == "Radial":
+ flows += [nf.flows.Radial((2,))]
+ elif name == "NICE":
+ flows += [nf.flows.MaskedAffineFlow(b, nf.nets.MLP([2, 16, 16, 2], init_zeros=True))]
+ elif name == "RealNVP":
+ flows += [nf.flows.MaskedAffineFlow(b, nf.nets.MLP([2, 16, 16, 2], init_zeros=True),
+ nf.nets.MLP([2, 16, 16, 2], init_zeros=True))]
+ b = 1-b # parity alternation for mask
+
+ q0 = nf.distributions.DiagGaussian(2)
+ nfm = nf.NormalizingFlow(p=priors[k], q0=q0, flows=flows)
+ nfm.to(dev) # Move model on GPU if available
+
+ # Train model
+ loss_hist = np.array([])
+ log_q_hist = np.array([])
+ log_p_hist = np.array([])
+ x = torch.zeros(batch_size, device=dev)
+
+ optimizer = torch.optim.Adam(nfm.parameters(), lr=1e-3, weight_decay=1e-3)
+ for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+ loss = nfm.reverse_kld(batch_size, np.min([1.0, 0.01 + it / anneal_iter]))
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ if (it + 1) % save_iter == 0:
+ loss_hist = np.append(loss_hist, loss.cpu().data.numpy())
+
+ # Plot learned posterior distribution
+ z_np = np.zeros((0, 2))
+ for i in range(plot_batches):
+ z, _ = nfm.sample(plot_samples)
+ z_np = np.concatenate((z_np, z.cpu().data.numpy()))
+ plt.figure(figsize=(10, 10))
+ plt.hist2d(z_np[:, 0], z_np[:, 1], (grid_size, grid_size), grid_shape)
+ plt.show()
+ np.save("{}-K={}-k={}".format(name,K,k), (z_np, loss.cpu().data.numpy()))
+
+ # Plot training history
+ plt.figure(figsize=(10, 10))
+ plt.plot(loss_hist, label='loss')
+ plt.legend()
+ plt.show()
+
fig = plt.figure(figsize=(14, 10))
+K_arr = [2, 8, 32]
+nrows=5
+ncols=7
+axes = [ fig.add_subplot(nrows, ncols, r * ncols + c + 1) for r in range(0, nrows) for c in range(0, ncols) ]
+
+for ax in axes:
+ ax.set_xticks([])
+ ax.set_yticks([])
+
+grid_size = 100
+grid_length = 4.0
+grid_shape = ([-grid_length, grid_length], [-grid_length, grid_length])
+
+space_mesh = torch.linspace(-grid_length, grid_length, grid_size)
+xx, yy = torch.meshgrid(space_mesh, space_mesh)
+z = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2)
+z = z.reshape(-1, 2)
+axes[0].annotate('Target', xy=(0.5, 1.10), xytext=(0.5, 1.20), xycoords='axes fraction',
+ fontsize=24, ha='center', va='bottom',
+ arrowprops=dict(arrowstyle='-[, widthB=1.5, lengthB=0.2', lw=2.0))
+for k in range(5):
+ axes[k*ncols].set_ylabel('{}'.format(k+1), rotation=0, fontsize=20, labelpad=15)
+ log_prob = priors[k].log_prob(z)
+ prob = torch.exp(log_prob)
+ axes[k*ncols + 0].pcolormesh(xx, yy, prob.reshape(grid_size, grid_size))
+
+
+for l in range(len(K_arr)):
+ K = K_arr[l]
+ if l == 1:
+ axes[0*ncols + l+1].annotate('Planar flows', xy=(0.5, 1.10), xytext=(0.5, 1.20), xycoords='axes fraction',
+ fontsize=24, ha='center', va='bottom',
+ arrowprops=dict(arrowstyle='-[, widthB=6.0, lengthB=0.2', lw=2.0))
+ axes[4*ncols + l+1].set_xlabel('K = {}'.format(K), fontsize=20)
+ for k in range(5):
+ z_np, _ = np.load("Planar-K={}-k={}.npy".format(K,k), allow_pickle=True)
+ axes[k*ncols + l+1].hist2d(z_np[:, 0], z_np[:, 1], (grid_size, grid_size), grid_shape)
+
+for l in range(len(K_arr)):
+ K = K_arr[l]
+ if l == 1:
+ axes[0*ncols + l+1+len(K_arr)].annotate('Radial flows', xy=(0.5, 1.10), xytext=(0.5, 1.20), xycoords='axes fraction',
+ fontsize=24, ha='center', va='bottom',
+ arrowprops=dict(arrowstyle='-[, widthB=6.0, lengthB=0.2', lw=2.0))
+ axes[4*ncols + l+1+len(K_arr)].set_xlabel('K = {}'.format(K), fontsize=20)
+ for k in range(5):
+ z_np, _ = np.load("Radial-K={}-k={}.npy".format(K,k), allow_pickle=True)
+ axes[k*ncols + l+1+len(K_arr)].hist2d(z_np[:, 0], z_np[:, 1], (grid_size, grid_size), grid_shape)
+
+fig.subplots_adjust(hspace=0.02, wspace=0.02)
+
+for l in range(1,4):
+ for k in range(5):
+ pos1 = axes[k*ncols + l].get_position() # get the original position
+ pos2 = [pos1.x0 + 0.01, pos1.y0, pos1.width, pos1.height]
+ axes[k*ncols + l].set_position(pos2) # set a new position
+
+for l in range(4,7):
+ for k in range(5):
+ pos1 = axes[k*ncols + l].get_position() # get the original position
+ pos2 = [pos1.x0 + 0.02, pos1.y0, pos1.width, pos1.height]
+ axes[k*ncols + l].set_position(pos2) # set a new position
+
from itertools import repeat
+
+k_arr = [0, 2, 4]
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))
+markers = ['s', 'o', 'v', 'P', 'd']
+
+for k in range(len(k_arr)):
+ loss = [[] for i in repeat(None, len(flow_types))]
+ for intt, name in enumerate(flow_types):
+ for K in K_arr:
+ _, loss_v = np.load("{}-K={}-k={}.npy".format(name,K,k), allow_pickle=True)
+ loss[intt].append(loss_v)
+ axes[k].plot(K_arr, loss[intt], marker=markers[intt], label=name)
+ axes[k].set_title('Target {}'.format(k_arr[k]+1), fontsize=16)
+ axes[k].set_xlabel('Flow length', fontsize=12)
+ axes[k].set_ylabel('Variational bound (nats)', fontsize=12)
+ axes[k].legend()
+ axes[k].grid('major')
+
+fig.tight_layout(pad=2.0)
+
Here, we train a conditional normalizing flow model $q(x|c)$. Our target $p(x|c)$ is a simple 2D Gaussian $\mathcal{N}(x|\mu, \sigma)$, where we condition on the mean $\mu$ and standard deviation $\sigma$, i.e. $c = (\mu, \sigma)$. We apply conditional autoregressive and coupling neural spline flows, as well as a conditional masked autoregressive flow, to this problem.
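For reference, the conditional target density factorizes as $p(x|c) = \prod_i \mathcal{N}(x_i \mid \mu_i, \sigma_i^2)$. Below is a minimal sketch of this log-density, assuming the context layout $[\mu_1, \mu_2, \sigma_1, \sigma_2]$ that is used when constructing contexts further down; it is for illustration only and not part of the normflows API.
# Sketch of the conditional Gaussian log-density (illustration only)
+import torch
+import numpy as np
+
+def cond_gauss_log_prob(x, context):
+    # context = [mu_1, mu_2, sigma_1, sigma_2]
+    d = x.shape[-1]
+    mu, sigma = context[..., :d], context[..., d:]
+    return (-0.5 * d * np.log(2 * np.pi) - torch.log(sigma).sum(-1)
+            - 0.5 * (((x - mu) / sigma) ** 2).sum(-1))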
+# Import packages
+import torch
+import numpy as np
+import normflows as nf
+
+from matplotlib import pyplot as plt
+
+from tqdm import tqdm
+
# Get device to be used
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
# Define target
+target = nf.distributions.target.ConditionalDiagGaussian()
+context_size = 4
+
+# Plot target
+grid_size = 100
+xx, yy = torch.meshgrid(torch.linspace(-2, 2, grid_size), torch.linspace(-2, 2, grid_size), indexing='ij')
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+zz = zz.to(device)
+context_plot = torch.cat([torch.tensor([0.3, 0.9]).to(device) + torch.zeros_like(zz),
+ 0.6 * torch.ones_like(zz)], dim=-1)
+logp = target.log_prob(zz, context_plot)
+p_target = torch.exp(logp).view(*xx.shape).cpu().data.numpy()
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, p_target, shading='auto')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Define flows
+K = 4
+
+latent_size = 2
+hidden_units = 128
+hidden_layers = 2
+
+flows = []
+for i in range(K):
+ flows += [nf.flows.AutoregressiveRationalQuadraticSpline(latent_size, hidden_layers, hidden_units,
+ num_context_channels=context_size)]
+ flows += [nf.flows.LULinearPermute(latent_size)]
+
+# Set base distribution
+q0 = nf.distributions.DiagGaussian(2, trainable=False)
+
+# Construct flow model
+model = nf.ConditionalNormalizingFlow(q0, flows, target)
+
+# Move model on GPU if available
+model = model.to(device)
+
# Plot initial flow distribution, target as contours
+model.eval()
+log_prob = model.log_prob(zz, context_plot).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy(), shading='auto')
+plt.contour(xx, yy, p_target, cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 5000
+batch_size = 128
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=1e-5)
+
+
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ context = torch.cat([torch.randn((batch_size, 2), device=device),
+ 0.5 + 0.5 * torch.rand((batch_size, 2), device=device)],
+ dim=-1)
+ x = target.sample(batch_size, context)
+
+ # Compute loss
+ loss = model.forward_kld(x, context)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot trained flow distribution, target as contours
+model.eval()
+log_prob = model.log_prob(zz, context_plot).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy(), shading='auto')
+plt.contour(xx, yy, p_target, cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Define flows
+K = 4
+
+latent_size = 2
+hidden_units = 128
+hidden_layers = 2
+
+flows = []
+for i in range(K):
+ flows += [nf.flows.CoupledRationalQuadraticSpline(latent_size, hidden_layers, hidden_units,
+ num_context_channels=context_size)]
+ flows += [nf.flows.LULinearPermute(latent_size)]
+
+# Set base distribution
+q0 = nf.distributions.DiagGaussian(2, trainable=False)
+
+# Construct flow model
+model = nf.ConditionalNormalizingFlow(q0, flows, target)
+
+# Move model on GPU if available
+model = model.to(device)
+
# Plot initial flow distribution, target as contours
+model.eval()
+log_prob = model.log_prob(zz, context_plot).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy(), shading='auto')
+plt.contour(xx, yy, p_target, cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 5000
+batch_size = 128
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=1e-5)
+
+
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ context = torch.cat([torch.randn((batch_size, 2), device=device),
+ 0.5 + 0.5 * torch.rand((batch_size, 2), device=device)],
+ dim=-1)
+ x = target.sample(batch_size, context)
+
+ # Compute loss
+ loss = model.forward_kld(x, context)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot trained flow distribution, target as contours
+model.eval()
+log_prob = model.log_prob(zz, context_plot).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy(), shading='auto')
+plt.contour(xx, yy, p_target, cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Define flows
+K = 4
+
+latent_size = 2
+hidden_units = 128
+num_blocks = 2
+
+flows = []
+for i in range(K):
+ flows += [nf.flows.MaskedAffineAutoregressive(latent_size, hidden_units,
+ context_features=context_size,
+ num_blocks=num_blocks)]
+ flows += [nf.flows.LULinearPermute(latent_size)]
+
+# Set base distribution
+q0 = nf.distributions.DiagGaussian(2, trainable=False)
+
+# Construct flow model
+model = nf.ConditionalNormalizingFlow(q0, flows, target)
+
+# Move model on GPU if available
+model = model.to(device)
+
# Plot initial flow distribution, target as contours
+model.eval()
+log_prob = model.log_prob(zz, context_plot).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy(), shading='auto')
+plt.contour(xx, yy, p_target, cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 5000
+batch_size = 128
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
+
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ context = torch.cat([torch.randn((batch_size, 2), device=device),
+ 0.5 + 0.5 * torch.rand((batch_size, 2), device=device)],
+ dim=-1)
+ x = target.sample(batch_size, context)
+
+ # Compute loss
+ loss = model.forward_kld(x, context)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot trained flow distribution, target as contours
+model.eval()
+log_prob = model.log_prob(zz, context_plot).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy(), shading='auto')
+plt.contour(xx, yy, p_target, cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
Here, we show how a flow can be trained to generate images with the normflows package. The flow is a class-conditional Glow model based on the multi-scale architecture and is applied to the CIFAR-10 dataset.
To get started, we have to install the normflows package.
!pip install normflows
+
# Import required packages
+import torch
+import torchvision as tv
+import numpy as np
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from tqdm import tqdm
+
Now that we have imported the necessary packages, we create the flow model. Glow consists of nf.flows.GlowBlock modules, which are arranged in a nf.MultiscaleFlow following the multi-scale architecture. The base distribution is a nf.distributions.ClassCondDiagGaussian, a diagonal Gaussian whose mean and standard deviation depend on the class label of the image.
# Set up model
+
+# Define flows
+L = 3
+K = 16
+torch.manual_seed(0)
+
+input_shape = (3, 32, 32)
+n_dims = np.prod(input_shape)
+channels = 3
+hidden_channels = 256
+split_mode = 'channel'
+scale = True
+num_classes = 10
+
+# Set up flows, distributions and merge operations
+q0 = []
+merges = []
+flows = []
+for i in range(L):
+ flows_ = []
+ for j in range(K):
+ flows_ += [nf.flows.GlowBlock(channels * 2 ** (L + 1 - i), hidden_channels,
+ split_mode=split_mode, scale=scale)]
+ flows_ += [nf.flows.Squeeze()]
+ flows += [flows_]
+ if i > 0:
+ merges += [nf.flows.Merge()]
+ latent_shape = (input_shape[0] * 2 ** (L - i), input_shape[1] // 2 ** (L - i),
+ input_shape[2] // 2 ** (L - i))
+ else:
+ latent_shape = (input_shape[0] * 2 ** (L + 1), input_shape[1] // 2 ** L,
+ input_shape[2] // 2 ** L)
+ q0 += [nf.distributions.ClassCondDiagGaussian(latent_shape, num_classes)]
+
+
+# Construct flow model with the multiscale architecture
+model = nf.MultiscaleFlow(q0, flows, merges)
+
# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+model = model.to(device)
+
With torchvision we can download the CIFAR-10 dataset.
# Prepare training data
+batch_size = 128
+
+transform = tv.transforms.Compose([tv.transforms.ToTensor(), nf.utils.Scale(255. / 256.), nf.utils.Jitter(1 / 256.)])
+train_data = tv.datasets.CIFAR10('datasets/', train=True,
+ download=True, transform=transform)
+train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True,
+ drop_last=True)
+
+test_data = tv.datasets.CIFAR10('datasets/', train=False,
+ download=True, transform=transform)
+test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
+
+train_iter = iter(train_loader)
+
Now we can train the model on the image data.
+# Train model
+max_iter = 20000
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=1e-5)
+
+for i in tqdm(range(max_iter)):
+ try:
+ x, y = next(train_iter)
+ except StopIteration:
+ train_iter = iter(train_loader)
+ x, y = next(train_iter)
+ optimizer.zero_grad()
+ loss = model.forward_kld(x.to(device), y.to(device))
+
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ loss_hist = np.append(loss_hist, loss.detach().to('cpu').numpy())
+
plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
To evaluate our model, we first draw samples from it. When sampling, we can specify the classes, so we draw ten samples from each class.
+# Model samples
+num_sample = 10
+
+with torch.no_grad():
+ y = torch.arange(num_classes).repeat(num_sample).to(device)
+ x, _ = model.sample(y=y)
+ x_ = torch.clamp(x, 0, 1)
+ plt.figure(figsize=(10, 10))
+ plt.imshow(np.transpose(tv.utils.make_grid(x_, nrow=num_classes).cpu().numpy(), (1, 2, 0)))
+ plt.show()
+
For quantitative evaluation, we can compute the bits per dim of our model.
+# Get bits per dim
+n = 0
+bpd_cum = 0
+with torch.no_grad():
+ for x, y in iter(test_loader):
+ nll = model(x.to(device), y.to(device))
+ nll_np = nll.cpu().numpy()
+ bpd_cum += np.nansum(nll_np / np.log(2) / n_dims + 8)
+ n += len(x) - np.sum(np.isnan(nll_np))
+
+ print('Bits per dim: ', bpd_cum / n)
+
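For reference, the loop above converts the test negative log-likelihood from nats to bits per dimension: with $D = 3 \cdot 32 \cdot 32$,
+$$ \text{bpd} = \frac{\text{NLL}}{D \ln 2} + 8, $$
+where the added 8 bits per dimension account for the pixel values having been scaled from $\{0, \dots, 255\}$ down to $[0, 1)$, since $\log_2 256 = 8$.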
Note that to get competitive performance, a much larger model than the one specified in this notebook is needed, trained for 100 thousand to 1 million iterations.
+# Import required packages
+import torch
+import numpy as np
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from tqdm import tqdm
+
# Set up model
+
+# Define flows
+K = 32
+#torch.manual_seed(0)
+
+b = torch.tensor([0, 1])
+flows = []
+for i in range(K):
+ s = nf.nets.MLP([2, 4, 4, 2])
+ t = nf.nets.MLP([2, 4, 4, 2])
+ if i % 2 == 0:
+ flows += [nf.flows.MaskedAffineFlow(b, t, s)]
+ else:
+ flows += [nf.flows.MaskedAffineFlow(1 - b, t, s)]
+
+# Set target and base distribution
+img = 1 - plt.imread('img.png')[:, :, 0] # Specify the path to your image here
+target = nf.distributions.ImagePrior(img)
+q0 = nf.distributions.DiagGaussian(2)
+
+# Construct flow model
+nfm = nf.NormalizingFlow(q0=q0, flows=flows, p=target)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+nfm = nfm.to(device)
+nfm = nfm.double()
+
# Plot prior distribution
+grid_size = 200
+xx, yy = torch.meshgrid(torch.linspace(-3, 3, grid_size), torch.linspace(-3, 3, grid_size))
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+zz = zz.double().to(device)
+log_prob = target.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.show()
+
+# Plot initial posterior distribution
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.show()
+
# Train model
+max_iter = 10000
+num_samples = 2 ** 16
+show_iter = 2000
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(nfm.parameters(), lr=1e-4, weight_decay=1e-4)
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+ x = nfm.p.sample(num_samples).double()
+ loss = nfm.forward_kld(x)
+ loss.backward()
+ optimizer.step()
+
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned distribution
+ if (it + 1) % show_iter == 0:
+ log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+ prob = torch.exp(log_prob)
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(10, 10))
+ plt.pcolormesh(xx, yy, prob.data.numpy())
+ plt.show()
+
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot learned distribution
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.show()
+
# Import required packages
+import torch
+import numpy as np
+import normflows as nf
+
+from sklearn.datasets import make_moons
+
+from matplotlib import pyplot as plt
+
+from tqdm import tqdm
+
# Set up model
+
+# Define flows
+K = 16
+torch.manual_seed(0)
+
+latent_size = 2
+hidden_units = 128
+hidden_layers = 2
+
+flows = []
+for i in range(K):
+ flows += [nf.flows.AutoregressiveRationalQuadraticSpline(latent_size, hidden_layers, hidden_units)]
+ flows += [nf.flows.LULinearPermute(latent_size)]
+
+# Set base distribution
+q0 = nf.distributions.DiagGaussian(2, trainable=False)
+
+# Construct flow model
+nfm = nf.NormalizingFlow(q0=q0, flows=flows)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+nfm = nfm.to(device)
+
# Plot target distribution
+x_np, _ = make_moons(2 ** 20, noise=0.1)
+plt.figure(figsize=(15, 15))
+plt.hist2d(x_np[:, 0], x_np[:, 1], bins=200)
+plt.show()
+
+# Plot initial flow distribution
+grid_size = 100
+xx, yy = torch.meshgrid(torch.linspace(-1.5, 2.5, grid_size), torch.linspace(-2, 2, grid_size))
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+zz = zz.to(device)
+
+nfm.eval()
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+nfm.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 10000
+num_samples = 2 ** 9
+show_iter = 500
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(nfm.parameters(), lr=1e-3, weight_decay=1e-5)
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ x_np, _ = make_moons(num_samples, noise=0.1)
+ x = torch.tensor(x_np).float().to(device)
+
+ # Compute loss
+ loss = nfm.forward_kld(x)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned distribution
+ if (it + 1) % show_iter == 0:
+ nfm.eval()
+ log_prob = nfm.log_prob(zz)
+ nfm.train()
+ prob = torch.exp(log_prob.to('cpu').view(*xx.shape))
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(15, 15))
+ plt.pcolormesh(xx, yy, prob.data.numpy())
+ plt.gca().set_aspect('equal', 'box')
+ plt.show()
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot learned distribution
+nfm.eval()
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+nfm.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
This is the example we consider in our paper about the normflows package.
We aim to approximate a distribution having a circular and a normal coordinate. To construct such a case, let the normal (unbounded) coordinate $x$ follow a standard normal distribution, i.e.
+$$ p(x) = \frac{1}{\sqrt{2\pi}} e^{-\frac{1}{2} x ^ 2}.$$
+The circular random variable $\phi$ follows a von Mises distribution given by
+$$ p(\phi|x) = \frac{1}{2\pi I_0(1)} e^{\cos(\phi-\mu(x))}, $$
+where $I_0$ is the $0^\text{th}$-order modified Bessel function of the first kind and we set $\mu(x) = 3x$. Hence, our full target is given by
+$$ p(x, \phi) = p(x)p(\phi|x) = \frac{1}{(2\pi)^{\frac{3}{2}} I_0(1)} e^{-\frac{1}{2} x ^ 2 + \cos(\phi-3x)}. $$
+We use a neural spline flow that models the two coordinates accordingly.
+# Install normflows in Colab
+!pip install normflows
+
# Import packages
+import torch
+import numpy as np
+
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+from matplotlib import cm
+
+from tqdm import tqdm
+
This is our target $p(x, \phi)$.
+# Set up target
+class GaussianVonMises(nf.distributions.Target):
+ def __init__(self):
+ super().__init__(prop_scale=torch.tensor(2 * np.pi),
+ prop_shift=torch.tensor(-np.pi))
+ self.n_dims = 2
+ self.max_log_prob = -1.99
+ self.log_const = -1.5 * np.log(2 * np.pi) - np.log(np.i0(1))
+
+ def log_prob(self, x):
+ return -0.5 * x[:, 0] ** 2 + torch.cos(x[:, 1] - 3 * x[:, 0]) + self.log_const
+
target = GaussianVonMises()
+
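As a quick sanity check of the normalization constant derived above, the density can be integrated numerically over a grid. This is a sketch assuming the imports and the target object from the cells above; the $x$-range is truncated at $\pm 4$, so the result lies marginally below 1.
# Numerical normalization check (illustration only)
+gs = 500
+x_lin = torch.linspace(-4, 4, gs)
+phi_lin = torch.linspace(-np.pi, np.pi, gs)
+xg, pg = torch.meshgrid(x_lin, phi_lin, indexing='ij')
+pts = torch.stack([xg.flatten(), pg.flatten()], dim=1)
+p = torch.exp(target.log_prob(pts)).view(gs, gs)
+dx = (x_lin[1] - x_lin[0]).item()
+dphi = (phi_lin[1] - phi_lin[0]).item()
+print('Integral over the grid: %.4f' % (p.sum() * dx * dphi).item())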
# Plot target
+grid_size = 300
+xx, yy = torch.meshgrid(torch.linspace(-2.5, 2.5, grid_size), torch.linspace(-np.pi, np.pi, grid_size))
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+
+log_prob = target.log_prob(zz).view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(yy, xx, prob.data.numpy(), cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
base = nf.distributions.UniformGaussian(2, [1], torch.tensor([1., 2 * np.pi]))
+
+K = 12
+
+flow_layers = []
+for i in range(K):
+ flow_layers += [nf.flows.CircularAutoregressiveRationalQuadraticSpline(2, 1, 512, [1], num_bins=10,
+ tail_bound=torch.tensor([5., np.pi]),
+ permute_mask=True)]
+
+model = nf.NormalizingFlow(base, flow_layers, target)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+model = model.to(device)
+
# Plot model
+log_prob = model.log_prob(zz.to(device)).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(yy, xx, prob.data.numpy(), cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 10000
+num_samples = 2 ** 14
+show_iter = 2500
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
+scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_iter)
+
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Compute loss
+ loss = model.reverse_kld(num_samples)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned model
+ if (it + 1) % show_iter == 0:
+ model.eval()
+ with torch.no_grad():
+ log_prob = model.log_prob(zz.to(device)).to('cpu').view(*xx.shape)
+ model.train()
+ prob = torch.exp(log_prob)
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(15, 15))
+ plt.pcolormesh(yy, xx, prob.data.numpy(), cmap='coolwarm')
+ plt.gca().set_aspect('equal', 'box')
+ plt.show()
+
+ # Iterate scheduler
+ scheduler.step()
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# 2D plot
+f, ax = plt.subplots(1, 2, sharey=True, figsize=(15, 7))
+
+log_prob = target.log_prob(zz).view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+ax[0].pcolormesh(yy, xx, prob.data.numpy(), cmap='coolwarm')
+ax[0].set_aspect('equal', 'box')
+
+ax[0].set_xticks(ticks=[-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
+ax[0].set_xticklabels([r'$-\pi$', r'$-\frac{\pi}{2}$', '$0$', r'$\frac{\pi}{2}$', r'$\pi$'],
+                      fontsize=20)
+ax[0].set_yticks(ticks=[-2, -1, 0, 1, 2])
+ax[0].set_yticklabels(['$-2$', '$-1$', '$0$', '$1$', '$2$'],
+ fontsize=20)
+ax[0].set_xlabel(r'$\phi$', fontsize=24)
+ax[0].set_ylabel('$x$', fontsize=24)
+
+ax[0].set_title('Target', fontsize=24)
+
+log_prob = model.log_prob(zz.to(device)).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+ax[1].pcolormesh(yy, xx, prob.data.numpy(), cmap='coolwarm')
+ax[1].set_aspect('equal', 'box')
+
+ax[1].set_xticks(ticks=[-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
+ax[1].set_xticklabels([r'$-\pi$', r'$-\frac{\pi}{2}$', '$0$', r'$\frac{\pi}{2}$', r'$\pi$'],
+                      fontsize=20)
+ax[1].set_xlabel(r'$\phi$', fontsize=24)
+
+ax[1].set_title('Neural Spline Flow', fontsize=24)
+
+plt.subplots_adjust(wspace=0.1)
+
+plt.show()
+
# 3D plot
+fig = plt.figure(figsize=(15, 7))
+ax1 = fig.add_subplot(1, 2, 1, projection='3d')
+ax2 = fig.add_subplot(1, 2, 2, projection='3d')
+
+phi = np.linspace(-np.pi, np.pi, grid_size)
+z = np.linspace(-2.5, 2.5, grid_size)
+
+# create the surface
+x = np.outer(np.ones(grid_size), np.cos(phi))
+y = np.outer(np.ones(grid_size), np.sin(phi))
+z = np.outer(z, np.ones(grid_size))
+
+# Target
+log_prob = target.log_prob(zz).view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+prob_vis = prob / torch.max(prob)
+myheatmap = prob_vis.data.numpy()
+
+ax1.set_axis_off()
+ax1.plot_surface(x, y, z, cstride=1, rstride=1, facecolors=cm.coolwarm(myheatmap), shade=False)
+
+ax1.set_title('Target', fontsize=24, y=0.97, pad=0)
+
+# Model
+log_prob = model.log_prob(zz.to(device)).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+prob_vis = prob / torch.max(prob)
+myheatmap = prob_vis.data.numpy()
+
+ax2.set_axis_off()
+ax2.plot_surface(x, y, z, cstride=1, rstride=1, facecolors=cm.coolwarm(myheatmap), shade=False)
+
+t = ax2.set_title('Neural Spline Flow', fontsize=24, y=0.97, pad=0)
+
+plt.show()
+
from __future__ import print_function
+import torch
+import torch.utils.data
+from torch import nn, optim
+from torch.distributions.normal import Normal
+from torch.nn import functional as F
+from torchvision import datasets, transforms
+from tqdm import tqdm
+import argparse
+from datetime import datetime
+import os
+import pandas as pd
+
parser = argparse.ArgumentParser(description="FlowVAE implementation on MNIST")
+parser.add_argument(
+ "--batch-size",
+ type=int,
+ default=256,
+ metavar="N",
+ help="Training batch size (default: 256)",
+)
+parser.add_argument(
+ "--latent-size",
+ type=int,
+ default=40,
+ metavar="N",
+ help="Latent dimension size (default: 40)",
+)
+parser.add_argument(
+ "--epochs",
+ type=int,
+ default=15,
+ metavar="N",
+ help="Nr of training epochs (default: 15)",
+)
+parser.add_argument(
+ "--dataset",
+ type=str,
+ default="mnist",
+ metavar="N",
+ help="Dataset to train and test on (mnist, cifar10 or cifar100) (default: mnist)",
+)
+parser.add_argument(
+ "--no-cuda", action="store_true", default=False, help="enables CUDA training"
+)
+parser.add_argument(
+ "--seed", type=int, default=15, metavar="S", help="Random Seed (default: 1)"
+)
+parser.add_argument(
+ "--log-intv",
+ type=int,
+ default=20,
+ metavar="N",
+ help="Training log status interval (default: 20",
+)
+parser.add_argument(
+ "--experiment_mode",
+ type=bool,
+ default=False,
+ metavar="N",
+ help="Experiment mode (conducts 10 runs and saves results as DataFrame (default: False)",
+)
+parser.add_argument(
+ "--runs",
+ type=int,
+ default=10,
+ metavar="N",
+ help="Number of runs in experiment_mode (experiment_mode has to be turned to True to use) (default: 10)",
+)
+
args = parser.parse_args()
+args.cuda = not args.no_cuda and torch.cuda.is_available()
+
torch.manual_seed(args.seed)
+
device = torch.device("cuda" if args.cuda else "cpu")
+
if args.dataset == "mnist":
+ img_dim = 28
+elif args.dataset == "cifar10" or args.dataset == "cifar100":
+ img_dim = 32
+else:
+ raise ValueError("The only dataset calls supported are: mnist, cifar10, cifar100")
+
class VAE(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.encode = nn.Sequential(
+ nn.Linear(img_dim**2, 512),
+ nn.ReLU(True),
+ nn.Linear(512, 256),
+ nn.ReLU(True),
+ )
+ self.f1 = nn.Linear(256, args.latent_size)
+ self.f2 = nn.Linear(256, args.latent_size)
+ self.decode = nn.Sequential(
+ nn.Linear(args.latent_size, 256),
+ nn.ReLU(True),
+ nn.Linear(256, 512),
+ nn.ReLU(True),
+ nn.Linear(512, img_dim**2),
+ )
+
+ def forward(self, x):
+        # Encode: one pass through the shared encoder, then the two linear heads
+        h = self.encode(x.view(x.size(0) * x.size(1), img_dim**2))
+        mu, log_var = self.f1(h), self.f2(h)
+
+ # Reparametrize variables
+ std = torch.exp(0.5 * log_var)
+ norm_scale = torch.randn_like(std)
+ z_ = mu + norm_scale * std
+
+        # Q0 and prior (defined for reference; unused by the plain-VAE bound below)
+        q0 = Normal(mu, torch.exp(0.5 * log_var))
+        p = Normal(0.0, 1.0)
+
+ # Decode
+ z_ = z_.view(z_.size(0), args.latent_size)
+ zD = self.decode(z_)
+ out = torch.sigmoid(zD)
+
+ return out, mu, log_var
+
def bound(rce, x, mu, log_var):
+ kld = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
+ return F.binary_cross_entropy(rce, x.view(-1, img_dim**2), reduction="sum") + kld
+
class BinaryTransform:
+ def __init__(self, thresh=0.5):
+ self.thresh = thresh
+
+ def __call__(self, x):
+ return (x > self.thresh).type(x.type())
+
# Training
+def flow_vae_datasets(
+ id,
+ download=True,
+ batch_size=args.batch_size,
+ shuffle=True,
+ transform=transforms.Compose([transforms.ToTensor(), BinaryTransform()]),
+):
+ data_d_train = {
+ "mnist": datasets.MNIST(
+ "datasets", train=True, download=True, transform=transform
+ ),
+ "cifar10": datasets.CIFAR10(
+ "datasets", train=True, download=True, transform=transform
+ ),
+ "cifar100": datasets.CIFAR100(
+ "datasets", train=True, download=True, transform=transform
+ ),
+ }
+ data_d_test = {
+ "mnist": datasets.MNIST(
+ "datasets", train=False, download=True, transform=transform
+ ),
+ "cifar10": datasets.CIFAR10(
+ "datasets", train=False, download=True, transform=transform
+ ),
+ "cifar100": datasets.CIFAR100(
+ "datasets", train=False, download=True, transform=transform
+ ),
+ }
+ train_loader = torch.utils.data.DataLoader(
+ data_d_train.get(id), batch_size=batch_size, shuffle=shuffle
+ )
+
+ test_loader = torch.utils.data.DataLoader(
+ data_d_test.get(id), batch_size=batch_size, shuffle=shuffle
+ )
+ return train_loader, test_loader
+
model = VAE().to(device)
+
optimizer = optim.Adam(model.parameters(), lr=0.001)
+# train_losses = []
+train_loader, test_loader = flow_vae_datasets(args.dataset)
+
Train
+def train(model, epoch):
+ model.train()
+ tr_loss = 0
+ progressbar = tqdm(enumerate(train_loader), total=len(train_loader))
+ for batch_n, (x, n) in progressbar:
+ x = x.to(device)
+ optimizer.zero_grad()
+ rc_batch, mu, log_var = model(x)
+ loss = bound(rc_batch, x.view(x.size(0) * x.size(1), img_dim**2), mu, log_var)
+ loss.backward()
+ tr_loss += loss.item()
+ optimizer.step()
+ progressbar.update()
+ if batch_n % args.log_intv == 0:
+ print(
+ "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
+ epoch,
+ batch_n * len(x),
+ len(train_loader.dataset),
+ 100.0 * batch_n / len(train_loader),
+ loss.item() / len(x),
+ )
+ )
+ progressbar.close()
+ print(
+ "====> Epoch: {} Average loss: {:.4f}".format(
+ epoch, tr_loss / len(train_loader.dataset)
+ )
+ )
+
def test(model, epoch):
+ model.eval()
+ test_loss = 0
+ with torch.no_grad():
+ for i, (x, _) in enumerate(test_loader):
+ x = x.to(device)
+ rc_batch, mu, log_var = model(x)
+ test_loss += bound(rc_batch, x, mu, log_var).item()
+
+ test_loss /= len(test_loader.dataset)
+ print("====> Test set loss: {:.4f}".format(test_loss))
+ return test_loss
+
test_losses = []
+if __name__ == "__main__":
+ if args.experiment_mode:
+ min_test_losses = []
+ min_test_losses.append(str(args))
+ for i in range(args.runs):
+ test_losses = []
+ model.__init__()
+ model = model.to(device)
+ optimizer = optim.Adam(model.parameters(), lr=0.001)
+ if i == 0:
+ seed = args.seed
+ else:
+ seed += 1
+ torch.manual_seed(seed)
+ for e in range(args.epochs):
+ train(model, e)
+ tl = test(model, e)
+ test_losses.append(tl)
+ print("====> Lowest test set loss: {:.4f}".format(min(test_losses)))
+ min_test_losses.append(min(test_losses))
+ Series = pd.Series(min_test_losses)
+
+ dirName = "experiments"
+ if not os.path.exists(dirName):
+ os.mkdir(dirName)
+ else:
+ pass
+ file_name = dirName + "/{}.xlsx".format(str(datetime.now()))
+ file_name = file_name.replace(":", "-")
+ Series.to_excel(file_name, index=False, header=None)
+ else:
+ for e in range(args.epochs):
+ train(model, e)
+ tl = test(model, e)
+ test_losses.append(tl)
+
# Import required packages
+import torch
+import numpy as np
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from tqdm import tqdm
+
K = 16
+#torch.manual_seed(0)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+
+flows = []
+for i in range(K):
+ flows += [nf.flows.Planar((2,))]
+target = nf.distributions.TwoModes(2, 0.1)
+
+q0 = nf.distributions.DiagGaussian(2)
+nfm = nf.NormalizingFlow(q0=q0, flows=flows, p=target)
+nfm.to(device)
+
# Plot target distribution
+grid_size = 200
+xx, yy = torch.meshgrid(torch.linspace(-3, 3, grid_size), torch.linspace(-3, 3, grid_size))
+z = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+log_prob = target.log_prob(z.to(device)).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+
+plt.figure(figsize=(10, 10))
+plt.pcolormesh(xx, yy, prob)
+plt.show()
+
+# Plot initial flow distribution
+z, _ = nfm.sample(num_samples=2 ** 20)
+z_np = z.to('cpu').data.numpy()
+plt.figure(figsize=(10, 10))
+plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (grid_size, grid_size), range=[[-3, 3], [-3, 3]])
+plt.show()
+
# Train model
+max_iter = 20000
+num_samples = 2 * 20
+anneal_iter = 10000
+annealing = True
+show_iter = 2000
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(nfm.parameters(), lr=1e-3, weight_decay=1e-4)
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+ if annealing:
+ loss = nfm.reverse_kld(num_samples, beta=np.min([1., 0.01 + it / anneal_iter]))
+ else:
+ loss = nfm.reverse_kld(num_samples)
+ loss.backward()
+ optimizer.step()
+
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned distribution
+ if (it + 1) % show_iter == 0:
+ torch.cuda.manual_seed(0)
+ z, _ = nfm.sample(num_samples=2 ** 20)
+ z_np = z.to('cpu').data.numpy()
+
+ plt.figure(figsize=(10, 10))
+ plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (grid_size, grid_size), range=[[-3, 3], [-3, 3]])
+ plt.show()
+
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot learned distribution
+z, _ = nfm.sample(num_samples=2 ** 20)
+z_np = z.to('cpu').data.numpy()
+plt.figure(figsize=(10, 10))
+plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (grid_size, grid_size), range=[[-3, 3], [-3, 3]])
+plt.show()
+
# Import required packages
+import torch
+import numpy as np
+import normflows as nf
+
+from matplotlib import pyplot as plt
+from tqdm import tqdm
+
# Set up model
+
+# Define flows
+K = 64
+torch.manual_seed(0)
+
+latent_size = 2
+b = torch.Tensor([1 if i % 2 == 0 else 0 for i in range(latent_size)])
+flows = []
+for i in range(K):
+ s = nf.nets.MLP([latent_size, 2 * latent_size, latent_size], init_zeros=True)
+ t = nf.nets.MLP([latent_size, 2 * latent_size, latent_size], init_zeros=True)
+ if i % 2 == 0:
+ flows += [nf.flows.MaskedAffineFlow(b, t, s)]
+ else:
+ flows += [nf.flows.MaskedAffineFlow(1 - b, t, s)]
+ flows += [nf.flows.ActNorm(latent_size)]
+
+# Set target and q0
+target = nf.distributions.TwoModes(2, 0.1)
+q0 = nf.distributions.DiagGaussian(2)
+
+# Construct flow model
+nfm = nf.NormalizingFlow(q0=q0, flows=flows, p=target)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+nfm = nfm.to(device)
+nfm = nfm.double()
+
+# Initialize ActNorm
+z, _ = nfm.sample(num_samples=2 ** 7)
+z_np = z.to('cpu').data.numpy()
+plt.figure(figsize=(15, 15))
+plt.hist2d(z_np[:, 0].flatten(), z_np[:, 1].flatten(), (200, 200), range=[[-3, 3], [-3, 3]])
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Plot target distribution
+grid_size = 200
+xx, yy = torch.meshgrid(torch.linspace(-3, 3, grid_size), torch.linspace(-3, 3, grid_size))
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+zz = zz.double().to(device)
+log_prob = target.log_prob(zz).to('cpu').view(*xx.shape)
+prob_target = torch.exp(log_prob)
+
+# Plot initial posterior distribution
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.contour(xx, yy, prob_target.data.numpy(), cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 20000
+num_samples = 2 * 10
+anneal_iter = 10000
+annealing = True
+show_iter = 1000
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(nfm.parameters(), lr=1e-4, weight_decay=1e-6)
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+ if annealing:
+ loss = nfm.reverse_kld(num_samples, beta=np.min([1., 0.001 + it / anneal_iter]))
+ else:
+ loss = nfm.reverse_alpha_div(num_samples, dreg=True, alpha=1)
+
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned posterior
+ if (it + 1) % show_iter == 0:
+ log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+ prob = torch.exp(log_prob)
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(15, 15))
+ plt.pcolormesh(xx, yy, prob.data.numpy())
+ plt.contour(xx, yy, prob_target.data.numpy(), cmap=plt.get_cmap('cool'), linewidths=2)
+ plt.gca().set_aspect('equal', 'box')
+ plt.show()
+
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot learned posterior distribution
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.contour(xx, yy, prob_target.data.numpy(), cmap=plt.get_cmap('cool'), linewidths=2)
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
This notebook illustrates how to use the normflows
package by training a simple Real NVP model on a 2D distribution consisting of two half moons.
Before we can start, we have to install the package.
+!pip install normflows
+
# Import required packages
+import torch
+import numpy as np
+import normflows as nf
+
+from matplotlib import pyplot as plt
+
+from tqdm import tqdm
+
After importing the required packages, we want to create a nf.NormalizingFlow
model. Therefore, we need a base distribution, which we set to be a Gaussian, and a list of flow layers. The flow layers are simply affine coupling layers, whereby nf.AffineCouplingBlock
already includes the splitting and merging of the features as done in coupling layers. We also swap the two features after each layer to ensure that both of them are modified.
# Set up model
+
+# Define 2D Gaussian base distribution
+base = nf.distributions.base.DiagGaussian(2)
+
+# Define list of flows
+num_layers = 32
+flows = []
+for i in range(num_layers):
+ # Neural network with two hidden layers having 64 units each
+ # Last layer is initialized by zeros making training more stable
+ param_map = nf.nets.MLP([1, 64, 64, 2], init_zeros=True)
+ # Add flow layer
+ flows.append(nf.flows.AffineCouplingBlock(param_map))
+ # Swap dimensions
+ flows.append(nf.flows.Permute(2, mode='swap'))
+
+# Construct flow model
+model = nf.NormalizingFlow(base, flows)
+
# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+model = model.to(device)
+
This is our target distribution.
+# Define target distribution
+target = nf.distributions.TwoMoons()
+
# Plot target distribution
+grid_size = 200
+xx, yy = torch.meshgrid(torch.linspace(-3, 3, grid_size), torch.linspace(-3, 3, grid_size))
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+zz = zz.to(device)
+
+log_prob = target.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Plot initial flow distribution
+model.eval()
+log_prob = model.log_prob(zz).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
Now, we are ready to train the flow model. This can be done in a similar fashion to training standard neural networks. Since we use samples from the target for training, we use the forward KL divergence as objective, which is equivalent to maximum likelihood.
+# Train model
+max_iter = 4000
+num_samples = 2 ** 9
+show_iter = 500
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(model.parameters(), lr=5e-4, weight_decay=1e-5)
+
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ x = target.sample(num_samples).to(device)
+
+ # Compute loss
+ loss = model.forward_kld(x)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned distribution
+ if (it + 1) % show_iter == 0:
+ model.eval()
+ log_prob = model.log_prob(zz)
+ model.train()
+ prob = torch.exp(log_prob.to('cpu').view(*xx.shape))
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(15, 15))
+ plt.pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+ plt.gca().set_aspect('equal', 'box')
+ plt.show()
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
This is our trained flow model!
+Note that there might be a density filament connecting the two modes, which is due to an architectural limitation of normalizing flows, especially prominent in Real NVP. You can find out more about it in this paper.
+# Plot target distribution
+f, ax = plt.subplots(1, 2, sharey=True, figsize=(15, 7))
+
+log_prob = target.log_prob(zz).to('cpu').view(*xx.shape)
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+ax[0].pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+
+ax[0].set_aspect('equal', 'box')
+ax[0].set_axis_off()
+ax[0].set_title('Target', fontsize=24)
+
+# Plot learned distribution
+model.eval()
+log_prob = model.log_prob(zz).to('cpu').view(*xx.shape)
+model.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+ax[1].pcolormesh(xx, yy, prob.data.numpy(), cmap='coolwarm')
+
+ax[1].set_aspect('equal', 'box')
+ax[1].set_axis_off()
+ax[1].set_title('Real NVP', fontsize=24)
+
+plt.subplots_adjust(wspace=0.1)
+
+plt.show()
+
# Import required packages
+import torch
+import numpy as np
+import normflows as nf
+
+from sklearn.datasets import make_moons
+
+from matplotlib import pyplot as plt
+
+from tqdm import tqdm
+
# Set up model
+
+# Define flows
+K = 16
+torch.manual_seed(0)
+
+latent_size = 2
+hidden_units = 128
+hidden_layers = 3
+
+flows = []
+for i in range(K):
+ net = nf.nets.LipschitzMLP([latent_size] + [hidden_units] * (hidden_layers - 1) + [latent_size],
+ init_zeros=True, lipschitz_const=0.9)
+ flows += [nf.flows.Residual(net, reduce_memory=True)]
+ flows += [nf.flows.ActNorm(latent_size)]
+
+# Set prior and q0
+q0 = nf.distributions.DiagGaussian(2, trainable=False)
+
+# Construct flow model
+nfm = nf.NormalizingFlow(q0=q0, flows=flows)
+
+# Move model on GPU if available
+enable_cuda = True
+device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')
+nfm = nfm.to(device)
+
+# Initialize ActNorm
+x_np, _ = make_moons(2 ** 9, noise=0.1)
+x = torch.tensor(x_np).float().to(device)
+_ = nfm.log_prob(x)
+
# Plot target distribution
+x_np, _ = make_moons(2 ** 20, noise=0.1)
+plt.figure(figsize=(15, 15))
+plt.hist2d(x_np[:, 0], x_np[:, 1], bins=200)
+plt.show()
+
+# Plot initial flow distribution
+grid_size = 100
+xx, yy = torch.meshgrid(torch.linspace(-1.5, 2.5, grid_size), torch.linspace(-2, 2, grid_size))
+zz = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
+zz = zz.to(device)
+
+nfm.eval()
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+nfm.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
# Train model
+max_iter = 20000
+num_samples = 2 ** 9
+show_iter = 500
+
+
+loss_hist = np.array([])
+
+optimizer = torch.optim.Adam(nfm.parameters(), lr=3e-4, weight_decay=1e-5)
+for it in tqdm(range(max_iter)):
+ optimizer.zero_grad()
+
+ # Get training samples
+ x_np, _ = make_moons(num_samples, noise=0.1)
+ x = torch.tensor(x_np).float().to(device)
+
+ # Compute loss
+ loss = nfm.forward_kld(x)
+
+ # Do backprop and optimizer step
+ if ~(torch.isnan(loss) | torch.isinf(loss)):
+ loss.backward()
+ optimizer.step()
+
+ # Make layers Lipschitz continuous
+ nf.utils.update_lipschitz(nfm, 50)
+
+ # Log loss
+ loss_hist = np.append(loss_hist, loss.to('cpu').data.numpy())
+
+ # Plot learned distribution
+ if (it + 1) % show_iter == 0:
+ nfm.eval()
+ log_prob = nfm.log_prob(zz)
+ nfm.train()
+ prob = torch.exp(log_prob.to('cpu').view(*xx.shape))
+ prob[torch.isnan(prob)] = 0
+
+ plt.figure(figsize=(15, 15))
+ plt.pcolormesh(xx, yy, prob.data.numpy())
+ plt.gca().set_aspect('equal', 'box')
+ plt.show()
+
+# Plot loss
+plt.figure(figsize=(10, 10))
+plt.plot(loss_hist, label='loss')
+plt.legend()
+plt.show()
+
# Plot learned distribution
+nfm.eval()
+log_prob = nfm.log_prob(zz).to('cpu').view(*xx.shape)
+nfm.train()
+prob = torch.exp(log_prob)
+prob[torch.isnan(prob)] = 0
+
+plt.figure(figsize=(15, 15))
+plt.pcolormesh(xx, yy, prob.data.numpy())
+plt.gca().set_aspect('equal', 'box')
+plt.show()
+
from __future__ import print_function
+import torch
+import torch.utils.data
+from torch import nn, optim
+from torch.distributions.normal import Normal
+from torch.nn import functional as F
+from torchvision import datasets, transforms
+from tqdm import tqdm
+from normflows.flows import Planar, Radial, MaskedAffineFlow, BatchNorm
+import argparse
+from datetime import datetime
+import os
+from normflows import nets
+import pandas as pd
+import random
+
parser = argparse.ArgumentParser(description="FlowVAE implementation on MNIST")
+parser.add_argument(
+ "--batch-size",
+ type=int,
+ default=256,
+ metavar="N",
+ help="Training batch size (default: 256)",
+)
+parser.add_argument(
+ "--latent-size",
+ type=int,
+ default=40,
+ metavar="N",
+ help="Latent dimension size (default: 40)",
+)
+parser.add_argument(
+ "--K", type=int, default=10, metavar="N", help="Number of flows (default: 10)"
+)
+parser.add_argument(
+ "--flow",
+ type=str,
+ default="Planar",
+ metavar="N",
+ help="Type of flow (default: Planar)",
+)
+parser.add_argument(
+ "--epochs",
+ type=int,
+ default=15,
+ metavar="N",
+ help="Nr of training epochs (default: 15)",
+)
+parser.add_argument(
+ "--dataset",
+ type=str,
+ default="mnist",
+ metavar="N",
+ help="Dataset to train and test on (mnist, cifar10 or cifar100) (default: mnist)",
+)
+parser.add_argument(
+    "--no-cuda", action="store_true", default=False, help="disables CUDA training"
+)
+parser.add_argument(
+    "--seed", type=int, default=15, metavar="S", help="Random Seed (default: 15)"
+)
+parser.add_argument(
+ "--log-intv",
+ type=int,
+ default=20,
+ metavar="N",
+    help="Training log status interval (default: 20)",
+)
+parser.add_argument(
+ "--experiment_mode",
+ type=bool,
+ default=False,
+ metavar="N",
+    help="Experiment mode (conducts 10 runs and saves results as a DataFrame) (default: False)",
+)
+parser.add_argument(
+ "--runs",
+ type=int,
+ default=10,
+ metavar="N",
+ help="Number of runs in experiment_mode (experiment_mode has to be turned to True to use) (default: 10)",
+)
+
args = parser.parse_args()
+args.cuda = not args.no_cuda and torch.cuda.is_available()
+
torch.manual_seed(args.seed)
+
device = torch.device("cuda" if args.cuda else "cpu")
+
class SimpleFlowModel(nn.Module):
+ def __init__(self, flows):
+ super().__init__()
+ self.flows = nn.ModuleList(flows)
+
+ def forward(self, z):
+ ld = 0.0
+ for flow in self.flows:
+ z, ld_ = flow(z)
+ ld += ld_
+
+ return z, ld
+
class BinaryTransform:
+ def __init__(self, thresh=0.5):
+ self.thresh = thresh
+
+ def __call__(self, x):
+ return (x > self.thresh).type(x.type())
+
class ColourNormalize:
+ def __init__(self, a=0.0, b=0.0):
+ self.a = a
+ self.b = b
+
+ def __call__(self, x):
+ return (self.b - self.a) * x / 255 + self.a
+
if args.dataset == "mnist":
+ img_dim = 28
+ dtf = transforms.Compose([transforms.ToTensor(), BinaryTransform()])
+elif args.dataset == "cifar10" or args.dataset == "cifar100":
+ img_dim = 8
+ dtf = transforms.Compose(
+ [
+ transforms.RandomCrop([8, 8]),
+ transforms.ToTensor(),
+ ColourNormalize(0.0001, 1 - 0.0001),
+ ]
+ )
+else:
+ raise ValueError("The only dataset calls supported are: mnist, cifar10, cifar100")
+
def extract_cifar_patch(tensor, target_size):
+ x = random.randint(0, 32 - target_size)
+ y = random.randint(0, 32 - target_size)
+ return tensor[x : x + target_size, y : y + target_size, :]
+
# Training
+def flow_vae_datasets(
+ id,
+ download=True,
+ batch_size=args.batch_size,
+ shuffle=True,
+ transform=dtf,
+ patch_size=None,
+):
+ data_d_train = {
+ "mnist": datasets.MNIST(
+ "datasets", train=True, download=True, transform=transform
+ ),
+ "cifar10": datasets.CIFAR10(
+ "datasets", train=True, download=True, transform=transform
+ ),
+ "cifar100": datasets.CIFAR100(
+ "datasets", train=True, download=True, transform=transform
+ ),
+ }
+ data_d_test = {
+ "mnist": datasets.MNIST(
+ "datasets", train=False, download=True, transform=transform
+ ),
+ "cifar10": datasets.CIFAR10(
+ "datasets", train=False, download=True, transform=transform
+ ),
+ "cifar100": datasets.CIFAR100(
+ "datasets", train=False, download=True, transform=transform
+ ),
+ }
+
+ # training_data = data_d_train.get(id)
+ # test_data = data_d_test.get(id)
+ # if patch_size is not None:
+ # training_data.data = np.stack(
+ # [extract_cifar_patch(training_data.data[i, :, :], patch_size) for i in range(len(training_data.data))])
+ # test_data.data = np.stack(
+ # [extract_cifar_patch(test_data.data[i, :, :], patch_size) for i in range(len(test_data.data))])
+
+ train_loader = torch.utils.data.DataLoader(
+ data_d_train.get(id), batch_size=batch_size, shuffle=shuffle
+ )
+
+ test_loader = torch.utils.data.DataLoader(
+ data_d_test.get(id), batch_size=batch_size, shuffle=shuffle
+ )
+ return train_loader, test_loader
+
class FlowVAE(nn.Module):
+ def __init__(self, flows):
+ super().__init__()
+ self.encode = nn.Sequential(
+ nn.Linear(img_dim**2, 512),
+ nn.ReLU(True),
+ nn.Linear(512, 256),
+ nn.ReLU(True),
+ )
+ self.f1 = nn.Linear(256, args.latent_size)
+ self.f2 = nn.Linear(256, args.latent_size)
+ self.decode = nn.Sequential(
+ nn.Linear(args.latent_size, 256),
+ nn.ReLU(True),
+ nn.Linear(256, 512),
+ nn.ReLU(True),
+ nn.Linear(512, img_dim**2),
+ )
+ self.flows = flows
+
+ def forward(self, x):
+ # Encode
+ mu, log_var = self.f1(
+ self.encode(x.view(x.size(0) * x.size(1), img_dim**2))
+ ), self.f2(self.encode(x.view(x.size(0) * x.size(1), img_dim**2)))
+
+ # Reparameterize variables
+ std = torch.exp(0.5 * log_var)
+ norm_scale = torch.randn_like(std)
+ z_0 = mu + norm_scale * std
+
+ # Flow transforms
+ z_, log_det = self.flows(z_0)
+ z_ = z_.squeeze()
+
+ # Q0 and prior
+ q0 = Normal(mu, torch.exp((0.5 * log_var)))
+ p = Normal(0.0, 1.0)
+
+ # KLD including logdet term
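+        # Per-sample estimate: log q0(z_0) - log p(z_K) - log|det J| of the flow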
+ kld = (
+ -torch.sum(p.log_prob(z_), -1)
+ + torch.sum(q0.log_prob(z_0), -1)
+ - log_det.view(-1)
+ )
+ self.test_params = [
+ torch.mean(-torch.sum(p.log_prob(z_), -1)),
+ torch.mean(torch.sum(q0.log_prob(z_0), -1)),
+ torch.mean(log_det.view(-1)),
+ torch.mean(kld),
+ ]
+
+ # Decode
+ z_ = z_.view(z_.size(0), args.latent_size)
+ zD = self.decode(z_)
+ out = torch.sigmoid(zD)
+
+ return out, kld
+
def logit(x):
+ return torch.log(x / (1 - x))
+
def bound(rce, x, kld, beta):
+ if args.dataset == "mnist":
+ return (
+ F.binary_cross_entropy(rce, x.view(-1, img_dim**2), reduction="sum")
+ + beta * kld
+ )
+ elif args.dataset == "cifar10" or args.dataset == "cifar100":
+ # return (- torch.distributions.Normal(x.view(-1, img_dim ** 2), 1.).log_prob(rce)).sum() + beta * kld
+ return F.mse_loss(rce, x, reduction="sum") + beta * kld
+
if args.flow == "Planar":
+ flows = SimpleFlowModel([Planar((args.latent_size,)) for k in range(args.K)])
+elif args.flow == "Radial":
+ flows = SimpleFlowModel([Radial((args.latent_size,)) for k in range(args.K)])
+elif args.flow == "RealNVP":
+ b = torch.Tensor([1 if i % 2 == 0 else 0 for i in range(args.latent_size)])
+ flows = []
+ for i in range(args.K):
+ s = nets.MLP([args.latent_size, 8, args.latent_size])
+ t = nets.MLP([args.latent_size, 8, args.latent_size])
+ if i % 2 == 0:
+ flows += [MaskedAffineFlow(b, t, s)]
+ else:
+ flows += [MaskedAffineFlow(1 - b, t, s), BatchNorm()]
+ flows = SimpleFlowModel(
+ flows[:-1]
+ ) # Remove last Batch Norm layer to allow arbitrary output
+
model = FlowVAE(flows).to(device)
+optimizer = optim.Adam(model.parameters(), lr=0.001)
+# train_losses = []
+train_loader, test_loader = flow_vae_datasets(args.dataset)
+
def train(model, epoch, beta):
+ model.train()
+ tr_loss = 0
+ progressbar = tqdm(enumerate(train_loader), total=len(train_loader))
+ for batch_n, (x, n) in progressbar:
+ x = x.to(device)
+ optimizer.zero_grad()
+ rc_batch, kld = model(x)
+ loss = bound(
+ rc_batch, x.view(x.size(0) * x.size(1), img_dim**2), kld.sum(), beta=beta
+ )
+ avg_loss = loss / len(x)
+ loss.backward()
+ tr_loss += loss.item()
+ optimizer.step()
+ progressbar.update()
+ if batch_n % args.log_intv == 0:
+ print(
+ "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
+ epoch,
+ batch_n * len(x),
+ len(train_loader.dataset),
+ 100.0 * batch_n / len(train_loader),
+ loss.item() / len(x),
+ )
+ )
+ print(model.test_params)
+ progressbar.close()
+ print(
+ "====> Epoch: {} Average loss: {:.4f}".format(
+ epoch, tr_loss / len(train_loader.dataset)
+ )
+ )
+
def test(model, epoch):
+ model.eval()
+ test_loss = 0
+ with torch.no_grad():
+ for i, (x, _) in enumerate(test_loader):
+ x = x.to(device)
+ rc_batch, kld = model(x)
+ test_loss += bound(
+ rc_batch, x.view(x.size(0) * x.size(1), img_dim**2), kld.sum(), beta=1
+ ).item()
+
+ test_loss /= len(test_loader.dataset)
+ print("====> Test set loss: {:.4f}".format(test_loss))
+ return test_loss
+
test_losses = []
+
def anneal(epoch, len_e):
+ return min(1.0, 0.01 + epoch / len_e)
+
if __name__ == "__main__":
+ if args.experiment_mode:
+ min_test_losses = []
+ min_test_losses.append(str(args))
+ for i in range(args.runs):
+ test_losses = []
+ model.__init__(flows)
+ model = model.to(device)
+ optimizer = optim.Adam(model.parameters(), lr=0.001)
+ if i == 0:
+ seed = args.seed
+ else:
+ seed += 1
+ torch.manual_seed(seed)
+ for e in [i + 1 for i in range(args.epochs)]:
+ beta = anneal(e, args.epochs)
+ train(model, e, beta)
+ tl = test(model, e)
+ test_losses.append(tl)
+ print("====> Lowest test set loss: {:.4f}".format(min(test_losses)))
+ min_test_losses.append(min(test_losses))
+ Series = pd.Series(min_test_losses)
+
+ dirName = "experiments"
+ if not os.path.exists(dirName):
+ os.mkdir(dirName)
+ else:
+ pass
+ file_name = dirName + "/{}.xlsx".format(str(datetime.now()))
+ file_name = file_name.replace(":", "-")
+ Series.to_excel(file_name, index=False, header=None)
+ else:
+ for e in [i + 1 for i in range(args.epochs)]:
+ beta = anneal(e, args.epochs)
+ train(model, e, beta=beta)
+ tl = test(model, e)
+ test_losses.append(tl)
+
normflows: A PyTorch Package for Normalizing Flows

normflows is a PyTorch implementation of discrete normalizing flows. Many popular flow architectures are implemented,
+see the list below. The package can be easily installed via pip.
+The basic usage is described here, and a full documentation
+is available as well. A more detailed description of this package is given in our
+accompanying paper.
Several sample use cases are provided in the
+examples
folder,
+including Glow,
+a VAE, and
+a Residual Flow.
+Moreover, two simple applications are highlighted in the examples section. You can run them
+yourself in Google Colab using the links below to get a feeling for normflows
.
Architecture | Reference
---|---
Planar Flow | Rezende & Mohamed, 2015
Radial Flow | Rezende & Mohamed, 2015
NICE | Dinh et al., 2014
Real NVP | Dinh et al., 2017
Glow | Kingma et al., 2018
Masked Autoregressive Flow | Papamakarios et al., 2017
Neural Spline Flow | Durkan et al., 2019
Circular Neural Spline Flow | Rezende et al., 2020
Residual Flow | Chen et al., 2019
Stochastic Normalizing Flow | Wu et al., 2020
Note that Neural Spline Flows with circular and non-circular coordinates +are supported as well.
+The latest version of the package can be installed via pip
+pip install normflows
+
+At least Python 3.7 is required. If you want to use a GPU, make sure that +PyTorch is set up correctly by following the instructions at the +PyTorch website.
+To run the example notebooks clone the repository first
+git clone https://github.com/VincentStimper/normalizing-flows.git
+
+and then install the dependencies.
+pip install -r requirements_examples.txt
+
+A normalizing flow consists of a base distribution, defined in
+nf.distributions.base
,
+and a list of flows, given in
+nf.flows
.
+Let's assume our target is a 2D distribution. We pick a diagonal Gaussian
+base distribution, which is the most popular choice. Our flow shall be a
+Real NVP model and, therefore, we need
+to define a neural network for computing the parameters of the affine coupling
+map. One dimension is used to compute the scale and shift parameter for the
+other dimension. After each coupling layer we swap their roles.
import normflows as nf
+
+# Define 2D Gaussian base distribution
+base = nf.distributions.base.DiagGaussian(2)
+
+# Define list of flows
+num_layers = 32
+flows = []
+for i in range(num_layers):
+ # Neural network with two hidden layers having 64 units each
+ # Last layer is initialized by zeros making training more stable
+ param_map = nf.nets.MLP([1, 64, 64, 2], init_zeros=True)
+ # Add flow layer
+ flows.append(nf.flows.AffineCouplingBlock(param_map))
+ # Swap dimensions
+ flows.append(nf.flows.Permute(2, mode='swap'))
+
+Once they are set up, we can define a
+nf.NormalizingFlow
+model. If the target density is available, it can be added to the model
+to be used during training. Sample target distributions are given in
+nf.distributions.target
.
# If the target density is not given
+model = nf.NormalizingFlow(base, flows)
+
+# If the target density is given
+target = nf.distributions.target.TwoMoons()
+model = nf.NormalizingFlow(base, flows, target)
+
+The loss can be computed with the methods of the model and minimized.
+# When doing maximum likelihood learning, i.e. minimizing the forward KLD
+# with no target distribution given
+loss = model.forward_kld(x)
+
+# When minimizing the reverse KLD based on the given target distribution
+loss = model.reverse_kld(num_samples=512)
+
+# Optimization as usual
+loss.backward()
+optimizer.step()
+
+We provide several illustrative examples of how to use the package in the
+examples
+directory. Among them are implementations of
+Glow,
+a VAE, and
+a Residual Flow.
+More advanced experiments can be done with the scripts listed in the
+repository about resampled base distributions,
+see its experiments
+folder.
Below, we consider two simple 2D examples.
+In this notebook, +which can directly be opened in +Colab, +we consider a 2D distribution with two half-moon-shaped modes as a target. We approximate it with a Real NVP model +and obtain the following results.
+Note that there might be a density filament connecting the two modes, which is due to an architectural limitation +of normalizing flows, especially prominent in Real NVP. You can find out more about it in +this paper.
+In another example, +which is available in Colab +as well, we apply a Neural Spline Flow model to a distribution defined on a cylinder. The resulting density is visualized below.
+This example is considered in the paper accompanying this repository.
+If you have problems, please read the package documentation +and check out the examples section above. You are also welcome to +create issues on GitHub to get help. Note that it is +worthwhile browsing the existing open +and closed issues, which might +address the problem you are facing.
+If you find a bug or have a feature request, please +file an issue on GitHub.
+You are welcome to contribute to the package by fixing the bug or adding the feature yourself. If you want to
+contribute, please add tests for the code you added or modified and ensure it passes successfully by running pytest
.
+This can be done by simply executing
pytest
+
within your local version of the repository. Make sure your code is well documented, and we also encourage contributions to the existing documentation. Once you have finished coding and testing, please create a pull request on GitHub.
+The package has been used in several research papers. Some of them are listed below.
+++Andrew Campbell, Wenlong Chen, Vincent Stimper, José Miguel Hernández-Lobato, and Yichuan Zhang. +A gradient based strategy for Hamiltonian Monte Carlo hyperparameter optimization. +In Proceedings of the 38th International Conference on Machine Learning, pp. 1238–1248. PMLR, 2021.
+ +Vincent Stimper, Bernhard Schölkopf, and José Miguel Hernández-Lobato. +Resampling Base Distributions of Normalizing Flows. +In Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, volume 151, pp. 4915–4936, 2022.
+ +Laurence I. Midgley, Vincent Stimper, Gregor N. C. Simm, Bernhard Schölkopf, and José Miguel Hernández-Lobato. +Flow Annealed Importance Sampling Bootstrap. +The Eleventh International Conference on Learning Representations, 2023.
+ +Arnau Quera-Bofarull, Joel Dyer, Anisoara Calinescu, J. Doyne Farmer, and Michael Wooldridge. +BlackBIRDS: Black-Box Inference foR Differentiable Simulators. +Journal of Open Source Software, 8(89), 5776, 2023.
+ +Utkarsh Singhal, Carlos Esteves, Ameesh Makadia, and Stella X. Yu. +Learning to Transform for Generalizable Instance-wise Invariance. +Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 6211-6221, 2023.
+ +Ba-Hien Tran, Giulio Franzese, Pietro Michiardi, and Maurizio Filippone. +One-Line-of-Code Data Mollification Improves Optimization of Likelihood-based Generative Models. + Advances in Neural Information Processing Systems 36, pp. 6545–6567, 2023.
+ +
Moreover, the boltzgen
package
+has been built upon normflows
.
If you use normflows
, please cite the
+corresponding paper as follows.
++Stimper et al., (2023). normflows: A PyTorch Package for Normalizing Flows. +Journal of Open Source Software, 8(86), 5361, https://doi.org/10.21105/joss.05361
+
Bibtex
+@article{Stimper2023,
+ author = {Vincent Stimper and David Liu and Andrew Campbell and Vincent Berenz and Lukas Ryll and Bernhard Schölkopf and José Miguel Hernández-Lobato},
+ title = {normflows: A PyTorch Package for Normalizing Flows},
+ journal = {Journal of Open Source Software},
+ volume = {8},
+ number = {86},
+ pages = {5361},
+ publisher = {The Open Journal},
+ doi = {10.21105/joss.05361},
+ url = {https://doi.org/10.21105/joss.05361},
+ year = {2023}
+}
+
core
+
+
+ClassCondFlow
+
+Bases: Module
+
+Class conditional normalizing flow model, providing the class to be
+conditioned on only to the base distribution, as done e.g. in Glow.
+
+normflows/core.py
+
+Constructor
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
q0 |
+ + | +
+
+
+ Base distribution + |
+ + required + | +
flows |
+ + | +
+
+
+ List of flows + |
+ + required + | +
normflows/core.py
376 +377 +378 +379 +380 +381 +382 +383 +384 +385 |
|
forward_kld(x, y)
+
+Estimates forward KL divergence, see arXiv 1912.02762
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch sampled from target distribution | required
+
+Returns:
+
+Estimate of forward KL divergence averaged over batch
+
+normflows/core.py
load(path)
+
+Load model from state dict
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+path | Path including filename where to load model from | required
+
+normflows/core.py
log_prob(x, y)
+
+Get log probability for batch
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch | required
+y | Classes of x | required
+
+Returns:
+
+log probability
+
+normflows/core.py
sample(num_samples=1, y=None)
+
+Samples from flow-based approximate distribution
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+num_samples | Number of samples to draw | 1
+y | Classes to sample from, will be sampled uniformly if None | None
+
+Returns:
+
+Samples, log probability
+
+normflows/core.py
save(path)
+
+Save state dict of model
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+path | Path including filename where to save model | required
+
+normflows/core.py
ConditionalNormalizingFlow
+
+
+
+ Bases: NormalizingFlow
Conditional normalizing flow model, providing condition, +which is also called context, to both the base distribution +and the flow layers
+ +normflows/core.py
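+A hedged usage sketch: base and flows are assumed to be context-aware modules,
+and x and context are assumed to be matching batches of data and conditions.
+
+model = nf.ConditionalNormalizingFlow(base, flows)
+
+# Maximum likelihood step on (x, context) pairs
+loss = model.forward_kld(x, context=context)
+loss.backward()
+
+# Draw samples for the given batch of conditions
+z, log_q = model.sample(num_samples=context.shape[0], context=context)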
forward(z, context=None)
+
+Transforms latent variable z to the flow variable x
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+z | Batch in the latent space | required
+context | Batch of conditions/context | None
+
+Returns:
+
+Batch in the space of the target distribution
+
+normflows/core.py
forward_and_log_det(z, context=None)
+
+Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+z | Batch in the latent space | required
+context | Batch of conditions/context | None
+
+Returns:
+
+Batch in the space of the target distribution, log determinant of the Jacobian
+
+normflows/core.py
forward_kld(x, context=None)
+
+Estimates forward KL divergence, see arXiv 1912.02762
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch sampled from target distribution | required
+context | Batch of conditions/context | None
+
+Returns:
+
+Estimate of forward KL divergence averaged over batch
+
+normflows/core.py
inverse(x, context=None)
+
+Transforms flow variable x to the latent variable z
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch in the space of the target distribution | required
+context | Batch of conditions/context | None
+
+Returns:
+
+Batch in the latent space
+
+normflows/core.py
inverse_and_log_det(x, context=None)
+
+Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch in the space of the target distribution | required
+context | Batch of conditions/context | None
+
+Returns:
+
+Batch in the latent space, log determinant of the Jacobian
+
+normflows/core.py
log_prob(x, context=None)
+
+Get log probability for batch
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch | required
+context | Batch of conditions/context | None
+
+Returns:
+
+log probability
+
+normflows/core.py
reverse_kld(num_samples=1, context=None, beta=1.0, score_fn=True)
+
+Estimates reverse KL divergence, see arXiv 1912.02762
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+num_samples | Number of samples to draw from base distribution | 1
+context | Batch of conditions/context | None
+beta | Annealing parameter, see arXiv 1505.05770 | 1.0
+score_fn | Flag whether to include score function in gradient, see arXiv 1703.09194 | True
+
+Returns:
+
+Estimate of the reverse KL divergence averaged over latent samples
+
+normflows/core.py
sample(num_samples=1, context=None)
+
+Samples from flow-based approximate distribution
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+num_samples | Number of samples to draw | 1
+context | Batch of conditions/context | None
+
+Returns:
+
+Samples, log probability
+
+normflows/core.py
MultiscaleFlow
+
+
+
+ Bases: Module
Normalizing Flow model with multiscale architecture, see RealNVP or Glow paper
+ +normflows/core.py
__init__(q0, flows, merges, transform=None, class_cond=True)
+
+Constructor
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+q0 | List of base distributions | required
+flows | List of lists of flows for each level | required
+merges | List of merge/split operations (forward pass must do merge) | required
+transform | Initial transformation of inputs | None
+class_cond | Flag indicating whether the model has class-conditional base distributions | True
+
+normflows/core.py
forward(x, y=None)
+
+Get negative log-likelihood for maximum likelihood training
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch of data | required
+y | Batch of targets, if applicable | None
+
+Returns:
+
+Negative log-likelihood of the batch
+
+normflows/core.py
forward_and_log_det(z)
+
+Get observed variable x from list of latent variables z
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+z | List of latent variables | required
+
+Returns:
+
+Observed variable x, log determinant of Jacobian
+
+normflows/core.py
forward_kld(x, y=None)
+
+Estimates forward KL divergence, see arXiv 1912.02762
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch sampled from target distribution | required
+y | Batch of targets, if applicable | None
+
+Returns:
+
+Estimate of forward KL divergence averaged over batch
+
+normflows/core.py
inverse_and_log_det(x)
+
+Get latent variable z from observed variable x
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Observed variable | required
+
+Returns:
+
+List of latent variables z, log determinant of Jacobian
+
+normflows/core.py
load(path)
+
+Load model from state dict
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+path | Path including filename where to load model from | required
+
+normflows/core.py
log_prob(x, y)
+
+Get log probability for batch
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch | required
+y | Classes of x | required
+
+Returns:
+
+log probability
+
+normflows/core.py
reset_temperature()
+
+Set temperature values of base distributions back to None
+
+normflows/core.py
sample(num_samples=1, y=None, temperature=None)
+
+Samples from flow-based approximate distribution
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+num_samples | Number of samples to draw | 1
+y | Classes to sample from, will be sampled uniformly if None | None
+temperature | Temperature parameter for temperature-annealed sampling | None
+
+Returns:
+
+Samples, log probability
+
+normflows/core.py
save(path)
+
+Save state dict of model
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+path | Path including filename where to save model | required
+
+normflows/core.py
set_temperature(temperature)
+
+Set temperature for temperature-annealed sampling
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+temperature | Temperature parameter | required
+
+normflows/core.py
NormalizingFlow
+
+
+
+ Bases: Module
Normalizing Flow model to approximate target distribution
+ +normflows/core.py
__init__(q0, flows, p=None)
+
+Constructor
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+q0 | Base distribution | required
+flows | List of flows | required
+p | Target distribution | None
+
+normflows/core.py
forward(z)
+
+Transforms latent variable z to the flow variable x
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+z | Batch in the latent space | required
+
+Returns:
+
+Batch in the space of the target distribution
+
+normflows/core.py
forward_and_log_det(z)
+
+Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+z | Batch in the latent space | required
+
+Returns:
+
+Batch in the space of the target distribution, log determinant of the Jacobian
+
+normflows/core.py
forward_kld(x)
+
+Estimates forward KL divergence, see arXiv 1912.02762
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch sampled from target distribution | required
+
+Returns:
+
+Estimate of forward KL divergence averaged over batch
+
+normflows/core.py
inverse(x)
+
+Transforms flow variable x to the latent variable z
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch in the space of the target distribution | required
+
+Returns:
+
+Batch in the latent space
+
+normflows/core.py
inverse_and_log_det(x)
+
+Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch in the space of the target distribution | required
+
+Returns:
+
+Batch in the latent space, log determinant of the Jacobian
+
+normflows/core.py
load(path)
+
+Load model from state dict
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+path | Path including filename where to load model from | required
+
+normflows/core.py
log_prob(x)
+
+Get log probability for batch
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+x | Batch | required
+
+Returns:
+
+log probability
+
+normflows/core.py
reverse_alpha_div(num_samples=1, alpha=1, dreg=False)
+
+Alpha divergence when sampling from q
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+num_samples | Number of samples to draw | 1
+dreg | Flag whether to use Double Reparametrized Gradient estimator, see arXiv 1810.04152 | False
+
+Returns:
+
+Alpha divergence
+
+normflows/core.py
reverse_kld(num_samples=1, beta=1.0, score_fn=True)
+
+Estimates reverse KL divergence, see arXiv 1912.02762
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+num_samples | Number of samples to draw from base distribution | 1
+beta | Annealing parameter, see arXiv 1505.05770 | 1.0
+score_fn | Flag whether to include score function in gradient, see arXiv 1703.09194 | True
+
+Returns:
+
+Estimate of the reverse KL divergence averaged over latent samples
+
+normflows/core.py
sample(num_samples=1)
+
+Samples from flow-based approximate distribution
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+num_samples | Number of samples to draw | 1
+
+Returns:
+
+Samples, log probability
+
+normflows/core.py
save(path)
+
+Save state dict of model
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+path | Path including filename where to save model | required
+
+normflows/core.py
NormalizingFlowVAE
+
+
+
+ Bases: Module
VAE using normalizing flows to express approximate distribution
+ +normflows/core.py
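+A hedged usage sketch: encoder, flow_list and decoder are assumed to be set up
+elsewhere, and prior is a Gaussian over the latent space.
+
+model = nf.NormalizingFlowVAE(prior, q0=encoder, flows=flow_list, decoder=decoder)
+
+# For each data point in x, draw one latent sample and get the approximate
+# posterior and prior log densities
+z, log_q, log_p = model(x, num_samples=1)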
__init__(prior, q0=distributions.Dirac(), flows=None, decoder=None)
+
+Constructor of normalizing flow model
+
+Parameters:
+
+Name | Description | Default
+---|---|---
+prior | Prior distribution of the VAE, i.e. Gaussian | required
+decoder | Optional decoder | None
+flows | Flows to transform output of base encoder | None
+q0 | Base encoder | Dirac()
+
+normflows/core.py
forward(x, num_samples=1)

Takes data batch, samples num_samples for each data point from base distribution

Parameters:

- x: data batch (required)
- num_samples: number of samples to draw for each data point (default: 1)

Returns:

- latent variables for each batch and sample, log_q, and log_p

Source: normflows/core.py, lines 676-700
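To make the pieces concrete, here is a minimal sketch of wiring the class together for binarized 784-dimensional data with a 16-dimensional latent space; the layer widths and the choice of flows are illustrative, not prescribed by the library:

```python
import normflows as nf

latent = 16
prior = nf.distributions.DiagGaussian(latent, trainable=False)
# Encoder net outputs the mean (first half) and std (second half) of z
encoder = nf.distributions.NNDiagGaussian(nf.nets.MLP([784, 256, 2 * latent]))
# Decoder net parametrizes the Bernoulli mean via a sigmoid
decoder = nf.distributions.NNBernoulliDecoder(nf.nets.MLP([latent, 256, 784]))
# A few flows refining the encoder output
flows = [nf.flows.MaskedAffineAutoregressive(latent, 64) for _ in range(3)]

vae = nf.NormalizingFlowVAE(prior, q0=encoder, flows=flows, decoder=decoder)
# z, log_q, log_p = vae(x, num_samples=1) yields the terms of the ELBO
```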
core_test

distributions

base

AffineGaussian

Bases: BaseDistribution

Diagonal Gaussian with an affine constant transformation applied to it,
can be class conditional or not

Source: normflows/distributions/base.py, lines 474-570
__init__(shape, affine_shape, num_classes=None)

Constructor

Parameters:

- shape: Shape of the variables (required)
- affine_shape: Shape of the parameters in the affine transformation (required)
- num_classes: Number of classes if the base is class conditional, None otherwise (default: None)

Source: normflows/distributions/base.py, lines 480-506
BaseDistribution

Bases: Module

Base distribution of a flow-based model.
Parameters do not depend on the target variable (as is the case for a VAE encoder).

Source: normflows/distributions/base.py, lines 8-49
forward(num_samples=1)

Samples from base distribution and calculates log probability

Parameters:

- num_samples: Number of samples to draw from the distribution (default: 1)

Returns:

- Samples drawn from the distribution, log probability

Source: normflows/distributions/base.py, lines 17-26

log_prob(z)

Calculate log probability of batch of samples

Parameters:

- z: Batch of random variables to determine log probability for (required)

Returns:

- log probability for each batch element

Source: normflows/distributions/base.py, lines 28-37

sample(num_samples=1, **kwargs)

Samples from base distribution

Parameters:

- num_samples: Number of samples to draw from the distribution (default: 1)

Returns:

- Samples drawn from the distribution

Source: normflows/distributions/base.py, lines 39-49
ClassCondDiagGaussian

Bases: BaseDistribution

Class conditional multivariate Gaussian distribution with diagonal covariance matrix

Source: normflows/distributions/base.py, lines 273-344

__init__(shape, num_classes)

Constructor

Parameters:

- shape: Tuple with shape of data, if int shape has one dimension (required)
- num_classes: Number of classes to condition on (required)

Source: normflows/distributions/base.py, lines 278-297
ConditionalDiagGaussian

Bases: BaseDistribution

Conditional multivariate Gaussian distribution with diagonal
covariance matrix; parameters are obtained by a context encoder,
context meaning the variable to condition on

Source: normflows/distributions/base.py, lines 106-155

__init__(shape, context_encoder)

Constructor

Parameters:

- shape: Tuple with shape of data, if int shape has one dimension (required)
- context_encoder: Computes mean and log of the standard deviation (required)

Source: normflows/distributions/base.py, lines 112-130
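A minimal sketch of a conditional base, assuming the context encoder is an MLP whose output stacks the mean and the log standard deviation (sizes are illustrative):

```python
import normflows as nf

d, d_context = 2, 3
# Maps a 3-dim context to mean and log std of a 2-dim Gaussian
context_encoder = nf.nets.MLP([d_context, 32, 2 * d])
q0 = nf.distributions.ConditionalDiagGaussian(d, context_encoder)
```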
DiagGaussian

Bases: BaseDistribution

Multivariate Gaussian distribution with diagonal covariance matrix

Source: normflows/distributions/base.py, lines 52-103

__init__(shape, trainable=True)

Constructor

Parameters:

- shape: Tuple with shape of data, if int shape has one dimension (required)
- trainable: Flag whether to use trainable or fixed parameters (default: True)

Source: normflows/distributions/base.py, lines 57-78
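A minimal usage sketch: the forward pass draws samples together with their log probability, which log_prob reproduces:

```python
import normflows as nf

q0 = nf.distributions.DiagGaussian(2, trainable=False)
z, log_q = q0(num_samples=64)  # samples and their log probability
log_q_check = q0.log_prob(z)   # same values, computed explicitly
```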
GaussianMixture

Bases: BaseDistribution

Mixture of Gaussians with diagonal covariance matrix

Source: normflows/distributions/base.py, lines 573-659

__init__(n_modes, dim, loc=None, scale=None, weights=None, trainable=True)

Constructor

Parameters:

- n_modes: Number of modes of the mixture model (required)
- dim: Number of dimensions of each Gaussian (required)
- loc: List of mean values (default: None)
- scale: List of diagonals of the covariance matrices (default: None)
- weights: List of mode probabilities (default: None)
- trainable: Flag, if true parameters will be optimized during training (default: True)

Source: normflows/distributions/base.py, lines 578-614
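A minimal sketch of a fixed three-mode mixture in two dimensions used as a base distribution; the mode locations are illustrative:

```python
import normflows as nf

base = nf.distributions.GaussianMixture(
    n_modes=3, dim=2,
    loc=[[-2.0, 0.0], [0.0, 2.0], [2.0, 0.0]],
    trainable=False,
)
z, log_q = base(num_samples=128)
```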
GaussianPCA

Bases: BaseDistribution

Gaussian distribution resulting from linearly mapping a normal distributed latent
variable describing the "content of the target"

Source: normflows/distributions/base.py, lines 662-719

__init__(dim, latent_dim=None, sigma=0.1)

Constructor

Parameters:

- dim: Number of dimensions of the flow variables (required)
- latent_dim: Number of dimensions of the latent "content" variable; if None it is set equal to dim (default: None)
- sigma: Noise level (default: 0.1)

Source: normflows/distributions/base.py, lines 668-687
GlowBase

Bases: BaseDistribution

Base distribution of the Glow model, i.e. Diagonal Gaussian with one mean and
log scale for each channel

Source: normflows/distributions/base.py, lines 347-471

__init__(shape, num_classes=None, logscale_factor=3.0)

Constructor

Parameters:

- shape: Shape of the variables (required)
- num_classes: Number of classes if the base is class conditional, None otherwise (default: None)
- logscale_factor: Scaling factor for mean and log variance (default: 3.0)

Source: normflows/distributions/base.py, lines 353-395
Uniform

Bases: BaseDistribution

Multivariate uniform distribution

Source: normflows/distributions/base.py, lines 158-195

__init__(shape, low=-1.0, high=1.0)

Constructor

Parameters:

- shape: Tuple with shape of data, if int shape has one dimension (required)
- low: Lower bound of uniform distribution (default: -1.0)
- high: Upper bound of uniform distribution (default: 1.0)

Source: normflows/distributions/base.py, lines 163-180
UniformGaussian

Bases: BaseDistribution

Distribution of a 1D random variable with some entries having a uniform and
others a Gaussian distribution

Source: normflows/distributions/base.py, lines 198-270

__init__(ndim, ind, scale=None)

Constructor

Parameters:

- ndim: Int, number of dimensions (required)
- ind: Iterable, indices of uniformly distributed entries (required)
- scale: Iterable, standard deviation of Gaussian or width of uniform distribution (default: None)

Source: normflows/distributions/base.py, lines 204-239
base_test

decoder

BaseDecoder

Bases: Module

Source: normflows/distributions/decoder.py, lines 6-31

forward(z)

Decodes z to x

Parameters:

- z: latent variable (required)

Returns:

- x, std of x

Source: normflows/distributions/decoder.py, lines 10-19
log_prob(x, z)

Log probability

Parameters:

- x: observable (required)
- z: latent variable (required)

Returns:

- log(p) of x given z

Source: normflows/distributions/decoder.py, lines 21-31
NNBernoulliDecoder

Bases: BaseDecoder

BaseDecoder representing a Bernoulli distribution with mean parametrized by a NN

Source: normflows/distributions/decoder.py, lines 73-102

__init__(net)

Constructor

Parameters:

- net: neural network parametrizing the Bernoulli mean (mean = sigmoid(nn_out)) (required)

Source: normflows/distributions/decoder.py, lines 78-85
NNDiagGaussianDecoder

Bases: BaseDecoder

BaseDecoder representing a diagonal Gaussian distribution with mean and std parametrized by a NN

Source: normflows/distributions/decoder.py, lines 34-70

__init__(net)

Constructor

Parameters:

- net: neural network parametrizing mean and standard deviation of diagonal Gaussian (required)

Source: normflows/distributions/decoder.py, lines 39-46
decoder_test

distribution_test

DistributionTest

Bases: TestCase

Generic test case for distribution modules

Source: normflows/distributions/distribution_test.py, lines 6-52

encoder

BaseEncoder

Bases: Module

Base distribution of a flow-based variational autoencoder.
Parameters of the distribution depend on the target variable x.

Source: normflows/distributions/encoder.py, lines 6-36
forward(x, num_samples=1)

Parameters:

- x: Variable to condition on, first dimension is batch size (required)
- num_samples: number of samples to draw per element of mini-batch (default: 1)

Returns:

- sample of z for x, log probability for sample

Source: normflows/distributions/encoder.py, lines 15-24

log_prob(z, x)

Parameters:

- z: Primary random variable, first dimension is batch size (required)
- x: Variable to condition on, first dimension is batch size (required)

Returns:

- log probability of z given x

Source: normflows/distributions/encoder.py, lines 26-36
ConstDiagGaussian

Bases: BaseEncoder

Source: normflows/distributions/encoder.py, lines 74-127

__init__(loc, scale)

Multivariate Gaussian distribution with diagonal covariance and parameters being constant wrt x

Parameters:

- loc: mean vector of the distribution (required)
- scale: vector of the standard deviations on the diagonal of the covariance matrix (required)

Source: normflows/distributions/encoder.py, lines 75-89
forward(x=None, num_samples=1)

Parameters:

- x: Variable to condition on, will only be used to determine the batch size (default: None)
- num_samples: number of samples to draw per element of mini-batch (default: 1)

Returns:

- sample of z for x, log probability for sample

Source: normflows/distributions/encoder.py, lines 91-109

log_prob(z, x)

Parameters:

- z: Primary random variable, first dimension is batch dimension (required)
- x: Variable to condition on, first dimension is batch dimension (required)

Returns:

- log probability of z given x

Source: normflows/distributions/encoder.py, lines 111-127
NNDiagGaussian

Bases: BaseEncoder

Diagonal Gaussian distribution with mean and variance determined by a neural network

Source: normflows/distributions/encoder.py, lines 130-188

__init__(net)

Constructor

Parameters:

- net: net computing mean (first n / 2 outputs), standard deviation (second n / 2 outputs) (required)

Source: normflows/distributions/encoder.py, lines 135-142
forward(x, num_samples=1)

Parameters:

- x: Variable to condition on (required)
- num_samples: number of samples to draw per element of mini-batch (default: 1)

Returns:

- sample of z for x, log probability for sample

Source: normflows/distributions/encoder.py, lines 144-165

log_prob(z, x)

Parameters:

- z: Primary random variable, first dimension is batch dimension (required)
- x: Variable to condition on, first dimension is batch dimension (required)

Returns:

- log probability of z given x

Source: normflows/distributions/encoder.py, lines 167-188
encoder_test

linear_interpolation

LinearInterpolation

Linear interpolation of two distributions in the log space

Source: normflows/distributions/linear_interpolation.py, lines 1-27

__init__(dist1, dist2, alpha)

Constructor

Interpolation parameter alpha:

log_p = alpha * log_p_1 + (1 - alpha) * log_p_2

Parameters:

- dist1: First distribution (required)
- dist2: Second distribution (required)
- alpha: Interpolation parameter (required)

Source: normflows/distributions/linear_interpolation.py, lines 6-22
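A minimal sketch using two of the 2D example priors documented below; at alpha = 0.5 the interpolated log density is the average of the two log densities (the availability of log_prob on the interpolation object is assumed from its description):

```python
import normflows as nf

p1 = nf.distributions.TwoModes(2.0, 0.2)
p2 = nf.distributions.Smiley(0.5)
bridge = nf.distributions.LinearInterpolation(p1, p2, 0.5)
# bridge.log_prob(z) == 0.5 * p1.log_prob(z) + 0.5 * p2.log_prob(z)
```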
mh_proposal

DiagGaussianProposal

Bases: MHProposal

Diagonal Gaussian distribution with previous value as mean
as a proposal for Metropolis Hastings algorithm

Source: normflows/distributions/mh_proposal.py, lines 47-83

__init__(shape, scale)

Constructor

Parameters:

- shape: Shape of variables to sample (required)
- scale: Standard deviation of distribution (required)

Source: normflows/distributions/mh_proposal.py, lines 53-63
MHProposal

Bases: Module

Proposal distribution for the Metropolis Hastings algorithm

Source: normflows/distributions/mh_proposal.py, lines 6-44

forward(z)

Draw samples given z and compute log probability difference

log(p(z | z_new)) - log(p(z_new | z))

Parameters:

- z: Previous samples (required)

Returns:

- Proposal, difference of log probability ratio

Source: normflows/distributions/mh_proposal.py, lines 31-44

log_prob(z_, z)

Parameters:

- z_: Potential new sample (required)
- z: Previous sample (required)

Returns:

- Log probability of proposal distribution

Source: normflows/distributions/mh_proposal.py, lines 20-29

sample(z)

Sample new value based on previous z

Source: normflows/distributions/mh_proposal.py, lines 14-18
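Since forward returns the proposal together with log p(z | z_new) - log p(z_new | z), a Metropolis-Hastings update needs only the target's log_prob on top of it. A minimal sketch (mh_step is a hypothetical helper; z is assumed to be a batch of shape (batch, dim) and target any object with a log_prob method):

```python
import torch

def mh_step(z, target, proposal):
    z_new, log_ratio = proposal(z)
    # Log acceptance probability of the Metropolis-Hastings rule
    log_accept = target.log_prob(z_new) - target.log_prob(z) + log_ratio
    accept = torch.rand_like(log_accept).log() < log_accept
    return torch.where(accept.unsqueeze(-1), z_new, z)
```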
prior

ImagePrior

Bases: Module

Intensities of an image determine probability density of prior

Source: normflows/distributions/prior.py, lines 21-104

__init__(image, x_range=[-3, 3], y_range=[-3, 3], eps=1e-10)

Constructor

Parameters:

- image: image as np matrix (required)
- x_range: x range to position image at (default: [-3, 3])
- y_range: y range to position image at (default: [-3, 3])
- eps: small value to add to image to avoid log(0) problems (default: 1e-10)

Source: normflows/distributions/prior.py, lines 26-57
log_prob(z)

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/prior.py, lines 59-69

rejection_sampling(num_steps=1)

Perform rejection sampling on image distribution

Parameters:

- num_steps: Number of rejection sampling steps to perform (default: 1)

Returns:

- Accepted samples

Source: normflows/distributions/prior.py, lines 71-88

sample(num_samples=1)

Sample from image distribution through rejection sampling

Parameters:

- num_samples: Number of samples to draw (default: 1)

Returns:

- Samples

Source: normflows/distributions/prior.py, lines 90-104
PriorDistribution

Source: normflows/distributions/prior.py, lines 6-18

log_prob(z)

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/prior.py, lines 10-18
Sinusoidal

Bases: PriorDistribution

Source: normflows/distributions/prior.py, lines 152-194

__init__(scale, period)

Distribution 2d with sinusoidal density, given by

w_1(z) = sin(2*pi / period * z[0])
log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2

Parameters:

- scale: scale of the distribution, see formula (required)
- period: period of the sinusoidal (required)

Source: normflows/distributions/prior.py, lines 153-167

log_prob(z)

log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2
w_1(z) = sin(2*pi / period * z[0])

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/prior.py, lines 169-194
Sinusoidal_gap

Bases: PriorDistribution

Source: normflows/distributions/prior.py, lines 197-245

__init__(scale, period)

Distribution 2d with sinusoidal density with a gap, given by

w_1(z) = sin(2*pi / period * z[0])
w_2(z) = 3 * exp(-0.5 * ((z[0] - 1) / 0.6) ** 2)
log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.35) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_2(z)) / 0.35) ** 2))

Parameters:

- loc: distance of modes from the origin (required)
- scale: scale of modes (required)

Source: normflows/distributions/prior.py, lines 198-216

log_prob(z)

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/prior.py, lines 218-245
Sinusoidal_split

Bases: PriorDistribution

Source: normflows/distributions/prior.py, lines 248-296

__init__(scale, period)

Distribution 2d with sinusoidal density with a split, given by

w_1(z) = sin(2*pi / period * z[0])
w_3(z) = 3 * sigmoid((z[0] - 1) / 0.3)
log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.4) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_3(z)) / 0.35) ** 2))

Parameters:

- loc: distance of modes from the origin (required)
- scale: scale of modes (required)

Source: normflows/distributions/prior.py, lines 249-267

log_prob(z)

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/prior.py, lines 269-296
Smiley

Bases: PriorDistribution

Source: normflows/distributions/prior.py, lines 299-327

__init__(scale)

Distribution 2d of a smiley :)

Parameters:

- scale: scale of the smiley (required)

Source: normflows/distributions/prior.py, lines 300-307

log_prob(z)

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/prior.py, lines 309-327
TwoModes

Bases: PriorDistribution

Source: normflows/distributions/prior.py, lines 107-149

__init__(loc, scale)

Distribution 2d with two modes at z[0] = -loc and z[0] = loc, following the density

log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2
         - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2))

Parameters:

- loc: distance of modes from the origin (required)
- scale: scale of modes (required)

Source: normflows/distributions/prior.py, lines 108-124

log_prob(z)

log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2
         - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2))

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/prior.py, lines 126-149
prior_test

target

CircularGaussianMixture

Bases: Module

Two-dimensional Gaussian mixture arranged in a circle

Source: normflows/distributions/target.py, lines 132-173

__init__(n_modes=8)

Constructor

Parameters:

- n_modes: Number of modes (default: 8)

Source: normflows/distributions/target.py, lines 137-147
ConditionalDiagGaussian

Bases: Target

Gaussian distribution conditioned on its mean and standard deviation.
The first half of the entries of the condition, also called context,
are the mean, while the second half are the standard deviation.

Source: normflows/distributions/target.py, lines 198-224

RingMixture

Bases: Target

Mixture of ring distributions in two dimensions

Source: normflows/distributions/target.py, lines 176-195

Target

Bases: Module

Sample target distributions to test models

Source: normflows/distributions/target.py, lines 8-73
__init__(prop_scale=torch.tensor(6.0), prop_shift=torch.tensor(-3.0))

Constructor

Parameters:

- prop_scale: Scale for the uniform proposal (default: tensor(6.0))
- prop_shift: Shift for the uniform proposal (default: tensor(-3.0))

Source: normflows/distributions/target.py, lines 13-22
log_prob(z)

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/target.py, lines 24-32

rejection_sampling(num_steps=1)

Perform rejection sampling on the target distribution

Parameters:

- num_steps: Number of rejection sampling steps to perform (default: 1)

Returns:

- Accepted samples

Source: normflows/distributions/target.py, lines 34-55

sample(num_samples=1)

Sample from the target distribution through rejection sampling

Parameters:

- num_samples: Number of samples to draw (default: 1)

Returns:

- Samples

Source: normflows/distributions/target.py, lines 57-73
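The prop_scale and prop_shift arguments of the constructor define a uniform proposal box, which suggests the standard accept/reject scheme behind these methods. A minimal sketch of the idea (rejection_step and the density bound are hypothetical, not part of the library):

```python
import torch

def rejection_step(target, bound, num_steps=1024, dim=2):
    # Uniform proposal over the box defined by prop_scale and prop_shift
    z_ = target.prop_scale * torch.rand(num_steps, dim) + target.prop_shift
    # Accept with probability p(z_) / bound, where bound >= max density
    accept = torch.rand(num_steps) * bound < torch.exp(target.log_prob(z_))
    return z_[accept]
```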
TwoIndependent

Bases: Target

Target distribution that combines two independent distributions of equal
size into one distribution. This is needed for Augmented Normalizing Flows,
see https://arxiv.org/abs/2002.07101

Source: normflows/distributions/target.py, lines 76-96

TwoMoons

Bases: Target

Bimodal two-dimensional distribution

Source: normflows/distributions/target.py, lines 99-129

log_prob(z)

log(p) = - 1/2 * ((norm(z) - 2) / 0.2) ** 2
         + log(exp(-1/2 * ((z[0] - 2) / 0.3) ** 2) + exp(-1/2 * ((z[0] + 2) / 0.3) ** 2))

Parameters:

- z: value or batch of latent variable (required)

Returns:

- log probability of the distribution for z

Source: normflows/distributions/target.py, lines 109-129
target_test

flows

affine

autoregressive

Autoregressive

Bases: Flow

Transforms each input variable with an invertible elementwise transformation.

The parameters of each invertible elementwise transformation can be functions of previous input
variables, but they must not depend on the current or any following input variables.

NOTE: Calculating the inverse transform is D times slower than calculating the
forward transform, where D is the dimensionality of the input to the transform.

Source: normflows/flows/affine/autoregressive.py, lines 10-47
MaskedAffineAutoregressive

Bases: Autoregressive

Masked affine autoregressive flow, mostly referred to as
Masked Autoregressive Flow (MAF), see arXiv 1705.07057.

Source: normflows/flows/affine/autoregressive.py, lines 50-128

__init__(features, hidden_features, context_features=None, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=F.relu, dropout_probability=0.0, use_batch_norm=False)

Constructor

Parameters:

- features: Number of features/input dimensions (required)
- hidden_features: Number of hidden units in the MADE network (required)
- context_features: Number of context/conditional features (default: None)
- num_blocks: Number of blocks in the MADE network (default: 2)
- use_residual_blocks: Flag whether residual blocks should be used (default: True)
- random_mask: Flag whether to use random masks (default: False)
- activation: Activation function to be used in the MADE network (default: relu)
- dropout_probability: Dropout probability in the MADE network (default: 0.0)
- use_batch_norm: Flag whether batch normalization should be used (default: False)

Source: normflows/flows/affine/autoregressive.py, lines 55-93
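A minimal sketch of a small MAF built from this layer, interleaved with permutations so that every coordinate gets conditioned on the others across depth (widths are illustrative):

```python
import normflows as nf

flows = []
for _ in range(5):
    flows.append(nf.flows.MaskedAffineAutoregressive(
        features=2, hidden_features=32, num_blocks=2))
    flows.append(nf.flows.Permute(2, mode='swap'))
```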
autoregressive_test

coupling

AffineConstFlow

Bases: Flow

Scales and shifts with learned constants per dimension. In the NICE paper there is a
scaling layer which is a special case of this where t is None.

Source: normflows/flows/affine/coupling.py, lines 9-54

__init__(shape, scale=True, shift=True)

Constructor

Parameters:

- shape: Shape of the coupling layer (required)
- scale: Flag whether to apply scaling (default: True)
- shift: Flag whether to apply shift (default: True)
- logscale_factor: Optional factor which can be used to control the scale of the log scale factor (required)

Source: normflows/flows/affine/coupling.py, lines 15-36
AffineCoupling

Bases: Flow

Affine Coupling layer as introduced in the RealNVP paper, see arXiv: 1605.08803

Source: normflows/flows/affine/coupling.py, lines 99-171

__init__(param_map, scale=True, scale_map='exp')

Constructor

Parameters:

- param_map: Maps features to shift and scale parameter (if applicable) (required)
- scale: Flag whether scale shall be applied (default: True)
- scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow; 'sigmoid_inv' uses multiplicative sigmoid scale when sampling from the model (default: 'exp')

Source: normflows/flows/affine/coupling.py, lines 104-115
forward(z)

z is a list of z1 and z2; z = [z1, z2].
z1 is left constant and the affine map is applied to z2 with parameters depending on z1.

Source: normflows/flows/affine/coupling.py, lines 117-147
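For illustration only (this is the transform the layer computes, not the library's internal code), the forward pass with scale_map='exp' amounts to:

```python
import torch

def coupling_forward(z1, z2, param_map):
    params = param_map(z1)                  # conditioner sees only z1
    shift, scale = params.chunk(2, dim=1)   # t(z1) and s(z1)
    z2 = z2 * torch.exp(scale) + shift      # z1 passes through unchanged
    log_det = scale.sum(dim=1)              # log |det Jacobian|
    return z1, z2, log_det
```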
AffineCouplingBlock

Bases: Flow

Affine Coupling layer including split and merge operation

Source: normflows/flows/affine/coupling.py, lines 232-267

__init__(param_map, scale=True, scale_map='exp', split_mode='channel')

Constructor

Parameters:

- param_map: Maps features to shift and scale parameter (if applicable) (required)
- scale: Flag whether scale shall be applied (default: True)
- scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow (default: 'exp')
- split_mode: Splitting mode, for possible values see Split class (default: 'channel')

Source: normflows/flows/affine/coupling.py, lines 237-253
CCAffineConst

Bases: Flow

Affine constant flow layer with class-conditional parameters

Source: normflows/flows/affine/coupling.py, lines 57-96

MaskedAffineFlow

Bases: Flow

RealNVP as introduced in arXiv: 1605.08803

Masked affine flow:

f(z) = b * z + (1 - b) * (z * exp(s(b * z)) + t)

Source: normflows/flows/affine/coupling.py, lines 174-229
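For reference, inverting the map above gives (our derivation from the stated formula, not a quotation from the source)

f^{-1}(z) = b * z + (1 - b) * (z - t(b * z)) * exp(-s(b * z))

since the masked entries b * z pass through unchanged and therefore reproduce the inputs of s and t.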
__init__(b, t=None, s=None)

Constructor

Parameters:

- b: mask for features, i.e. tensor of same size as latent data point filled with 0s and 1s (required)
- t: translation mapping, i.e. neural network, where first input dimension is batch dim; if None no translation is applied (default: None)
- s: scale mapping, i.e. neural network, where first input dimension is batch dim; if None no scale is applied (default: None)

Source: normflows/flows/affine/coupling.py, lines 187-207
coupling_test

glow

GlowBlock

Bases: Flow

Glow: Generative Flow with Invertible 1×1 Convolutions, arXiv: 1807.03039

One block of the Glow model, comprised of an activation normalization layer,
an invertible 1×1 convolution, and an affine coupling layer.

Source: normflows/flows/affine/glow.py, lines 11-84

__init__(channels, hidden_channels, scale=True, scale_map='sigmoid', split_mode='channel', leaky=0.0, init_zeros=True, use_lu=True, net_actnorm=False)

Constructor

Parameters:

- channels: Number of channels of the data (required)
- hidden_channels: number of channels in the hidden layer of the ConvNet (required)
- scale: Flag, whether to include scale in affine coupling layer (default: True)
- scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow (default: 'sigmoid')
- split_mode: Splitting mode, for possible values see Split class (default: 'channel')
- leaky: Leaky parameter of LeakyReLUs of ConvNet2d (default: 0.0)
- init_zeros: Flag whether to initialize last conv layer with zeros (default: True)
- use_lu: Flag whether to parametrize weights through the LU decomposition in invertible 1x1 convolution layers (default: True)
- logscale_factor: Factor which can be used to control the scale of the log scale factor, see source (required)

Source: normflows/flows/affine/glow.py, lines 21-70
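A minimal sketch of a stack of Glow blocks for 3-channel image data; the depth and hidden width are illustrative:

```python
import normflows as nf

flows = [nf.flows.GlowBlock(channels=3, hidden_channels=256)
         for _ in range(8)]
```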
glow_test

base

Composite

Bases: Flow

Composes several flows into one, in the order they are given.

Source: normflows/flows/base.py, lines 48-78

__init__(flows)

Constructor

Parameters:

- flows: Iterable of flows to composite (required)

Source: normflows/flows/base.py, lines 53-60
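A minimal sketch fusing two mixing layers into a single flow step; like any Flow, the composite returns the transformed variable and the log determinant:

```python
import normflows as nf

step = nf.flows.Composite([
    nf.flows.InvertibleAffine(4),
    nf.flows.Permute(4, mode='shuffle'),
])
# z_out, log_det = step(z_in) for a batch z_in of dimension 4
```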
Flow

Bases: Module

Generic class for flow functions

Source: normflows/flows/base.py, lines 5-24

forward(z)

Parameters:

- z: input variable, first dimension is batch dim (required)

Returns:

- transformed z and log of absolute determinant

Source: normflows/flows/base.py, lines 13-21

Reverse

Bases: Flow

Switches the forward transform of a flow layer with its inverse and vice versa

Source: normflows/flows/base.py, lines 27-45

__init__(flow)

Constructor

Parameters:

- flow: Flow layer to be reversed (required)

Source: normflows/flows/base.py, lines 32-39

base_test

flow_test

FlowTest

Bases: TestCase

Generic test case for flow modules

Source: normflows/flows/flow_test.py, lines 7-48
mixing

Invertible1x1Conv

Bases: Flow

Invertible 1x1 convolution introduced in the Glow paper.
Assumes 4d input/output tensors of the form NCHW.

Source: normflows/flows/mixing.py, lines 57-133

__init__(num_channels, use_lu=False)

Constructor

Parameters:

- num_channels: Number of channels of the data (required)
- use_lu: Flag whether to parametrize weights through the LU decomposition (default: False)

Source: normflows/flows/mixing.py, lines 63-86
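A note on the use_lu option (our summary of the standard Glow parametrization, not quoted from the source): the weight matrix is stored as W = P L U with P a fixed permutation, L lower triangular with unit diagonal, and U upper triangular, so the log determinant needed for the change of variables is cheap to read off; for an NCHW input it contributes

log |det J| = H * W * sum_i log |U[i, i]|

per sample, and inverting W reduces to two triangular solves.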
InvertibleAffine

Bases: Flow

Invertible affine transformation without shift, i.e. one-dimensional
version of the invertible 1x1 convolutions

Source: normflows/flows/mixing.py, lines 136-207

__init__(num_channels, use_lu=True)

Constructor

Parameters:

- num_channels: Number of channels of the data (required)
- use_lu: Flag whether to parametrize weights through the LU decomposition (default: True)

Source: normflows/flows/mixing.py, lines 142-165
LULinearPermute

Bases: Flow

Fixed permutation combined with a linear transformation parametrized
using the LU decomposition, used in https://arxiv.org/abs/1906.04032

Source: normflows/flows/mixing.py, lines 535-563

__init__(num_channels, identity_init=True)

Constructor

Parameters:

- num_channels: Number of dimensions of the data (required)
- identity_init: Flag, whether to initialize linear transform as identity matrix (default: True)

Source: normflows/flows/mixing.py, lines 541-553
Permute

Bases: Flow

Permutation of features along the channel dimension

Source: normflows/flows/mixing.py, lines 9-54

__init__(num_channels, mode='shuffle')

Constructor

Parameters:

- num_channels: Number of channels (required)
- mode: Mode of permuting features, can be shuffle for random permutation or swap for interchanging upper and lower part (default: 'shuffle')

Source: normflows/flows/mixing.py, lines 14-30
mixing_test

neural_spline

autoregressive

Implementations of autoregressive transforms.
Code taken from https://github.com/bayesiains/nsf

autoregressive_test

Tests for the autoregressive transforms.
Code partially taken from https://github.com/bayesiains/nsf

coupling

Implementations of various coupling layers.
Code taken from https://github.com/bayesiains/nsf

Coupling

Bases: Flow

A base class for coupling layers. Supports 2D inputs (NxD), as well as 4D inputs for
images (NxCxHxW). For images the splitting is done on the channel dimension, using the
provided 1D mask.

Source: normflows/flows/neural_spline/coupling.py, lines 16-140

__init__(mask, transform_net_create_fn, unconditional_transform=None)

Constructor.

mask: a 1-dim tensor, tuple or list. It indexes inputs as follows:

- if mask[i] > 0, input[i] will be transformed.
- if mask[i] <= 0, input[i] will be passed unchanged.

Source: normflows/flows/neural_spline/coupling.py, lines 21-61
coupling_test

Tests for the coupling Transforms.
Code partially taken from https://github.com/bayesiains/nsf

wrapper

AutoregressiveRationalQuadraticSpline

Bases: Flow

Neural spline flow coupling layer, wrapper for the implementation
of Durkan et al., see sources

Source: normflows/flows/neural_spline/wrapper.py, lines 186-244
__init__(num_input_channels, num_blocks, num_hidden_channels, num_context_channels=None, num_bins=8, tail_bound=3, activation=nn.ReLU, dropout_probability=0.0, permute_mask=False, init_identity=True)

Constructor

Parameters:

- num_input_channels (int): Flow dimension (required)
- num_blocks (int): Number of residual blocks of the parameter NN (required)
- num_hidden_channels (int): Number of hidden units of the NN (required)
- num_context_channels (int): Number of context/conditional channels (default: None)
- num_bins (int): Number of bins (default: 8)
- tail_bound (int): Bound of the spline tails (default: 3)
- activation (Module): Activation function (default: ReLU)
- dropout_probability (float): Dropout probability of the NN (default: 0.0)
- permute_mask (bool): Flag, permutes the mask of the NN (default: False)
- init_identity (bool): Flag, initialize transform as identity (default: True)

Source: normflows/flows/neural_spline/wrapper.py, lines 192-236
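A minimal sketch of an autoregressive rational-quadratic spline flow on a 4-dimensional variable; block count and width are illustrative:

```python
import normflows as nf

flows = [nf.flows.AutoregressiveRationalQuadraticSpline(
             num_input_channels=4, num_blocks=2, num_hidden_channels=64)
         for _ in range(4)]
```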
CircularAutoregressiveRationalQuadraticSpline

Bases: Flow

Neural spline flow coupling layer, wrapper for the implementation
of Durkan et al., see sources

Source: normflows/flows/neural_spline/wrapper.py, lines 247-311

__init__(num_input_channels, num_blocks, num_hidden_channels, ind_circ, num_context_channels=None, num_bins=8, tail_bound=3, activation=nn.ReLU, dropout_probability=0.0, permute_mask=True, init_identity=True)

Constructor

Parameters:

- num_input_channels (int): Flow dimension (required)
- num_blocks (int): Number of residual blocks of the parameter NN (required)
- num_hidden_channels (int): Number of hidden units of the NN (required)
- ind_circ (Iterable): Indices of the circular coordinates (required)
- num_context_channels (int): Number of context/conditional channels (default: None)
- num_bins (int): Number of bins (default: 8)
- tail_bound (int): Bound of the spline tails (default: 3)
- activation (torch module): Activation function (default: ReLU)
- dropout_probability (float): Dropout probability of the NN (default: 0.0)
- permute_mask (bool): Flag, permutes the mask of the NN (default: True)
- init_identity (bool): Flag, initialize transform as identity (default: True)

Source: normflows/flows/neural_spline/wrapper.py, lines 253-303
CircularCoupledRationalQuadraticSpline

Bases: `Flow`
Neural spline flow coupling layer with circular coordinates
Source code in `normflows/flows/neural_spline/wrapper.py`, lines 88–183.
__init__(num_input_channels, num_blocks, num_hidden_channels, ind_circ, num_context_channels=None, num_bins=8, tail_bound=3.0, activation=nn.ReLU, dropout_probability=0.0, reverse_mask=False, mask=None, init_identity=True)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `num_input_channels` | `int` | Flow dimension | required |
| `num_blocks` | `int` | Number of residual blocks of the parameter NN | required |
| `num_hidden_channels` | `int` | Number of hidden units of the NN | required |
| `num_context_channels` | `int` | Number of context/conditional channels | `None` |
| `ind_circ` | `Iterable` | Indices of the circular coordinates | required |
| `num_bins` | `int` | Number of bins | `8` |
| `tail_bound` | `float or Iterable` | Bound of the spline tails | `3.0` |
| `activation` | `torch module` | Activation function | `ReLU` |
| `dropout_probability` | `float` | Dropout probability of the NN | `0.0` |
| `reverse_mask` | `bool` | Flag whether the reverse mask should be used | `False` |
| `mask` | `torch tensor` | Mask to be used; if None, an alternating mask is generated | `None` |
| `init_identity` | `bool` | Flag, initialize transform as identity | `True` |
Source code in `normflows/flows/neural_spline/wrapper.py`, lines 93–175.
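A sketch for a density with one circular coordinate, e.g. on a cylinder surface; the export under `nf.flows` and the per-dimension tail bounds are assumptions for illustration:

```python
import math
import torch
import normflows as nf

# 2D data where dimension 0 is an angle; per-dimension tail bounds:
# pi for the circular coordinate, 3 for the unbounded one
flow = nf.flows.CircularCoupledRationalQuadraticSpline(
    num_input_channels=2,
    num_blocks=2,
    num_hidden_channels=64,
    ind_circ=[0],                            # index of the circular coordinate
    tail_bound=torch.tensor([math.pi, 3.0]),
)

z = torch.rand(8, 2) * 2 - 1
z_out, log_det = flow(z)
```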
CoupledRationalQuadraticSpline

Bases: `Flow`
Neural spline flow coupling layer, wrapper for the implementation of Durkan et al., see source.
Source code in `normflows/flows/neural_spline/wrapper.py`, lines 14–85.
__init__(num_input_channels, num_blocks, num_hidden_channels, num_context_channels=None, num_bins=8, tails='linear', tail_bound=3.0, activation=nn.ReLU, dropout_probability=0.0, reverse_mask=False, init_identity=True)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `num_input_channels` | `int` | Flow dimension | required |
| `num_blocks` | `int` | Number of residual blocks of the parameter NN | required |
| `num_hidden_channels` | `int` | Number of hidden units of the NN | required |
| `num_context_channels` | `int` | Number of context/conditional channels | `None` |
| `num_bins` | `int` | Number of bins | `8` |
| `tails` | `str` | Behaviour of the tails of the distribution; can be `linear`, `circular` for a periodic distribution, or `None` for a distribution on a compact interval | `'linear'` |
| `tail_bound` | `float` | Bound of the spline tails | `3.0` |
| `activation` | `torch module` | Activation function | `ReLU` |
| `dropout_probability` | `float` | Dropout probability of the NN | `0.0` |
| `reverse_mask` | `bool` | Flag whether the reverse mask should be used | `False` |
| `init_identity` | `bool` | Flag, initialize transform as identity | `True` |
Source code in `normflows/flows/neural_spline/wrapper.py`, lines 20–77.
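A typical model alternates several of these coupling layers, flipping the mask between layers; a minimal sketch, assuming the class is exported as `nf.flows.CoupledRationalQuadraticSpline`:

```python
import torch
import normflows as nf

latent_size = 2
flows = []
for i in range(8):
    flows.append(nf.flows.CoupledRationalQuadraticSpline(
        num_input_channels=latent_size,
        num_blocks=2,
        num_hidden_channels=64,
        reverse_mask=(i % 2 == 1),  # alternate which half is transformed
    ))

base = nf.distributions.DiagGaussian(latent_size)
model = nf.NormalizingFlow(base, flows)
z, log_q = model.sample(256)    # samples and their log density
```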
wrapper_test
normalization

ActNorm

Bases: `AffineConstFlow`
An `AffineConstFlow` with data-dependent initialization: on the very first batch, the parameters s and t are initialized such that the output is unit Gaussian, as described in the Glow paper.
Source code in `normflows/flows/normalization.py`, lines 7–39.
BatchNorm

Bases: `Flow`
Batch normalization without considering the derivatives of the batch statistics, see arXiv: 1605.08803.
Source code in `normflows/flows/normalization.py`, lines 42–62.
forward(z)
Do batch norm over batch and sample dimension.

Source code in `normflows/flows/normalization.py`, lines 52–62.
periodic
PeriodicShift

Bases: `Flow`
Shift and wrap periodic coordinates
Source code in `normflows/flows/periodic.py`, lines 35–73.
__init__(ind, bound=1.0, shift=0.0)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `ind` |  | Iterable, indices of coordinates to be mapped | required |
| `bound` |  | Float or iterable, bound of interval | `1.0` |
| `shift` |  | Tensor, shift to be applied | `0.0` |
Source code in `normflows/flows/periodic.py`, lines 40–57.
PeriodicWrap

Bases: `Flow`
Map periodic coordinates to fixed interval
Source code in `normflows/flows/periodic.py`, lines 6–32.
__init__(ind, bound=1.0)
Constructor

- `ind`: Iterable, indices of coordinates to be mapped
- `bound`: Float or iterable, bound of interval
Source code in `normflows/flows/periodic.py`, lines 11–22.
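A sketch of the two layers on a coordinate living on [-1, 1]; `PeriodicWrap` is typically appended at the end of the flow list of a circular model, and whether the wrapping happens in the forward or the inverse pass follows the package's flow direction convention (check the source if in doubt):

```python
import torch
import normflows as nf

# Coordinate 0 lives on the interval [-1, 1]
shift = nf.flows.PeriodicShift(ind=[0], bound=1.0, shift=0.3)
wrap = nf.flows.PeriodicWrap(ind=[0], bound=1.0)

z = torch.rand(4, 2) * 2 - 1
z_shifted, log_det = shift(z)        # shift coordinate 0 and wrap it back into the interval
z_rec, _ = shift.inverse(z_shifted)  # the inverse undoes the shift
```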
periodic_test
planar

Planar

Bases: `Flow`
Planar flow as introduced in arXiv: 1505.05770
f(z) = z + u * h(w * z + b)

Source code in `normflows/flows/planar.py`, lines 8–81.
__init__(shape, act='tanh', u=None, w=None, b=None)
Constructor of the planar flow

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `shape` |  | Shape of the latent variable z | required |
| `act` |  | Nonlinear function h of the planar flow (see definition of f above) | `'tanh'` |
| `u`, `w`, `b` |  | Optional initialization for the parameters | `None` |
Source code in `normflows/flows/planar.py`, lines 16–49.
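Planar (and radial) flows are cheap layers that can be stacked for simple variational approximations; a minimal sketch:

```python
import torch
import normflows as nf

# 16 planar layers acting on a 2D latent space;
# nf.flows.Radial((2,)) can be used the same way
flows = [nf.flows.Planar((2,)) for _ in range(16)]
base = nf.distributions.DiagGaussian(2)
model = nf.NormalizingFlow(base, flows)

z, log_q = model.sample(64)   # samples and their log density
```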
planar_test
radial

Radial

Bases: `Flow`
Radial flow as introduced in arXiv: 1505.05770
f(z) = z + beta * h(alpha, r) * (z - z_0)

Source code in `normflows/flows/radial.py`, lines 8–46.
__init__(shape, z_0=None)
Constructor of the radial flow

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `shape` |  | Shape of the latent variable z | required |
| `z_0` |  | Parameter of the radial flow | `None` |
Source code in `normflows/flows/radial.py`, lines 16–35.
radial_test
reshape

Merge

Bases: `Split`
Same as Split but with forward and backward pass interchanged
Source code in `normflows/flows/reshape.py`, lines 88–100.
Split
Bases: `Flow`
Split features into two sets
Source code in `normflows/flows/reshape.py`, lines 9–85.
__init__(mode='channel')
Constructor

The splitting mode can be:

- `channel`: Splits the first feature dimension, usually the channels, into two halves
- `channel_inv`: Same as `channel`, but with the two halves flipped
- `checkerboard`: Splits the features using a checkerboard pattern (last feature dimension must be even)
- `checkerboard_inv`: Same as `checkerboard`, but with inverted coloring
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `mode` |  | Splitting mode | `'channel'` |
Source code in `normflows/flows/reshape.py`, lines 14–28.
Squeeze
Bases: `Flow`
Squeeze operation of the multi-scale architecture, see the RealNVP or Glow paper.
Source code in `normflows/flows/reshape.py`, lines 103–128.
__init__()
Constructor

Source code in `normflows/flows/reshape.py`, lines 108–112.
residual
Residual

Bases: `Flow`
Invertible residual net block, wrapper to the implementation of Chen et al., see sources.
Source code in `normflows/flows/residual.py`, lines 12–75.
__init__(net, reverse=True, reduce_memory=True, geom_p=0.5, lamb=2.0, n_power_series=None, exact_trace=False, brute_force=False, n_samples=1, n_exact_terms=2, n_dist='geometric')
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `net` |  | Neural network, must be Lipschitz continuous with L < 1 | required |
| `reverse` |  | Flag; if true, the map is applied in the inverse pass, otherwise in the forward pass | `True` |
| `reduce_memory` |  | Flag; if true, the Neumann series and the precomputations for the backward pass are done during the forward pass | `True` |
| `geom_p` |  | Parameter of the geometric distribution used for the Neumann series | `0.5` |
| `lamb` |  | Parameter of the Poisson distribution used for the Neumann series | `2.0` |
| `n_power_series` |  | Number of terms in the Neumann series | `None` |
| `exact_trace` |  | Flag; if true, the trace of the Jacobian is computed exactly | `False` |
| `brute_force` |  | Flag; if true, the Jacobian is computed exactly in 2D | `False` |
| `n_samples` |  | Number of samples used to estimate the power series | `1` |
| `n_exact_terms` |  | Number of terms always included in the power series | `2` |
| `n_dist` |  | Distribution used for the power series, either `"geometric"` or `"poisson"` | `'geometric'` |
Source code in `normflows/flows/residual.py`, lines 18–61.
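A sketch of a small residual flow, using the `LipschitzMLP` documented further below as the Lipschitz-constrained network (exports under `nf.flows`/`nf.nets` are assumed):

```python
import torch
import normflows as nf

latent_size = 2
flows = []
for _ in range(4):
    # MLP constrained to a Lipschitz constant below 1
    net = nf.nets.LipschitzMLP(
        [latent_size, 64, 64, latent_size],
        lipschitz_const=0.9,
        init_zeros=True,
    )
    flows.append(nf.flows.Residual(net, reduce_memory=True))

z = torch.randn(8, latent_size)
for flow in flows:
    z, log_det = flow(z)
```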
iResBlock
Bases: `Module`
Source code in `normflows/flows/residual.py`, lines 78–261.
__init__(nnet, geom_p=0.5, lamb=2.0, n_power_series=None, exact_trace=False, brute_force=False, n_samples=1, n_exact_terms=2, n_dist='geometric', neumann_grad=True, grad_in_forward=False)
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `nnet` |  | A `nn.Module` | required |
| `n_power_series` |  | Number of power series terms; if not None, uses a biased approximation to the log determinant | `None` |
| `exact_trace` |  | If False, uses a Hutchinson trace estimator; otherwise computes the exact full Jacobian | `False` |
| `brute_force` |  | Computes the exact log determinant; only available for 2D inputs | `False` |
Source code in `normflows/flows/residual.py`, lines 79–116.
residual_test
stochastic

HamiltonianMonteCarlo

Bases: `Flow`
Flow layer using the HMC proposal in Stochastic Normalizing Flows.
Source code in `normflows/flows/stochastic.py`, lines 52–109.
__init__(target, steps, log_step_size, log_mass, max_abs_grad=None)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `target` |  | The stationary distribution of this Markov transition, i.e. the target distribution to sample from | required |
| `steps` |  | The number of leapfrog steps | required |
| `log_step_size` |  | The log step size used in the leapfrog integrator, shape (dim) | required |
| `log_mass` |  | The log mass determining the variance of the momentum samples, shape (dim) | required |
| `max_abs_grad` |  | Maximum absolute value of the gradient of the target distribution's log probability; if None, no gradient clipping is applied. Useful for improving numerical stability | `None` |
Source code in `normflows/flows/stochastic.py`, lines 58–72.
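A sketch of an HMC transition layer as used in a stochastic normalizing flow; `target` can be any distribution exposing `log_prob`, and the export under `nf.flows` is assumed:

```python
import torch
import normflows as nf

dim = 2
target = nf.distributions.TwoMoons()

# 5 leapfrog steps; step size and mass are given in log space,
# one value per dimension
hmc = nf.flows.HamiltonianMonteCarlo(
    target=target,
    steps=5,
    log_step_size=torch.log(0.1 * torch.ones(dim)),
    log_mass=torch.zeros(dim),   # unit mass
)

z = torch.randn(32, dim)
z, log_det = hmc(z)              # one HMC transition towards the target
```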
MetropolisHastings
Bases: `Flow`
Sampling through Metropolis-Hastings in Stochastic Normalizing Flows.
Source code in `normflows/flows/stochastic.py`, lines 6–49.
__init__(target, proposal, steps)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `target` |  | The stationary distribution of this Markov transition, i.e. the target distribution to sample from | required |
| `proposal` |  | Proposal distribution | required |
| `steps` |  | Number of MCMC steps to perform | required |
Source code in `normflows/flows/stochastic.py`, lines 12–23.
stochastic_test
nets

cnn

ConvNet2d

Bases: `Module`
Convolutional Neural Network with leaky ReLU nonlinearities
Source code in `normflows/nets/cnn.py`, lines 5–63.
__init__(channels, kernel_size, leaky=0.0, init_zeros=True, actnorm=False, weight_std=None)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `channels` |  | List of channels of the conv layers, first entry is in_channels | required |
| `kernel_size` |  | List of kernel sizes, same for height and width | required |
| `leaky` |  | Leaky part of the ReLU | `0.0` |
| `init_zeros` |  | Flag whether the last layer shall be initialized with zeros | `True` |
| `scale_output` |  | Flag whether to scale the output with a log scale parameter | required |
| `logscale_factor` |  | Constant factor to be multiplied to the log scaling | required |
| `actnorm` |  | Flag whether activation normalization shall be done after each conv layer except the output | `False` |
| `weight_std` |  | Fixed std used to initialize every layer | `None` |
Source code in `normflows/nets/cnn.py`, lines 10–60.
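A sketch of `ConvNet2d` as the parameter network of a coupling layer on image data; channel entry i is the input of layer i, and the kernel-size list has one entry per conv layer:

```python
import torch
import normflows as nf

# Three conv layers, 3 -> 32 -> 32 -> 6 channels with kernel sizes 3, 1, 3;
# the last layer is zero-initialized for stable training
net = nf.nets.ConvNet2d(
    channels=[3, 32, 32, 6],
    kernel_size=[3, 1, 3],
    leaky=0.01,
    init_zeros=True,
)

x = torch.randn(8, 3, 32, 32)
out = net(x)   # padding preserves the spatial size: (8, 6, 32, 32)
```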
lipschitz
LipschitzCNN

Bases: `Module`
Convolutional neural network which is Lipschitz continuous with Lipschitz constant L < 1.
Source code in `normflows/nets/lipschitz.py`, lines 70–129.
__init__(channels, kernel_size, lipschitz_const=0.97, max_lipschitz_iter=5, lipschitz_tolerance=None, init_zeros=True)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `channels` |  | Integer list with the number of channels of the layers | required |
| `kernel_size` |  | Integer list of kernel sizes of the layers | required |
| `lipschitz_const` |  | Maximum Lipschitz constant of each layer | `0.97` |
| `max_lipschitz_iter` |  | Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than the set maximum; if None, the tolerance is used | `5` |
| `lipschitz_tolerance` |  | Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 | `None` |
| `init_zeros` |  | Flag, whether to initialize the last layer approximately with zeros | `True` |
Source code in `normflows/nets/lipschitz.py`, lines 76–126.
LipschitzMLP
Bases: `Module`
Fully connected neural net which is Lipschitz continuous with Lipschitz constant L < 1.
Source code in `normflows/nets/lipschitz.py`, lines 14–67.
__init__(channels, lipschitz_const=0.97, max_lipschitz_iter=5, lipschitz_tolerance=None, init_zeros=True)
Constructor

- `channels`: Integer list with the number of channels of the layers
- `lipschitz_const`: Maximum Lipschitz constant of each layer
- `max_lipschitz_iter`: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than the set maximum; if None, the tolerance is used
- `lipschitz_tolerance`: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3
- `init_zeros`: Flag, whether to initialize the last layer approximately with zeros
Source code in `normflows/nets/lipschitz.py`, lines 17–64.
projmax_(v)
Inplace argmax on absolute value.

Source code in `normflows/nets/lipschitz.py`, lines 651–656.
made
Implementation of MADE. Code taken from https://github.com/bayesiains/nsf

MADE

Bases: `Module`
Implementation of MADE.
It can use either feedforward blocks or residual blocks (default is residual). Optionally, it can use batch norm or dropout within blocks (default is neither).
Source code in `normflows/nets/made.py`, lines 217–304.
MaskedFeedforwardBlock
Bases: `Module`
A feedforward block based on a masked linear module.
Note: in this implementation, the number of output features is taken to be equal to the number of input features.
Source code in `normflows/nets/made.py`, lines 84–137.
MaskedLinear
Bases: `Linear`
A linear module with a masked weight matrix.
Source code in `normflows/nets/made.py`, lines 19–81.
MaskedResidualBlock
Bases: `Module`
A residual block containing masked linear modules.
Source code in `normflows/nets/made.py`, lines 140–214.
made_test
Tests for MADE. Code partially taken from https://github.com/bayesiains/nsf

mlp

MLP

Bases: `Module`
A multilayer perceptron with Leaky ReLU nonlinearities
Source code in `normflows/nets/mlp.py`, lines 5–58.
__init__(layers, leaky=0.0, score_scale=None, output_fn=None, output_scale=None, init_zeros=False, dropout=None)
- `layers`: List of layer sizes from start to end
- `leaky`: Slope of the leaky part of the ReLU; if 0.0, a standard ReLU is used
- `score_scale`: Factor to apply to the scores, i.e. the output before `output_fn`
- `output_fn`: String, function to be applied to the output, either None, "sigmoid", "relu", "tanh", or "clampexp"
- `output_scale`: Rescales outputs if `output_fn` is specified, i.e. `scale * output_fn(out / scale)`
- `init_zeros`: Flag; if True, the weights and biases of the last layer are initialized with zeros (helpful for deep models, see arXiv 1807.03039)
- `dropout`: Float; if specified, dropout is applied before the last layer; if None, no dropout is applied
Source code in `normflows/nets/mlp.py`, lines 10–55.
resnet
ResidualBlock

Bases: `Module`
A general-purpose residual block. Works only with 1-dim inputs.
Source code in `normflows/nets/resnet.py`, lines 7–50.
ResidualNet
Bases: `Module`
A general-purpose residual network. Works only with 1-dim inputs.
Source code in `normflows/nets/resnet.py`, lines 53–104.
sampling
hais

HAIS

Class which performs HAIS (Hamiltonian Annealed Importance Sampling).
Source code in `normflows/sampling/hais.py`, lines 8–49.
__init__(betas, prior, target, num_leapfrog, step_size, log_mass)
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `betas` |  | Annealing schedule; the j-th intermediate target is an interpolation between the prior and the target distribution governed by beta_j | required |
| `prior` |  | The prior distribution to start the HAIS chain | required |
| `target` |  | The target distribution from which we would like to draw weighted samples | required |
| `num_leapfrog` |  | Number of leapfrog steps in the HMC transitions | required |
| `step_size` |  | Step size to use for the HMC transitions | required |
| `log_mass` |  | Log mass to use for the HMC transitions | required |
Source code in `normflows/sampling/hais.py`, lines 13–35.
sample(num_samples)
Run HAIS to draw samples from the target with appropriate weights.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `num_samples` |  | The number of samples to draw | required |
Source code in `normflows/sampling/hais.py`, lines 37–49.
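A sketch of drawing weighted samples; the import path follows the file shown above, and it is assumed here that `sample` returns the samples together with their (log) importance weights:

```python
import numpy as np
import torch
import normflows as nf
from normflows.sampling import HAIS

dim = 2
prior = nf.distributions.DiagGaussian(dim)
target = nf.distributions.TwoMoons()

# Linear annealing schedule from the prior (beta = 0) to the target (beta = 1)
betas = np.linspace(0.0, 1.0, 20)

hais = HAIS(
    betas=betas,
    prior=prior,
    target=target,
    num_leapfrog=5,
    step_size=0.1 * torch.ones(dim),
    log_mass=torch.zeros(dim),
)

samples, log_w = hais.sample(num_samples=256)
```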
transforms
Logit

Bases: `Flow`
Logit mapping of image tensor, see RealNVP paper
logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x))

Source code in `normflows/transforms.py`, lines 8–47.
__init__(alpha=0.05)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `alpha` |  | Alpha parameter, see above | `0.05` |
Source code in `normflows/transforms.py`, lines 17–24.
Shift
Bases: `Flow`
Shift data by a fixed constant
Default is -0.5, shifting data from the interval [0, 1] to [-0.5, 0.5].
Source code in `normflows/transforms.py`, lines 50–76.
__init__(shift=-0.5)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `shift` |  | Shift to apply to the data | `-0.5` |
Source code in `normflows/transforms.py`, lines 57–64.
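These flows are typically prepended to an image model. A sketch of the round trip, assuming the module is reachable as `nf.transforms`; which direction (forward vs. inverse) maps data to the unbounded space follows the package's flow convention, so check the source if in doubt:

```python
import torch
import normflows as nf

logit = nf.transforms.Logit(alpha=0.05)
shift = nf.transforms.Shift()   # shifts by -0.5 by default

x = torch.rand(16, 3, 8, 8)     # fake image batch in [0, 1]
y, log_det = logit(x)           # apply the logit mapping
x_rec, _ = logit.inverse(y)     # invert it again
assert torch.allclose(x, x_rec, atol=1e-5)
```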
transforms_test
utils

eval

bitsPerDim(model, x, y=None, trans='logit', trans_param=[0.05])
Computes the bits per dim for a batch of data.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` |  | Model to compute bits per dim for | required |
| `x` |  | Batch of data | required |
| `y` |  | Class labels for batch of data if base distribution is class conditional | `None` |
| `trans` |  | Transformation to be applied to images during training | `'logit'` |
| `trans_param` |  | List of parameters of the transformation | `[0.05]` |
Returns:

| Type | Description |
| --- | --- |
|  | Bits per dim for data batch under model |
Source code in `normflows/utils/eval.py`, lines 5–34.
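A self-contained sketch on toy data; in practice `model` would be a trained image flow and `x` a batch of images in [0, 1]:

```python
import torch
import normflows as nf
from normflows.utils.eval import bitsPerDim

# Toy flow over 4 "pixels"
base = nf.distributions.DiagGaussian(4)
flows = [nf.flows.Planar((4,)) for _ in range(4)]
model = nf.NormalizingFlow(base, flows)

x = torch.rand(16, 4)     # fake data batch in [0, 1]
b = bitsPerDim(model, x)  # logit transform with alpha = 0.05 by default
print(b.mean())
```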
bitsPerDimDataset(model, data_loader, class_cond=True, trans='logit', trans_param=[0.05])
Computes the average bits per dim for an entire dataset given by a data loader.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` |  | Model to compute bits per dim for | required |
| `data_loader` |  | Data loader of the dataset | required |
| `class_cond` |  | Flag indicating whether the model is class conditional | `True` |
| `trans` |  | Transformation to be applied to images during training | `'logit'` |
| `trans_param` |  | List of parameters of the transformation | `[0.05]` |
Returns:

| Type | Description |
| --- | --- |
|  | Average bits per dim for dataset |
Source code in `normflows/utils/eval.py`, lines 37–63.
masks
create_alternating_binary_mask(features, even=True)

Creates a binary mask of a given dimension which alternates its masking.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `features` |  | Dimension of mask | required |
| `even` |  | If True, even values are assigned 1s, odd 0s; if False, vice versa | `True` |
Returns:

| Type | Description |
| --- | --- |
|  | Alternating binary mask of type torch.Tensor |
Source code in `normflows/utils/masks.py`, lines 4–17.
create_mid_split_binary_mask(features)
Creates a binary mask of a given dimension which splits its masking at the midpoint.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `features` |  | Dimension of mask | required |

Returns:

| Type | Description |
| --- | --- |
|  | Binary mask split at midpoint, of type torch.Tensor |
Source code in `normflows/utils/masks.py`, lines 20–32.
create_random_binary_mask(features, seed=None)
Creates a random binary mask of a given dimension with half of its entries randomly set to 1s.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `features` |  | Dimension of mask | required |
| `seed` |  | Seed to be used | `None` |

Returns:

| Type | Description |
| --- | --- |
|  | Binary mask with half of its entries set to 1s, of type torch.Tensor |
Source code in `normflows/utils/masks.py`, lines 35–57.
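A quick look at the three constructors (import path as shown above); the exact dtype and which half receives the 1s follow the source:

```python
from normflows.utils.masks import (
    create_alternating_binary_mask,
    create_mid_split_binary_mask,
    create_random_binary_mask,
)

m1 = create_alternating_binary_mask(6)              # 1s on even positions
m2 = create_alternating_binary_mask(6, even=False)  # 1s on odd positions
m3 = create_mid_split_binary_mask(6)                # one half 1s, the other 0s
m4 = create_random_binary_mask(6, seed=0)           # three randomly placed 1s
print(m1, m2, m3, m4, sep="\n")
```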
nn
ActNorm

Bases: `Module`
ActNorm layer with just one forward pass
Source code in `normflows/utils/nn.py`, lines 26–43.
__init__(shape)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `shape` |  | Same as shape in flows.ActNorm | required |
| `logscale_factor` |  | Same as logscale_factor in flows.ActNorm | required |
Source code in `normflows/utils/nn.py`, lines 30–39.
ClampExp
Bases: `Module`
Nonlinearity min(exp(lam * x), 1)
Source code in `normflows/utils/nn.py`, lines 46–61.
__init__()
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `lam` |  | Lambda parameter | required |
Source code in `normflows/utils/nn.py`, lines 51–57.
ConstScaleLayer
Bases: `Module`
Scaling features by a fixed factor
Source code in `normflows/utils/nn.py`, lines 7–23.
__init__(scale=1.0)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `scale` |  | Scale to apply to features | `1.0` |
+
normflows/utils/nn.py
12 +13 +14 +15 +16 +17 +18 +19 +20 |
|
PeriodicFeaturesCat
+
+
+
+ Bases: Module
Converts a specified part of the input to periodic features by replacing those features f with [sin(scale * f), cos(scale * f)]. Note that this increases the number of features, and their order is changed.
Source code in `normflows/utils/nn.py`, lines 133–178.
__init__(ndim, ind, scale=1.0)
Constructor

- `ndim`: Int, number of dimensions
- `ind`: Iterable, indices of input elements to convert to periodic features
- `scale`: Scalar or iterable, used to scale inputs before converting them to periodic features

Source code in `normflows/utils/nn.py`, lines 142–169.
PeriodicFeaturesElementwise
Bases: `Module`
Converts a specified part of the input to periodic features by replacing those features f with w1 * sin(scale * f) + w2 * cos(scale * f). Note that this operation is done elementwise and, therefore, some information about the feature can be lost.
Source code in `normflows/utils/nn.py`, lines 64–130.
__init__(ndim, ind, scale=1.0, bias=False, activation=None)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `ndim` | `int` | Number of dimensions | required |
| `ind` | `iterable` | Indices of input elements to convert to periodic features | required |
| `scale` |  | Scalar or iterable, used to scale inputs before converting them to periodic features | `1.0` |
| `bias` |  | Flag, whether to add a bias | `False` |
| `activation` |  | Function or None, activation function to be applied | `None` |
Source code in `normflows/utils/nn.py`, lines 74–118.
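A sketch of preparing an angular input for an MLP; the layer keeps the feature count and replaces the marked coordinate by a learnable sin/cos combination:

```python
import torch
from normflows.utils.nn import PeriodicFeaturesElementwise

# 3 input dimensions; dimension 0 is an angle, scaled before sin/cos
pf = PeriodicFeaturesElementwise(ndim=3, ind=[0], scale=1.0)

x = torch.randn(8, 3)
y = pf(x)   # same shape; feature 0 becomes w1*sin(x0) + w2*cos(x0)
```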
sum_except_batch(x, num_batch_dims=1)
Sums all elements of `x` except for the first `num_batch_dims` dimensions.

Source code in `normflows/utils/nn.py`, lines 190–193.
optim
clear_grad(model)

Set gradients of model parameters to None, as this speeds up training (see the YouTube reference linked in the source).

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` |  | Model to clear gradients of | required |
Source code in `normflows/utils/optim.py`, lines 16–25.
set_requires_grad(module, flag)
Sets the `requires_grad` flag of all parameters of a `torch.nn.Module`.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `module` |  | A torch.nn.Module | required |
| `flag` |  | Flag to set requires_grad to | required |
Source code in `normflows/utils/optim.py`, lines 4–13.
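Typical use in a training loop, e.g. to freeze the base distribution of a model or to reset gradients cheaply:

```python
import normflows as nf
from normflows.utils.optim import clear_grad, set_requires_grad

base = nf.distributions.DiagGaussian(2)
flows = [nf.flows.Planar((2,)) for _ in range(4)]
model = nf.NormalizingFlow(base, flows)

set_requires_grad(model.q0, False)  # freeze the base distribution
clear_grad(model)                   # set all gradients to None
```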
preprocessing
Jitter

Transform for dataloader, adds uniform jitter noise to data.
Source code in `normflows/utils/preprocessing.py`, lines 28–42.
__init__(scale=1.0 / 256)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `scale` |  | Scaling factor for noise | `1.0 / 256` |
Source code in `normflows/utils/preprocessing.py`, lines 31–37.
Logit
Transform for dataloader.
logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x))

Source code in `normflows/utils/preprocessing.py`, lines 4–25.
__init__(alpha=0)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `alpha` |  | Alpha parameter, see above | `0` |
Source code in `normflows/utils/preprocessing.py`, lines 12–18.
Scale
Transform for dataloader, scales data by a fixed factor.
Source code in `normflows/utils/preprocessing.py`, lines 45–57.
__init__(scale=255.0 / 256.0)
Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `scale` |  | Scaling factor | `255.0 / 256.0` |
Source code in `normflows/utils/preprocessing.py`, lines 48–54.
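These classes follow the torchvision transform convention (callable on a tensor), so they compose into a dataloader pipeline; a sketch of the standard dequantization recipe for 8-bit images, assuming torchvision is installed:

```python
import torchvision.transforms as tvt
from normflows.utils.preprocessing import Jitter, Logit, Scale

# Scale to [0, 255/256], add uniform dequantization noise of width 1/256,
# then map to an unbounded space with the logit
transform = tvt.Compose([
    tvt.ToTensor(),
    Scale(255.0 / 256.0),
    Jitter(1.0 / 256),
    Logit(alpha=0.05),
])
```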
' + escapeHtml(summary) +'
' + noResultsText + '
'); + } +} + +function doSearch () { + var query = document.getElementById('mkdocs-search-query').value; + if (query.length > min_search_length) { + if (!window.Worker) { + displayResults(search(query)); + } else { + searchWorker.postMessage({query: query}); + } + } else { + // Clear results for short queries + displayResults([]); + } +} + +function initSearch () { + var search_input = document.getElementById('mkdocs-search-query'); + if (search_input) { + search_input.addEventListener("keyup", doSearch); + } + var term = getSearchTermFromLocation(); + if (term) { + search_input.value = term; + doSearch(); + } +} + +function onWorkerMessage (e) { + if (e.data.allowSearch) { + initSearch(); + } else if (e.data.results) { + var results = e.data.results; + displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; + } +} + +if (!window.Worker) { + console.log('Web Worker API not supported'); + // load index in main thread + $.getScript(joinUrl(base_url, "search/worker.js")).done(function () { + console.log('Loaded worker'); + init(); + window.postMessage = function (msg) { + onWorkerMessage({data: msg}); + }; + }).fail(function (jqxhr, settings, exception) { + console.error('Could not load worker.js'); + }); +} else { + // Wrap search in a web worker + var searchWorker = new Worker(joinUrl(base_url, "search/worker.js")); + searchWorker.postMessage({init: true}); + searchWorker.onmessage = onWorkerMessage; +} diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..6cd882d5 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"normflows : A PyTorch Package for Normalizing Flows normflows is a PyTorch implementation of discrete normalizing flows. Many popular flow architectures are implemented, see the list below . The package can be easily installed via pip . The basic usage is described here , and a full documentation is available as well. A more detailed description of this package is given in our accompanying paper . Several sample use cases are provided in the examples folder , including Glow , a VAE , and a Residual Flow . Moreover, two simple applications are highlighed in the examples section . You can run them yourself in Google Colab using the links below to get a feeling for normflows . Link Description Real NVP applied to a 2D bimodal target distribution Modeling a distribution on a cylinder surface with a neural spline flow Modeling and generating CIFAR-10 images with Glow Implemented Flows Architecture Reference Planar Flow Rezende & Mohamed, 2015 Radial Flow Rezende & Mohamed, 2015 NICE Dinh et al., 2014 Real NVP Dinh et al., 2017 Glow Kingma et al., 2018 Masked Autoregressive Flow Papamakarios et al., 2017 Neural Spline Flow Durkan et al., 2019 Circular Neural Spline Flow Rezende et al., 2020 Residual Flow Chen et al., 2019 Stochastic Normalizing Flow Wu et al., 2020 Note that Neural Spline Flows with circular and non-circular coordinates are supported as well. Installation The latest version of the package can be installed via pip pip install normflows At least Python 3.7 is required. If you want to use a GPU, make sure that PyTorch is set up correctly by following the instructions at the PyTorch website . 
To run the example notebooks clone the repository first git clone https://github.com/VincentStimper/normalizing-flows.git and then install the dependencies. pip install -r requirements_examples.txt Usage A normalizing flow consists of a base distribution, defined in nf.distributions.base , and a list of flows, given in nf.flows . Let's assume our target is a 2D distribution. We pick a diagonal Gaussian base distribution, which is the most popular choice. Our flow shall be a Real NVP model and, therefore, we need to define a neural network for computing the parameters of the affine coupling map. One dimension is used to compute the scale and shift parameter for the other dimension. After each coupling layer we swap their roles. import normflows as nf # Define 2D Gaussian base distribution base = nf.distributions.base.DiagGaussian(2) # Define list of flows num_layers = 32 flows = [] for i in range(num_layers): # Neural network with two hidden layers having 64 units each # Last layer is initialized by zeros making training more stable param_map = nf.nets.MLP([1, 64, 64, 2], init_zeros=True) # Add flow layer flows.append(nf.flows.AffineCouplingBlock(param_map)) # Swap dimensions flows.append(nf.flows.Permute(2, mode='swap')) Once they are set up, we can define a nf.NormalizingFlow model. If the target density is available, it can be added to the model to be used during training. Sample target distributions are given in nf.distributions.target . # If the target density is not given model = nf.NormalizingFlow(base, flows) # If the target density is given target = nf.distributions.target.TwoMoons() model = nf.NormalizingFlow(base, flows, target) The loss can be computed with the methods of the model and minimized. # When doing maximum likelihood learning, i.e. minimizing the forward KLD # with no target distribution given loss = model.forward_kld(x) # When minimizing the reverse KLD based on the given target distribution loss = model.reverse_kld(num_samples=512) # Optimization as usual loss.backward() optimizer.step() Examples We provide several illustrative examples of how to use the package in the examples directory. Among them are implementations of Glow , a VAE , and a Residual Flow . More advanced experiments can be done with the scripts listed in the repository about resampled base distributions , see its experiments folder. Below, we consider two simple 2D examples. Real NVP applied to a 2D bimodal target distribution In this notebook , which can directly be opened in Colab , we consider a 2D distribution with two half-moon-shaped modes as a target. We approximate it with a Real NVP model and obtain the following results. Note that there might be a density filament connecting the two modes, which is due to an architectural limitation of normalizing flows, especially prominent in Real NVP. You can find out more about it in this paper . Modeling a distribution on a cylinder surface with a neural spline flow In another example , which is available in Colab as well, we apply a Neural Spline Flow model to a distribution defined on a cylinder. The resulting density is visualized below. This example is considered in the paper accompanying this repository. Support If you have problems, please read the package documentation and check out the examples section above. You are also welcome to create issues on GitHub to get help. Note that it is worthwhile browsing the existing open and closed issues, which might address the problem you are facing. 
Contributing If you find a bug or have a feature request, please file an issue on GitHub . You are welcome to contribute to the package by fixing the bug or adding the feature yourself. If you want to contribute, please add tests for the code you added or modified and ensure it passes successfully by running pytest . This can be done by simply executing pytest within your local version of the repository. Make sure you code is well documented, and we also encourage contributions to the existing documentation. Once you finished coding and testing, please create a pull request on GitHub . Used by The package has been used in several research papers. Some of them are listed below. Andrew Campbell, Wenlong Chen, Vincent Stimper, Jos\u00e9 Miguel Hern\u00e1ndez-Lobato, and Yichuan Zhang. A gradient based strategy for Hamiltonian Monte Carlo hyperparameter optimization . In Proceedings of the 38th International Conference on Machine Learning, pp. 1238\u20131248. PMLR, 2021. Code available on GitHub. Vincent Stimper, Bernhard Sch\u00f6lkopf, and Jos\u00e9 Miguel Hern\u00e1ndez-Lobato. Resampling Base Distributions of Normalizing Flows . In Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, volume 151, pp. 4915\u20134936, 2022. Code available on GitHub. Laurence I. Midgley, Vincent Stimper, Gregor N. C. Simm, Bernhard Sch\u00f6lkopf, and Jos\u00e9 Miguel Hern\u00e1ndez-Lobato. Flow Annealed Importance Sampling Bootstrap . The Eleventh International Conference on Learning Representations, 2023. Code available on GitHub. Arnau Quera-Bofarull, Joel Dyer, Anisoara Calinescu, J. Doyne Farmer, and Michael Wooldridge. BlackBIRDS: Black-Box Inference foR Differentiable Simulators . Journal of Open Source Software, 8(89), 5776, 2023. Code available on GitHub. Utkarsh Singhal, Carlos Esteves, Ameesh Makadia, and Stella X. Yu. Learning to Transform for Generalizable Instance-wise Invariance . Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 6211-6221, 2023. Code available on GitHub. Ba-Hien Tran, Giulio Franzese, Pietro Michiardi, and Maurizio Filippone. One-Line-of-Code Data Mollification Improves Optimization of Likelihood-based Generative Models . Advances in Neural Information Processing Systems 36, pp. 6545\u20136567, 2023. Code available on GitHub. Moreover, the boltzgen package has been build upon normflows . Citation If you use normflows , please cite the corresponding paper as follows. Stimper et al., (2023). normflows: A PyTorch Package for Normalizing Flows. Journal of Open Source Software, 8(86), 5361, https://doi.org/10.21105/joss.05361 Bibtex @article{Stimper2023, author = {Vincent Stimper and David Liu and Andrew Campbell and Vincent Berenz and Lukas Ryll and Bernhard Sch\u00f6lkopf and Jos\u00e9 Miguel Hern\u00e1ndez-Lobato}, title = {normflows: A PyTorch Package for Normalizing Flows}, journal = {Journal of Open Source Software}, volume = {8}, number = {86}, pages = {5361}, publisher = {The Open Journal}, doi = {10.21105/joss.05361}, url = {https://doi.org/10.21105/joss.05361}, year = {2023} }","title":"About"},{"location":"#normflows-a-pytorch-package-for-normalizing-flows","text":"normflows is a PyTorch implementation of discrete normalizing flows. Many popular flow architectures are implemented, see the list below . The package can be easily installed via pip . The basic usage is described here , and a full documentation is available as well. 
A more detailed description of this package is given in our accompanying paper . Several sample use cases are provided in the examples folder , including Glow , a VAE , and a Residual Flow . Moreover, two simple applications are highlighed in the examples section . You can run them yourself in Google Colab using the links below to get a feeling for normflows . Link Description Real NVP applied to a 2D bimodal target distribution Modeling a distribution on a cylinder surface with a neural spline flow Modeling and generating CIFAR-10 images with Glow","title":"normflows: A PyTorch Package for Normalizing Flows"},{"location":"#implemented-flows","text":"Architecture Reference Planar Flow Rezende & Mohamed, 2015 Radial Flow Rezende & Mohamed, 2015 NICE Dinh et al., 2014 Real NVP Dinh et al., 2017 Glow Kingma et al., 2018 Masked Autoregressive Flow Papamakarios et al., 2017 Neural Spline Flow Durkan et al., 2019 Circular Neural Spline Flow Rezende et al., 2020 Residual Flow Chen et al., 2019 Stochastic Normalizing Flow Wu et al., 2020 Note that Neural Spline Flows with circular and non-circular coordinates are supported as well.","title":"Implemented Flows"},{"location":"#installation","text":"The latest version of the package can be installed via pip pip install normflows At least Python 3.7 is required. If you want to use a GPU, make sure that PyTorch is set up correctly by following the instructions at the PyTorch website . To run the example notebooks clone the repository first git clone https://github.com/VincentStimper/normalizing-flows.git and then install the dependencies. pip install -r requirements_examples.txt","title":"Installation"},{"location":"#usage","text":"A normalizing flow consists of a base distribution, defined in nf.distributions.base , and a list of flows, given in nf.flows . Let's assume our target is a 2D distribution. We pick a diagonal Gaussian base distribution, which is the most popular choice. Our flow shall be a Real NVP model and, therefore, we need to define a neural network for computing the parameters of the affine coupling map. One dimension is used to compute the scale and shift parameter for the other dimension. After each coupling layer we swap their roles. import normflows as nf # Define 2D Gaussian base distribution base = nf.distributions.base.DiagGaussian(2) # Define list of flows num_layers = 32 flows = [] for i in range(num_layers): # Neural network with two hidden layers having 64 units each # Last layer is initialized by zeros making training more stable param_map = nf.nets.MLP([1, 64, 64, 2], init_zeros=True) # Add flow layer flows.append(nf.flows.AffineCouplingBlock(param_map)) # Swap dimensions flows.append(nf.flows.Permute(2, mode='swap')) Once they are set up, we can define a nf.NormalizingFlow model. If the target density is available, it can be added to the model to be used during training. Sample target distributions are given in nf.distributions.target . # If the target density is not given model = nf.NormalizingFlow(base, flows) # If the target density is given target = nf.distributions.target.TwoMoons() model = nf.NormalizingFlow(base, flows, target) The loss can be computed with the methods of the model and minimized. # When doing maximum likelihood learning, i.e. 
minimizing the forward KLD # with no target distribution given loss = model.forward_kld(x) # When minimizing the reverse KLD based on the given target distribution loss = model.reverse_kld(num_samples=512) # Optimization as usual loss.backward() optimizer.step()","title":"Usage"},{"location":"#examples","text":"We provide several illustrative examples of how to use the package in the examples directory. Among them are implementations of Glow , a VAE , and a Residual Flow . More advanced experiments can be done with the scripts listed in the repository about resampled base distributions , see its experiments folder. Below, we consider two simple 2D examples.","title":"Examples"},{"location":"#real-nvp-applied-to-a-2d-bimodal-target-distribution","text":"In this notebook , which can directly be opened in Colab , we consider a 2D distribution with two half-moon-shaped modes as a target. We approximate it with a Real NVP model and obtain the following results. Note that there might be a density filament connecting the two modes, which is due to an architectural limitation of normalizing flows, especially prominent in Real NVP. You can find out more about it in this paper .","title":"Real NVP applied to a 2D bimodal target distribution"},{"location":"#modeling-a-distribution-on-a-cylinder-surface-with-a-neural-spline-flow","text":"In another example , which is available in Colab as well, we apply a Neural Spline Flow model to a distribution defined on a cylinder. The resulting density is visualized below. This example is considered in the paper accompanying this repository.","title":"Modeling a distribution on a cylinder surface with a neural spline flow"},{"location":"#support","text":"If you have problems, please read the package documentation and check out the examples section above. You are also welcome to create issues on GitHub to get help. Note that it is worthwhile browsing the existing open and closed issues, which might address the problem you are facing.","title":"Support"},{"location":"#contributing","text":"If you find a bug or have a feature request, please file an issue on GitHub . You are welcome to contribute to the package by fixing the bug or adding the feature yourself. If you want to contribute, please add tests for the code you added or modified and ensure it passes successfully by running pytest . This can be done by simply executing pytest within your local version of the repository. Make sure you code is well documented, and we also encourage contributions to the existing documentation. Once you finished coding and testing, please create a pull request on GitHub .","title":"Contributing"},{"location":"#used-by","text":"The package has been used in several research papers. Some of them are listed below. Andrew Campbell, Wenlong Chen, Vincent Stimper, Jos\u00e9 Miguel Hern\u00e1ndez-Lobato, and Yichuan Zhang. A gradient based strategy for Hamiltonian Monte Carlo hyperparameter optimization . In Proceedings of the 38th International Conference on Machine Learning, pp. 1238\u20131248. PMLR, 2021. Code available on GitHub. Vincent Stimper, Bernhard Sch\u00f6lkopf, and Jos\u00e9 Miguel Hern\u00e1ndez-Lobato. Resampling Base Distributions of Normalizing Flows . In Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, volume 151, pp. 4915\u20134936, 2022. Code available on GitHub. Laurence I. Midgley, Vincent Stimper, Gregor N. C. Simm, Bernhard Sch\u00f6lkopf, and Jos\u00e9 Miguel Hern\u00e1ndez-Lobato. 
Flow Annealed Importance Sampling Bootstrap . The Eleventh International Conference on Learning Representations, 2023. Code available on GitHub. Arnau Quera-Bofarull, Joel Dyer, Anisoara Calinescu, J. Doyne Farmer, and Michael Wooldridge. BlackBIRDS: Black-Box Inference foR Differentiable Simulators . Journal of Open Source Software, 8(89), 5776, 2023. Code available on GitHub. Utkarsh Singhal, Carlos Esteves, Ameesh Makadia, and Stella X. Yu. Learning to Transform for Generalizable Instance-wise Invariance . Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 6211-6221, 2023. Code available on GitHub. Ba-Hien Tran, Giulio Franzese, Pietro Michiardi, and Maurizio Filippone. One-Line-of-Code Data Mollification Improves Optimization of Likelihood-based Generative Models . Advances in Neural Information Processing Systems 36, pp. 6545\u20136567, 2023. Code available on GitHub. Moreover, the boltzgen package has been build upon normflows .","title":"Used by"},{"location":"#citation","text":"If you use normflows , please cite the corresponding paper as follows. Stimper et al., (2023). normflows: A PyTorch Package for Normalizing Flows. Journal of Open Source Software, 8(86), 5361, https://doi.org/10.21105/joss.05361 Bibtex @article{Stimper2023, author = {Vincent Stimper and David Liu and Andrew Campbell and Vincent Berenz and Lukas Ryll and Bernhard Sch\u00f6lkopf and Jos\u00e9 Miguel Hern\u00e1ndez-Lobato}, title = {normflows: A PyTorch Package for Normalizing Flows}, journal = {Journal of Open Source Software}, volume = {8}, number = {86}, pages = {5361}, publisher = {The Open Journal}, doi = {10.21105/joss.05361}, url = {https://doi.org/10.21105/joss.05361}, year = {2023} }","title":"Citation"},{"location":"references/","text":"API references core ClassCondFlow Bases: Module Class conditional normalizing Flow model, providing the class to be conditioned on only to the base distribution, as done e.g. in Glow Source code in normflows/core.py 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 class ClassCondFlow ( nn . Module ): \"\"\" Class conditional normalizing Flow model, providing the class to be conditioned on only to the base distribution, as done e.g. in [Glow](https://arxiv.org/abs/1807.03039) \"\"\" def __init__ ( self , q0 , flows ): \"\"\"Constructor Args: q0: Base distribution flows: List of flows \"\"\" super () . __init__ () self . q0 = q0 self . flows = nn . ModuleList ( flows ) def forward_kld ( self , x , y ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z , y ) return - torch . mean ( log_q ) def sample ( self , num_samples = 1 , y = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw y: Classes to sample from, will be sampled uniformly if None Returns: Samples, log probability \"\"\" z , log_q = self . 
q0 ( num_samples , y ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det return z , log_q def log_prob ( self , x , y ): \"\"\"Get log probability for batch Args: x: Batch y: Classes of x Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z , y ) return log_q def save ( self , path ): \"\"\"Save state dict of model Args: param path: Path including filename where to save model \"\"\" torch . save ( self . state_dict (), path ) def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . load ( path )) __init__ ( q0 , flows ) Constructor Parameters: Name Type Description Default q0 Base distribution required flows List of flows required Source code in normflows/core.py 376 377 378 379 380 381 382 383 384 385 def __init__ ( self , q0 , flows ): \"\"\"Constructor Args: q0: Base distribution flows: List of flows \"\"\" super () . __init__ () self . q0 = q0 self . flows = nn . ModuleList ( flows ) forward_kld ( x , y ) Estimates forward KL divergence, see arXiv 1912.02762 Parameters: Name Type Description Default x Batch sampled from target distribution required Returns: Type Description Estimate of forward KL divergence averaged over batch Source code in normflows/core.py 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 def forward_kld ( self , x , y ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z , y ) return - torch . mean ( log_q ) load ( path ) Load model from state dict Parameters: Name Type Description Default path Path including filename where to load model from required Source code in normflows/core.py 446 447 448 449 450 451 452 def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . load ( path )) log_prob ( x , y ) Get log probability for batch Parameters: Name Type Description Default x Batch required y Classes of x required Returns: Type Description log probability Source code in normflows/core.py 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 def log_prob ( self , x , y ): \"\"\"Get log probability for batch Args: x: Batch y: Classes of x Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . 
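
# --- Aside: example usage of ClassCondFlow ---
# A minimal, hypothetical sketch of maximum-likelihood training with the
# ClassCondFlow interface documented here. The Planar layers and the random
# data/labels are placeholders; ClassCondFlow passes the class label y only
# to the base distribution, as described above. If ClassCondFlow is not
# exported at the package level, use normflows.core.ClassCondFlow instead.
import torch
import normflows as nf

num_classes, latent_size = 10, 2
q0 = nf.distributions.ClassCondDiagGaussian(latent_size, num_classes)
flows = [nf.flows.Planar((latent_size,)) for _ in range(4)]  # placeholder layers
model = nf.ClassCondFlow(q0, flows)

x = torch.randn(32, latent_size)           # dummy data batch
y = torch.randint(num_classes, (32,))      # dummy integer class labels
loss = model.forward_kld(x, y)             # negative mean log-likelihood
loss.backward()

z, log_q = model.sample(num_samples=8, y=torch.arange(8) % num_classes)
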
log_prob ( z , y ) return log_q sample ( num_samples = 1 , y = None ) Samples from flow-based approximate distribution Parameters: Name Type Description Default num_samples Number of samples to draw 1 y Classes to sample from, will be sampled uniformly if None None Returns: Type Description Samples, log probability Source code in normflows/core.py 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 def sample ( self , num_samples = 1 , y = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw y: Classes to sample from, will be sampled uniformly if None Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples , y ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det return z , log_q save ( path ) Save state dict of model Parameters: Name Type Description Default param path Path including filename where to save model required Source code in normflows/core.py 438 439 440 441 442 443 444 def save ( self , path ): \"\"\"Save state dict of model Args: param path: Path including filename where to save model \"\"\" torch . save ( self . state_dict (), path ) ConditionalNormalizingFlow Bases: NormalizingFlow Conditional normalizing flow model, providing condition, which is also called context, to both the base distribution and the flow layers Source code in normflows/core.py 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 class ConditionalNormalizingFlow ( NormalizingFlow ): \"\"\" Conditional normalizing flow model, providing condition, which is also called context, to both the base distribution and the flow layers \"\"\" def forward ( self , z , context = None ): \"\"\"Transforms latent variable z to the flow variable x Args: z: Batch in the latent space context: Batch of conditions/context Returns: Batch in the space of the target distribution \"\"\" for flow in self . flows : z , _ = flow ( z , context = context ) return z def forward_and_log_det ( self , z , context = None ): \"\"\"Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian Args: z: Batch in the latent space context: Batch of conditions/context Returns: Batch in the space of the target distribution, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( z ), device = z . device ) for flow in self . flows : z , log_d = flow ( z , context = context ) log_det += log_d return z , log_det def inverse ( self , x , context = None ): \"\"\"Transforms flow variable x to the latent variable z Args: x: Batch in the space of the target distribution context: Batch of conditions/context Returns: Batch in the latent space \"\"\" for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , _ = self . flows [ i ] . 
inverse ( x , context = context ) return x def inverse_and_log_det ( self , x , context = None ): \"\"\"Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian Args: x: Batch in the space of the target distribution context: Batch of conditions/context Returns: Batch in the latent space, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( x ), device = x . device ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , log_d = self . flows [ i ] . inverse ( x , context = context ) log_det += log_d return x , log_det def sample ( self , num_samples = 1 , context = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw context: Batch of conditions/context Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples , context = context ) for flow in self . flows : z , log_det = flow ( z , context = context ) log_q -= log_det return z , log_q def log_prob ( self , x , context = None ): \"\"\"Get log probability for batch Args: x: Batch context: Batch of conditions/context Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z , context = context ) return log_q def forward_kld ( self , x , context = None ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution context: Batch of conditions/context Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z , context = context ) return - torch . mean ( log_q ) def reverse_kld ( self , num_samples = 1 , context = None , beta = 1.0 , score_fn = True ): \"\"\"Estimates reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: num_samples: Number of samples to draw from base distribution context: Batch of conditions/context beta: Annealing parameter, see [arXiv 1505.05770](https://arxiv.org/abs/1505.05770) score_fn: Flag whether to include score function in gradient, see [arXiv 1703.09194](https://arxiv.org/abs/1703.09194) Returns: Estimate of the reverse KL divergence averaged over latent samples \"\"\" z , log_q_ = self . q0 ( num_samples , context = context ) log_q = torch . zeros_like ( log_q_ ) log_q += log_q_ for flow in self . flows : z , log_det = flow ( z , context = context ) log_q -= log_det if not score_fn : z_ = z log_q = torch . zeros ( len ( z_ ), device = z_ . device ) utils . set_requires_grad ( self , False ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z_ , log_det = self . flows [ i ] . inverse ( z_ , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z_ , context = context ) utils . set_requires_grad ( self , True ) log_p = self . p . log_prob ( z , context = context ) return torch . mean ( log_q ) - beta * torch . 
mean ( log_p ) forward ( z , context = None ) Transforms latent variable z to the flow variable x Parameters: Name Type Description Default z Batch in the latent space required context Batch of conditions/context None Returns: Type Description Batch in the space of the target distribution Source code in normflows/core.py 222 223 224 225 226 227 228 229 230 231 232 233 234 def forward ( self , z , context = None ): \"\"\"Transforms latent variable z to the flow variable x Args: z: Batch in the latent space context: Batch of conditions/context Returns: Batch in the space of the target distribution \"\"\" for flow in self . flows : z , _ = flow ( z , context = context ) return z forward_and_log_det ( z , context = None ) Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian Parameters: Name Type Description Default z Batch in the latent space required context Batch of conditions/context None Returns: Type Description Batch in the space of the target distribution, log determinant of the Jacobian Source code in normflows/core.py 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 def forward_and_log_det ( self , z , context = None ): \"\"\"Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian Args: z: Batch in the latent space context: Batch of conditions/context Returns: Batch in the space of the target distribution, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( z ), device = z . device ) for flow in self . flows : z , log_d = flow ( z , context = context ) log_det += log_d return z , log_det forward_kld ( x , context = None ) Estimates forward KL divergence, see arXiv 1912.02762 Parameters: Name Type Description Default x Batch sampled from target distribution required context Batch of conditions/context None Returns: Type Description Estimate of forward KL divergence averaged over batch Source code in normflows/core.py 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 def forward_kld ( self , x , context = None ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution context: Batch of conditions/context Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z , context = context ) return - torch . mean ( log_q ) inverse ( x , context = None ) Transforms flow variable x to the latent variable z Parameters: Name Type Description Default x Batch in the space of the target distribution required context Batch of conditions/context None Returns: Type Description Batch in the latent space Source code in normflows/core.py 254 255 256 257 258 259 260 261 262 263 264 265 266 def inverse ( self , x , context = None ): \"\"\"Transforms flow variable x to the latent variable z Args: x: Batch in the space of the target distribution context: Batch of conditions/context Returns: Batch in the latent space \"\"\" for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , _ = self . flows [ i ] . 
inverse ( x , context = context ) return x inverse_and_log_det ( x , context = None ) Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian Parameters: Name Type Description Default x Batch in the space of the target distribution required context Batch of conditions/context None Returns: Type Description Batch in the latent space, log determinant of the Jacobian Source code in normflows/core.py 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 def inverse_and_log_det ( self , x , context = None ): \"\"\"Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian Args: x: Batch in the space of the target distribution context: Batch of conditions/context Returns: Batch in the latent space, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( x ), device = x . device ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , log_d = self . flows [ i ] . inverse ( x , context = context ) log_det += log_d return x , log_det log_prob ( x , context = None ) Get log probability for batch Parameters: Name Type Description Default x Batch required context Batch of conditions/context None Returns: Type Description log probability Source code in normflows/core.py 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 def log_prob ( self , x , context = None ): \"\"\"Get log probability for batch Args: x: Batch context: Batch of conditions/context Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z , context = context ) return log_q reverse_kld ( num_samples = 1 , context = None , beta = 1.0 , score_fn = True ) Estimates reverse KL divergence, see arXiv 1912.02762 Parameters: Name Type Description Default num_samples Number of samples to draw from base distribution 1 context Batch of conditions/context None beta Annealing parameter, see arXiv 1505.05770 1.0 score_fn Flag whether to include score function in gradient, see arXiv 1703.09194 True Returns: Type Description Estimate of the reverse KL divergence averaged over latent samples Source code in normflows/core.py 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 def reverse_kld ( self , num_samples = 1 , context = None , beta = 1.0 , score_fn = True ): \"\"\"Estimates reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: num_samples: Number of samples to draw from base distribution context: Batch of conditions/context beta: Annealing parameter, see [arXiv 1505.05770](https://arxiv.org/abs/1505.05770) score_fn: Flag whether to include score function in gradient, see [arXiv 1703.09194](https://arxiv.org/abs/1703.09194) Returns: Estimate of the reverse KL divergence averaged over latent samples \"\"\" z , log_q_ = self . q0 ( num_samples , context = context ) log_q = torch . zeros_like ( log_q_ ) log_q += log_q_ for flow in self . flows : z , log_det = flow ( z , context = context ) log_q -= log_det if not score_fn : z_ = z log_q = torch . zeros ( len ( z_ ), device = z_ . device ) utils . set_requires_grad ( self , False ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z_ , log_det = self . flows [ i ] . inverse ( z_ , context = context ) log_q += log_det log_q += self . q0 . 
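
# --- Aside: example usage of ConditionalNormalizingFlow ---
# A minimal sketch, assuming flow layers that accept a `context` argument; the
# autoregressive spline layer and its signature below are taken from this
# package's neural spline flows but should be checked against the API
# reference. The MLP context encoder and all sizes are placeholders.
import torch
import normflows as nf

latent_size, context_size = 2, 3
flows = [
    nf.flows.AutoregressiveRationalQuadraticSpline(
        latent_size, 1, 64, num_context_channels=context_size
    )
    for _ in range(4)
]
# Base distribution whose mean and log scale are computed from the context
context_encoder = nf.nets.MLP([context_size, 32, 2 * latent_size])
q0 = nf.distributions.ConditionalDiagGaussian(latent_size, context_encoder)
model = nf.ConditionalNormalizingFlow(q0, flows)

x = torch.randn(16, latent_size)           # dummy data
c = torch.randn(16, context_size)          # dummy conditions
loss = model.forward_kld(x, context=c)     # conditional maximum likelihood
samples, log_q = model.sample(num_samples=16, context=c)
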
log_prob ( z_ , context = context ) utils . set_requires_grad ( self , True ) log_p = self . p . log_prob ( z , context = context ) return torch . mean ( log_q ) - beta * torch . mean ( log_p ) sample ( num_samples = 1 , context = None ) Samples from flow-based approximate distribution Parameters: Name Type Description Default num_samples Number of samples to draw 1 context Batch of conditions/context None Returns: Type Description Samples, log probability Source code in normflows/core.py 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 def sample ( self , num_samples = 1 , context = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw context: Batch of conditions/context Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples , context = context ) for flow in self . flows : z , log_det = flow ( z , context = context ) log_q -= log_det return z , log_q MultiscaleFlow Bases: Module Normalizing Flow model with multiscale architecture, see RealNVP or Glow paper Source code in normflows/core.py 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 class MultiscaleFlow ( nn . Module ): \"\"\" Normalizing Flow model with multiscale architecture, see RealNVP or Glow paper \"\"\" def __init__ ( self , q0 , flows , merges , transform = None , class_cond = True ): \"\"\"Constructor Args: q0: List of base distribution flows: List of list of flows for each level merges: List of merge/split operations (forward pass must do merge) transform: Initial transformation of inputs class_cond: Flag, indicated whether model has class conditional base distributions \"\"\" super () . __init__ () self . q0 = nn . ModuleList ( q0 ) self . num_levels = len ( self . q0 ) self . flows = torch . nn . ModuleList ([ nn . ModuleList ( flow ) for flow in flows ]) self . merges = torch . nn . ModuleList ( merges ) self . transform = transform self . class_cond = class_cond def forward_kld ( self , x , y = None ): \"\"\"Estimates forward KL divergence, see see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution y: Batch of targets, if applicable Returns: Estimate of forward KL divergence averaged over batch \"\"\" return - torch . mean ( self . log_prob ( x , y )) def forward ( self , x , y = None ): \"\"\"Get negative log-likelihood for maximum likelihood training Args: x: Batch of data y: Batch of targets, if applicable Returns: Negative log-likelihood of the batch \"\"\" return - self . log_prob ( x , y ) def forward_and_log_det ( self , z ): \"\"\"Get observed variable x from list of latent variables z Args: z: List of latent variables Returns: Observed variable x, log determinant of Jacobian \"\"\" log_det = torch . zeros ( len ( z [ 0 ]), dtype = z [ 0 ] . 
dtype , device = z [ 0 ] . device ) for i in range ( len ( self . q0 )): if i == 0 : z_ = z [ 0 ] else : z_ , log_det_ = self . merges [ i - 1 ]([ z_ , z [ i ]]) log_det += log_det_ for flow in self . flows [ i ]: z_ , log_det_ = flow ( z_ ) log_det += log_det_ if self . transform is not None : z_ , log_det_ = self . transform ( z_ ) log_det += log_det_ return z_ , log_det def inverse_and_log_det ( self , x ): \"\"\"Get latent variable z from observed variable x Args: x: Observed variable Returns: List of latent variables z, log determinant of Jacobian \"\"\" log_det = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) if self . transform is not None : x , log_det_ = self . transform . inverse ( x ) log_det += log_det_ z = [ None ] * len ( self . q0 ) for i in range ( len ( self . q0 ) - 1 , - 1 , - 1 ): for flow in reversed ( self . flows [ i ]): x , log_det_ = flow . inverse ( x ) log_det += log_det_ if i == 0 : z [ i ] = x else : [ x , z [ i ]], log_det_ = self . merges [ i - 1 ] . inverse ( x ) log_det += log_det_ return z , log_det def sample ( self , num_samples = 1 , y = None , temperature = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw y: Classes to sample from, will be sampled uniformly if None temperature: Temperature parameter for temp annealed sampling Returns: Samples, log probability \"\"\" if temperature is not None : self . set_temperature ( temperature ) for i in range ( len ( self . q0 )): if self . class_cond : z_ , log_q_ = self . q0 [ i ]( num_samples , y ) else : z_ , log_q_ = self . q0 [ i ]( num_samples ) if i == 0 : log_q = log_q_ z = z_ else : log_q += log_q_ z , log_det = self . merges [ i - 1 ]([ z , z_ ]) log_q -= log_det for flow in self . flows [ i ]: z , log_det = flow ( z ) log_q -= log_det if self . transform is not None : z , log_det = self . transform ( z ) log_q -= log_det if temperature is not None : self . reset_temperature () return z , log_q def log_prob ( self , x , y ): \"\"\"Get log probability for batch Args: x: Batch y: Classes of x Returns: log probability \"\"\" log_q = 0 z = x if self . transform is not None : z , log_det = self . transform . inverse ( z ) log_q += log_det for i in range ( len ( self . q0 ) - 1 , - 1 , - 1 ): for j in range ( len ( self . flows [ i ]) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ][ j ] . inverse ( z ) log_q += log_det if i > 0 : [ z , z_ ], log_det = self . merges [ i - 1 ] . inverse ( z ) log_q += log_det else : z_ = z if self . class_cond : log_q += self . q0 [ i ] . log_prob ( z_ , y ) else : log_q += self . q0 [ i ] . log_prob ( z_ ) return log_q def save ( self , path ): \"\"\"Save state dict of model Args: path: Path including filename where to save model \"\"\" torch . save ( self . state_dict (), path ) def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . load ( path )) def set_temperature ( self , temperature ): \"\"\"Set temperature for temperature a annealed sampling Args: temperature: Temperature parameter \"\"\" for q0 in self . q0 : if hasattr ( q0 , \"temperature\" ): q0 . temperature = temperature else : raise NotImplementedError ( \"One base function does not \" \"support temperature annealed sampling\" ) def reset_temperature ( self ): \"\"\" Set temperature values of base distributions back to None \"\"\" self . 
set_temperature ( None ) __init__ ( q0 , flows , merges , transform = None , class_cond = True ) Constructor Args: q0: List of base distribution flows: List of list of flows for each level merges: List of merge/split operations (forward pass must do merge) transform: Initial transformation of inputs class_cond: Flag, indicated whether model has class conditional base distributions Source code in normflows/core.py 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 def __init__ ( self , q0 , flows , merges , transform = None , class_cond = True ): \"\"\"Constructor Args: q0: List of base distribution flows: List of list of flows for each level merges: List of merge/split operations (forward pass must do merge) transform: Initial transformation of inputs class_cond: Flag, indicated whether model has class conditional base distributions \"\"\" super () . __init__ () self . q0 = nn . ModuleList ( q0 ) self . num_levels = len ( self . q0 ) self . flows = torch . nn . ModuleList ([ nn . ModuleList ( flow ) for flow in flows ]) self . merges = torch . nn . ModuleList ( merges ) self . transform = transform self . class_cond = class_cond forward ( x , y = None ) Get negative log-likelihood for maximum likelihood training Parameters: Name Type Description Default x Batch of data required y Batch of targets, if applicable None Returns: Type Description Negative log-likelihood of the batch Source code in normflows/core.py 492 493 494 495 496 497 498 499 500 501 502 def forward ( self , x , y = None ): \"\"\"Get negative log-likelihood for maximum likelihood training Args: x: Batch of data y: Batch of targets, if applicable Returns: Negative log-likelihood of the batch \"\"\" return - self . log_prob ( x , y ) forward_and_log_det ( z ) Get observed variable x from list of latent variables z Parameters: Name Type Description Default z List of latent variables required Returns: Type Description Observed variable x, log determinant of Jacobian Source code in normflows/core.py 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 def forward_and_log_det ( self , z ): \"\"\"Get observed variable x from list of latent variables z Args: z: List of latent variables Returns: Observed variable x, log determinant of Jacobian \"\"\" log_det = torch . zeros ( len ( z [ 0 ]), dtype = z [ 0 ] . dtype , device = z [ 0 ] . device ) for i in range ( len ( self . q0 )): if i == 0 : z_ = z [ 0 ] else : z_ , log_det_ = self . merges [ i - 1 ]([ z_ , z [ i ]]) log_det += log_det_ for flow in self . flows [ i ]: z_ , log_det_ = flow ( z_ ) log_det += log_det_ if self . transform is not None : z_ , log_det_ = self . transform ( z_ ) log_det += log_det_ return z_ , log_det forward_kld ( x , y = None ) Estimates forward KL divergence, see see arXiv 1912.02762 Parameters: Name Type Description Default x Batch sampled from target distribution required y Batch of targets, if applicable None Returns: Type Description Estimate of forward KL divergence averaged over batch Source code in normflows/core.py 480 481 482 483 484 485 486 487 488 489 490 def forward_kld ( self , x , y = None ): \"\"\"Estimates forward KL divergence, see see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution y: Batch of targets, if applicable Returns: Estimate of forward KL divergence averaged over batch \"\"\" return - torch . mean ( self . 
log_prob ( x , y )) inverse_and_log_det ( x ) Get latent variable z from observed variable x Parameters: Name Type Description Default x Observed variable required Returns: Type Description List of latent variables z, log determinant of Jacobian Source code in normflows/core.py 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 def inverse_and_log_det ( self , x ): \"\"\"Get latent variable z from observed variable x Args: x: Observed variable Returns: List of latent variables z, log determinant of Jacobian \"\"\" log_det = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) if self . transform is not None : x , log_det_ = self . transform . inverse ( x ) log_det += log_det_ z = [ None ] * len ( self . q0 ) for i in range ( len ( self . q0 ) - 1 , - 1 , - 1 ): for flow in reversed ( self . flows [ i ]): x , log_det_ = flow . inverse ( x ) log_det += log_det_ if i == 0 : z [ i ] = x else : [ x , z [ i ]], log_det_ = self . merges [ i - 1 ] . inverse ( x ) log_det += log_det_ return z , log_det load ( path ) Load model from state dict Parameters: Name Type Description Default path Path including filename where to load model from required Source code in normflows/core.py 626 627 628 629 630 631 632 def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . load ( path )) log_prob ( x , y ) Get log probability for batch Parameters: Name Type Description Default x Batch required y Classes of x required Returns: Type Description log probability Source code in normflows/core.py 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 def log_prob ( self , x , y ): \"\"\"Get log probability for batch Args: x: Batch y: Classes of x Returns: log probability \"\"\" log_q = 0 z = x if self . transform is not None : z , log_det = self . transform . inverse ( z ) log_q += log_det for i in range ( len ( self . q0 ) - 1 , - 1 , - 1 ): for j in range ( len ( self . flows [ i ]) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ][ j ] . inverse ( z ) log_q += log_det if i > 0 : [ z , z_ ], log_det = self . merges [ i - 1 ] . inverse ( z ) log_q += log_det else : z_ = z if self . class_cond : log_q += self . q0 [ i ] . log_prob ( z_ , y ) else : log_q += self . q0 [ i ] . log_prob ( z_ ) return log_q reset_temperature () Set temperature values of base distributions back to None Source code in normflows/core.py 649 650 651 652 653 def reset_temperature ( self ): \"\"\" Set temperature values of base distributions back to None \"\"\" self . 
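
# --- Aside: example of building a MultiscaleFlow ---
# A condensed sketch of a Glow-style multiscale model, following the pattern of
# the package's Glow example; the GlowBlock/Squeeze/Merge signatures and all
# hyperparameters are assumptions to be checked against the API reference.
import torch
import normflows as nf

L, K = 2, 4                          # number of levels and flow steps per level
input_shape = (3, 32, 32)            # (channels, height, width)
hidden_channels = 128
num_classes = 10

q0, flows, merges = [], [], []
for i in range(L):
    flows_ = [nf.flows.GlowBlock(input_shape[0] * 2 ** (L + 1 - i), hidden_channels)
              for _ in range(K)]
    flows_ += [nf.flows.Squeeze()]
    flows += [flows_]
    if i > 0:
        merges += [nf.flows.Merge()]
        latent_shape = (input_shape[0] * 2 ** (L - i),
                        input_shape[1] // 2 ** (L - i),
                        input_shape[2] // 2 ** (L - i))
    else:
        latent_shape = (input_shape[0] * 2 ** (L + 1),
                        input_shape[1] // 2 ** L,
                        input_shape[2] // 2 ** L)
    q0 += [nf.distributions.ClassCondDiagGaussian(latent_shape, num_classes)]

model = nf.MultiscaleFlow(q0, flows, merges)
# Class-conditional, temperature-annealed sampling (the base supports temperature)
y = torch.randint(num_classes, (4,))
x_sample, log_q = model.sample(num_samples=4, y=y, temperature=0.7)
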
set_temperature ( None ) sample ( num_samples = 1 , y = None , temperature = None ) Samples from flow-based approximate distribution Parameters: Name Type Description Default num_samples Number of samples to draw 1 y Classes to sample from, will be sampled uniformly if None None temperature Temperature parameter for temp annealed sampling None Returns: Type Description Samples, log probability Source code in normflows/core.py 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 def sample ( self , num_samples = 1 , y = None , temperature = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw y: Classes to sample from, will be sampled uniformly if None temperature: Temperature parameter for temp annealed sampling Returns: Samples, log probability \"\"\" if temperature is not None : self . set_temperature ( temperature ) for i in range ( len ( self . q0 )): if self . class_cond : z_ , log_q_ = self . q0 [ i ]( num_samples , y ) else : z_ , log_q_ = self . q0 [ i ]( num_samples ) if i == 0 : log_q = log_q_ z = z_ else : log_q += log_q_ z , log_det = self . merges [ i - 1 ]([ z , z_ ]) log_q -= log_det for flow in self . flows [ i ]: z , log_det = flow ( z ) log_q -= log_det if self . transform is not None : z , log_det = self . transform ( z ) log_q -= log_det if temperature is not None : self . reset_temperature () return z , log_q save ( path ) Save state dict of model Parameters: Name Type Description Default path Path including filename where to save model required Source code in normflows/core.py 618 619 620 621 622 623 624 def save ( self , path ): \"\"\"Save state dict of model Args: path: Path including filename where to save model \"\"\" torch . save ( self . state_dict (), path ) set_temperature ( temperature ) Set temperature for temperature a annealed sampling Parameters: Name Type Description Default temperature Temperature parameter required Source code in normflows/core.py 634 635 636 637 638 639 640 641 642 643 644 645 646 647 def set_temperature ( self , temperature ): \"\"\"Set temperature for temperature a annealed sampling Args: temperature: Temperature parameter \"\"\" for q0 in self . q0 : if hasattr ( q0 , \"temperature\" ): q0 . temperature = temperature else : raise NotImplementedError ( \"One base function does not \" \"support temperature annealed sampling\" ) NormalizingFlow Bases: Module Normalizing Flow model to approximate target distribution Source code in normflows/core.py 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 class NormalizingFlow ( nn . 
Module ): \"\"\" Normalizing Flow model to approximate target distribution \"\"\" def __init__ ( self , q0 , flows , p = None ): \"\"\"Constructor Args: q0: Base distribution flows: List of flows p: Target distribution \"\"\" super () . __init__ () self . q0 = q0 self . flows = nn . ModuleList ( flows ) self . p = p def forward ( self , z ): \"\"\"Transforms latent variable z to the flow variable x Args: z: Batch in the latent space Returns: Batch in the space of the target distribution \"\"\" for flow in self . flows : z , _ = flow ( z ) return z def forward_and_log_det ( self , z ): \"\"\"Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian Args: z: Batch in the latent space Returns: Batch in the space of the target distribution, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( z ), device = z . device ) for flow in self . flows : z , log_d = flow ( z ) log_det += log_d return z , log_det def inverse ( self , x ): \"\"\"Transforms flow variable x to the latent variable z Args: x: Batch in the space of the target distribution Returns: Batch in the latent space \"\"\" for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , _ = self . flows [ i ] . inverse ( x ) return x def inverse_and_log_det ( self , x ): \"\"\"Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian Args: x: Batch in the space of the target distribution Returns: Batch in the latent space, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( x ), device = x . device ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , log_d = self . flows [ i ] . inverse ( x ) log_det += log_d return x , log_det def forward_kld ( self , x ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z ) return - torch . mean ( log_q ) def reverse_kld ( self , num_samples = 1 , beta = 1.0 , score_fn = True ): \"\"\"Estimates reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: num_samples: Number of samples to draw from base distribution beta: Annealing parameter, see [arXiv 1505.05770](https://arxiv.org/abs/1505.05770) score_fn: Flag whether to include score function in gradient, see [arXiv 1703.09194](https://arxiv.org/abs/1703.09194) Returns: Estimate of the reverse KL divergence averaged over latent samples \"\"\" z , log_q_ = self . q0 ( num_samples ) log_q = torch . zeros_like ( log_q_ ) log_q += log_q_ for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det if not score_fn : z_ = z log_q = torch . zeros ( len ( z_ ), device = z_ . device ) utils . set_requires_grad ( self , False ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z_ , log_det = self . flows [ i ] . inverse ( z_ ) log_q += log_det log_q += self . q0 . log_prob ( z_ ) utils . set_requires_grad ( self , True ) log_p = self . p . log_prob ( z ) return torch . mean ( log_q ) - beta * torch . 
mean ( log_p ) def reverse_alpha_div ( self , num_samples = 1 , alpha = 1 , dreg = False ): \"\"\"Alpha divergence when sampling from q Args: num_samples: Number of samples to draw dreg: Flag whether to use Double Reparametrized Gradient estimator, see [arXiv 1810.04152](https://arxiv.org/abs/1810.04152) Returns: Alpha divergence \"\"\" z , log_q = self . q0 ( num_samples ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det log_p = self . p . log_prob ( z ) if dreg : w_const = torch . exp ( log_p - log_q ) . detach () z_ = z log_q = torch . zeros ( len ( z_ ), device = z_ . device ) utils . set_requires_grad ( self , False ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z_ , log_det = self . flows [ i ] . inverse ( z_ ) log_q += log_det log_q += self . q0 . log_prob ( z_ ) utils . set_requires_grad ( self , True ) w = torch . exp ( log_p - log_q ) w_alpha = w_const ** alpha w_alpha = w_alpha / torch . mean ( w_alpha ) weights = ( 1 - alpha ) * w_alpha + alpha * w_alpha ** 2 loss = - alpha * torch . mean ( weights * torch . log ( w )) else : loss = np . sign ( alpha - 1 ) * torch . logsumexp ( alpha * ( log_p - log_q ), 0 ) return loss def sample ( self , num_samples = 1 ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det return z , log_q def log_prob ( self , x ): \"\"\"Get log probability for batch Args: x: Batch Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z ) return log_q def save ( self , path ): \"\"\"Save state dict of model Args: path: Path including filename where to save model \"\"\" torch . save ( self . state_dict (), path ) def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . load ( path )) __init__ ( q0 , flows , p = None ) Constructor Parameters: Name Type Description Default q0 Base distribution required flows List of flows required p Target distribution None Source code in normflows/core.py 14 15 16 17 18 19 20 21 22 23 24 25 def __init__ ( self , q0 , flows , p = None ): \"\"\"Constructor Args: q0: Base distribution flows: List of flows p: Target distribution \"\"\" super () . __init__ () self . q0 = q0 self . flows = nn . ModuleList ( flows ) self . p = p forward ( z ) Transforms latent variable z to the flow variable x Parameters: Name Type Description Default z Batch in the latent space required Returns: Type Description Batch in the space of the target distribution Source code in normflows/core.py 27 28 29 30 31 32 33 34 35 36 37 38 def forward ( self , z ): \"\"\"Transforms latent variable z to the flow variable x Args: z: Batch in the latent space Returns: Batch in the space of the target distribution \"\"\" for flow in self . 
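
# --- Aside: variational inference with reverse_kld ---
# A minimal training sketch using NormalizingFlow.reverse_kld with annealing
# via `beta`, mirroring the examples elsewhere in this documentation; the
# target and the Planar layers are placeholders.
import torch
import normflows as nf

q0 = nf.distributions.DiagGaussian(2)
flows = [nf.flows.Planar((2,)) for _ in range(8)]
target = nf.distributions.TwoMoons()               # 2D target distribution
model = nf.NormalizingFlow(q0, flows, p=target)

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
anneal_iter = 1000
for it in range(2000):
    optimizer.zero_grad()
    beta = min(1.0, 0.01 + it / anneal_iter)       # anneal the target term
    loss = model.reverse_kld(num_samples=256, beta=beta)
    if not (torch.isnan(loss) | torch.isinf(loss)):
        loss.backward()
        optimizer.step()
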
flows : z , _ = flow ( z ) return z forward_and_log_det ( z ) Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian Parameters: Name Type Description Default z Batch in the latent space required Returns: Type Description Batch in the space of the target distribution, log determinant of the Jacobian Source code in normflows/core.py 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 def forward_and_log_det ( self , z ): \"\"\"Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian Args: z: Batch in the latent space Returns: Batch in the space of the target distribution, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( z ), device = z . device ) for flow in self . flows : z , log_d = flow ( z ) log_det += log_d return z , log_det forward_kld ( x ) Estimates forward KL divergence, see arXiv 1912.02762 Parameters: Name Type Description Default x Batch sampled from target distribution required Returns: Type Description Estimate of forward KL divergence averaged over batch Source code in normflows/core.py 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 def forward_kld ( self , x ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z ) return - torch . mean ( log_q ) inverse ( x ) Transforms flow variable x to the latent variable z Parameters: Name Type Description Default x Batch in the space of the target distribution required Returns: Type Description Batch in the latent space Source code in normflows/core.py 57 58 59 60 61 62 63 64 65 66 67 68 def inverse ( self , x ): \"\"\"Transforms flow variable x to the latent variable z Args: x: Batch in the space of the target distribution Returns: Batch in the latent space \"\"\" for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , _ = self . flows [ i ] . inverse ( x ) return x inverse_and_log_det ( x ) Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian Parameters: Name Type Description Default x Batch in the space of the target distribution required Returns: Type Description Batch in the latent space, log determinant of the Jacobian Source code in normflows/core.py 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 def inverse_and_log_det ( self , x ): \"\"\"Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian Args: x: Batch in the space of the target distribution Returns: Batch in the latent space, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( x ), device = x . device ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , log_d = self . flows [ i ] . inverse ( x ) log_det += log_d return x , log_det load ( path ) Load model from state dict Parameters: Name Type Description Default path Path including filename where to load model from required Source code in normflows/core.py 207 208 209 210 211 212 213 def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . 
load ( path )) log_prob ( x ) Get log probability for batch Parameters: Name Type Description Default x Batch required Returns: Type Description log probability Source code in normflows/core.py 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 def log_prob ( self , x ): \"\"\"Get log probability for batch Args: x: Batch Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z ) return log_q reverse_alpha_div ( num_samples = 1 , alpha = 1 , dreg = False ) Alpha divergence when sampling from q Parameters: Name Type Description Default num_samples Number of samples to draw 1 dreg Flag whether to use Double Reparametrized Gradient estimator, see arXiv 1810.04152 False Returns: Type Description Alpha divergence Source code in normflows/core.py 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 def reverse_alpha_div ( self , num_samples = 1 , alpha = 1 , dreg = False ): \"\"\"Alpha divergence when sampling from q Args: num_samples: Number of samples to draw dreg: Flag whether to use Double Reparametrized Gradient estimator, see [arXiv 1810.04152](https://arxiv.org/abs/1810.04152) Returns: Alpha divergence \"\"\" z , log_q = self . q0 ( num_samples ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det log_p = self . p . log_prob ( z ) if dreg : w_const = torch . exp ( log_p - log_q ) . detach () z_ = z log_q = torch . zeros ( len ( z_ ), device = z_ . device ) utils . set_requires_grad ( self , False ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z_ , log_det = self . flows [ i ] . inverse ( z_ ) log_q += log_det log_q += self . q0 . log_prob ( z_ ) utils . set_requires_grad ( self , True ) w = torch . exp ( log_p - log_q ) w_alpha = w_const ** alpha w_alpha = w_alpha / torch . mean ( w_alpha ) weights = ( 1 - alpha ) * w_alpha + alpha * w_alpha ** 2 loss = - alpha * torch . mean ( weights * torch . log ( w )) else : loss = np . sign ( alpha - 1 ) * torch . logsumexp ( alpha * ( log_p - log_q ), 0 ) return loss reverse_kld ( num_samples = 1 , beta = 1.0 , score_fn = True ) Estimates reverse KL divergence, see arXiv 1912.02762 Parameters: Name Type Description Default num_samples Number of samples to draw from base distribution 1 beta Annealing parameter, see arXiv 1505.05770 1.0 score_fn Flag whether to include score function in gradient, see arXiv 1703.09194 True Returns: Type Description Estimate of the reverse KL divergence averaged over latent samples Source code in normflows/core.py 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 def reverse_kld ( self , num_samples = 1 , beta = 1.0 , score_fn = True ): \"\"\"Estimates reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: num_samples: Number of samples to draw from base distribution beta: Annealing parameter, see [arXiv 1505.05770](https://arxiv.org/abs/1505.05770) score_fn: Flag whether to include score function in gradient, see [arXiv 1703.09194](https://arxiv.org/abs/1703.09194) Returns: Estimate of the reverse KL divergence averaged over latent samples \"\"\" z , log_q_ = self . q0 ( num_samples ) log_q = torch . zeros_like ( log_q_ ) log_q += log_q_ for flow in self . 
flows : z , log_det = flow ( z ) log_q -= log_det if not score_fn : z_ = z log_q = torch . zeros ( len ( z_ ), device = z_ . device ) utils . set_requires_grad ( self , False ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z_ , log_det = self . flows [ i ] . inverse ( z_ ) log_q += log_det log_q += self . q0 . log_prob ( z_ ) utils . set_requires_grad ( self , True ) log_p = self . p . log_prob ( z ) return torch . mean ( log_q ) - beta * torch . mean ( log_p ) sample ( num_samples = 1 ) Samples from flow-based approximate distribution Parameters: Name Type Description Default num_samples Number of samples to draw 1 Returns: Type Description Samples, log probability Source code in normflows/core.py 167 168 169 170 171 172 173 174 175 176 177 178 179 180 def sample ( self , num_samples = 1 ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det return z , log_q save ( path ) Save state dict of model Parameters: Name Type Description Default path Path including filename where to save model required Source code in normflows/core.py 199 200 201 202 203 204 205 def save ( self , path ): \"\"\"Save state dict of model Args: path: Path including filename where to save model \"\"\" torch . save ( self . state_dict (), path ) NormalizingFlowVAE Bases: Module VAE using normalizing flows to express approximate distribution Source code in normflows/core.py 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 class NormalizingFlowVAE ( nn . Module ): \"\"\" VAE using normalizing flows to express approximate distribution \"\"\" def __init__ ( self , prior , q0 = distributions . Dirac (), flows = None , decoder = None ): \"\"\"Constructor of normalizing flow model Args: prior: Prior distribution of te VAE, i.e. Gaussian decoder: Optional decoder flows: Flows to transform output of base encoder q0: Base Encoder \"\"\" super () . __init__ () self . prior = prior self . decoder = decoder self . flows = nn . ModuleList ( flows ) self . q0 = q0 def forward ( self , x , num_samples = 1 ): \"\"\"Takes data batch, samples num_samples for each data point from base distribution Args: x: data batch num_samples: number of samples to draw for each data point Returns: latent variables for each batch and sample, log_q, and log_p \"\"\" z , log_q = self . q0 ( x , num_samples = num_samples ) # Flatten batch and sample dim z = z . view ( - 1 , * z . size ()[ 2 :]) log_q = log_q . view ( - 1 , * log_q . size ()[ 2 :]) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det log_p = self . prior . log_prob ( z ) if self . decoder is not None : log_p += self . decoder . log_prob ( x , z ) # Separate batch and sample dimension again z = z . view ( - 1 , num_samples , * z . size ()[ 1 :]) log_q = log_q . view ( - 1 , num_samples , * log_q . size ()[ 1 :]) log_p = log_p . view ( - 1 , num_samples , * log_p . size ()[ 1 :]) return z , log_q , log_p __init__ ( prior , q0 = distributions . Dirac (), flows = None , decoder = None ) Constructor of normalizing flow model Parameters: Name Type Description Default prior Prior distribution of te VAE, i.e. 
Gaussian required decoder Optional decoder None flows Flows to transform output of base encoder None q0 Base Encoder Dirac () Source code in normflows/core.py 661 662 663 664 665 666 667 668 669 670 671 672 673 674 def __init__ ( self , prior , q0 = distributions . Dirac (), flows = None , decoder = None ): \"\"\"Constructor of normalizing flow model Args: prior: Prior distribution of te VAE, i.e. Gaussian decoder: Optional decoder flows: Flows to transform output of base encoder q0: Base Encoder \"\"\" super () . __init__ () self . prior = prior self . decoder = decoder self . flows = nn . ModuleList ( flows ) self . q0 = q0 forward ( x , num_samples = 1 ) Takes data batch, samples num_samples for each data point from base distribution Parameters: Name Type Description Default x data batch required num_samples number of samples to draw for each data point 1 Returns: Type Description latent variables for each batch and sample, log_q, and log_p Source code in normflows/core.py 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 def forward ( self , x , num_samples = 1 ): \"\"\"Takes data batch, samples num_samples for each data point from base distribution Args: x: data batch num_samples: number of samples to draw for each data point Returns: latent variables for each batch and sample, log_q, and log_p \"\"\" z , log_q = self . q0 ( x , num_samples = num_samples ) # Flatten batch and sample dim z = z . view ( - 1 , * z . size ()[ 2 :]) log_q = log_q . view ( - 1 , * log_q . size ()[ 2 :]) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det log_p = self . prior . log_prob ( z ) if self . decoder is not None : log_p += self . decoder . log_prob ( x , z ) # Separate batch and sample dimension again z = z . view ( - 1 , num_samples , * z . size ()[ 1 :]) log_q = log_q . view ( - 1 , num_samples , * log_q . size ()[ 1 :]) log_p = log_p . view ( - 1 , num_samples , * log_p . size ()[ 1 :]) return z , log_q , log_p core_test distributions base AffineGaussian Bases: BaseDistribution Diagonal Gaussian an affine constant transformation applied to it, can be class conditional or not Source code in normflows/distributions/base.py 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 class AffineGaussian ( BaseDistribution ): \"\"\" Diagonal Gaussian an affine constant transformation applied to it, can be class conditional or not \"\"\" def __init__ ( self , shape , affine_shape , num_classes = None ): \"\"\"Constructor Args: shape: Shape of the variables affine_shape: Shape of the parameters in the affine transformation num_classes: Number of classes if the base is class conditional, None otherwise \"\"\" super () . __init__ () if isinstance ( shape , int ): shape = ( shape ,) if isinstance ( shape , list ): shape = tuple ( shape ) self . shape = shape self . n_dim = len ( shape ) self . d = np . prod ( shape ) self . sum_dim = list ( range ( 1 , self . n_dim + 1 )) self . affine_shape = affine_shape self . num_classes = num_classes self . class_cond = num_classes is not None # Affine transformation if self . class_cond : self . transform = flows . CCAffineConst ( self . affine_shape , self . 
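
# --- Aside: example usage of AffineGaussian ---
# A minimal sketch of the class-conditional variant; the shapes are
# placeholders, and affine_shape (the shape of the affine transformation's
# parameters) follows the constructor documented here.
import torch
import normflows as nf

q0 = nf.distributions.AffineGaussian((4,), (4,), num_classes=10)
y = torch.randint(10, (8,))      # integer labels are one-hot encoded internally
z, log_p = q0(8, y)              # forward: samples and their log probability
same_log_p = q0.log_prob(z, y)
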
num_classes ) else : self . transform = flows . AffineConstFlow ( self . affine_shape ) # Temperature parameter for annealed sampling self . temperature = None def forward ( self , num_samples = 1 , y = None ): dtype = self . transform . s . dtype device = self . transform . s . device if self . class_cond : if y is not None : num_samples = len ( y ) else : y = torch . randint ( self . num_classes , ( num_samples ,), device = device ) if y . dim () == 1 : y_onehot = torch . zeros ( ( len ( y ), self . num_classes ), dtype = dtype , device = device ) y_onehot . scatter_ ( 1 , y [:, None ], 1 ) y = y_onehot if self . temperature is not None : log_scale = np . log ( self . temperature ) else : log_scale = 0.0 # Sample eps = torch . randn (( num_samples ,) + self . shape , dtype = dtype , device = device ) z = np . exp ( log_scale ) * eps # Get log prob log_p = ( - 0.5 * self . d * np . log ( 2 * np . pi ) - self . d * log_scale - 0.5 * torch . sum ( torch . pow ( eps , 2 ), dim = self . sum_dim ) ) # Apply transform if self . class_cond : z , log_det = self . transform ( z , y ) else : z , log_det = self . transform ( z ) log_p -= log_det return z , log_p def log_prob ( self , z , y = None ): # Perpare onehot encoding of class if needed if self . class_cond : if y . dim () == 1 : y_onehot = torch . zeros ( ( len ( y ), self . num_classes ), dtype = self . transform . s . dtype , device = self . transform . s . device , ) y_onehot . scatter_ ( 1 , y [:, None ], 1 ) y = y_onehot if self . temperature is not None : log_scale = np . log ( self . temperature ) else : log_scale = 0.0 # Get log prob if self . class_cond : z , log_p = self . transform . inverse ( z , y ) else : z , log_p = self . transform . inverse ( z ) z = z / np . exp ( log_scale ) log_p = ( log_p - self . d * log_scale - 0.5 * self . d * np . log ( 2 * np . pi ) - 0.5 * torch . sum ( torch . pow ( z , 2 ), dim = self . sum_dim ) ) return log_p __init__ ( shape , affine_shape , num_classes = None ) Constructor Parameters: Name Type Description Default shape Shape of the variables required affine_shape Shape of the parameters in the affine transformation required num_classes Number of classes if the base is class conditional, None otherwise None Source code in normflows/distributions/base.py 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 def __init__ ( self , shape , affine_shape , num_classes = None ): \"\"\"Constructor Args: shape: Shape of the variables affine_shape: Shape of the parameters in the affine transformation num_classes: Number of classes if the base is class conditional, None otherwise \"\"\" super () . __init__ () if isinstance ( shape , int ): shape = ( shape ,) if isinstance ( shape , list ): shape = tuple ( shape ) self . shape = shape self . n_dim = len ( shape ) self . d = np . prod ( shape ) self . sum_dim = list ( range ( 1 , self . n_dim + 1 )) self . affine_shape = affine_shape self . num_classes = num_classes self . class_cond = num_classes is not None # Affine transformation if self . class_cond : self . transform = flows . CCAffineConst ( self . affine_shape , self . num_classes ) else : self . transform = flows . AffineConstFlow ( self . affine_shape ) # Temperature parameter for annealed sampling self . 
temperature = None BaseDistribution Bases: Module Base distribution of a flow-based model Parameters do not depend on the target variable (as they would for a VAE encoder) Source code in normflows/distributions/base.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 class BaseDistribution ( nn . Module ): \"\"\" Base distribution of a flow-based model Parameters do not depend on the target variable (as they would for a VAE encoder) \"\"\" def __init__ ( self ): super () . __init__ () def forward ( self , num_samples = 1 ): \"\"\"Samples from base distribution and calculates log probability Args: num_samples: Number of samples to draw from the distribution Returns: Samples drawn from the distribution, log probability \"\"\" raise NotImplementedError def log_prob ( self , z ): \"\"\"Calculate log probability of batch of samples Args: z: Batch of random variables to determine log probability for Returns: log probability for each batch element \"\"\" raise NotImplementedError def sample ( self , num_samples = 1 , ** kwargs ): \"\"\"Samples from base distribution Args: num_samples: Number of samples to draw from the distribution Returns: Samples drawn from the distribution \"\"\" z , _ = self . forward ( num_samples , ** kwargs ) return z forward ( num_samples = 1 ) Samples from base distribution and calculates log probability Parameters: Name Type Description Default num_samples Number of samples to draw from the distribution 1 Returns: Type Description Samples drawn from the distribution, log probability Source code in normflows/distributions/base.py 17 18 19 20 21 22 23 24 25 26 def forward ( self , num_samples = 1 ): \"\"\"Samples from base distribution and calculates log probability Args: num_samples: Number of samples to draw from the distribution Returns: Samples drawn from the distribution, log probability \"\"\" raise NotImplementedError log_prob ( z ) Calculate log probability of batch of samples Parameters: Name Type Description Default z Batch of random variables to determine log probability for required Returns: Type Description log probability for each batch element Source code in normflows/distributions/base.py 28 29 30 31 32 33 34 35 36 37 def log_prob ( self , z ): \"\"\"Calculate log probability of batch of samples Args: z: Batch of random variables to determine log probability for Returns: log probability for each batch element \"\"\" raise NotImplementedError sample ( num_samples = 1 , ** kwargs ) Samples from base distribution Parameters: Name Type Description Default num_samples Number of samples to draw from the distribution 1 Returns: Type Description Samples drawn from the distribution Source code in normflows/distributions/base.py 39 40 41 42 43 44 45 46 47 48 49 def sample ( self , num_samples = 1 , ** kwargs ): \"\"\"Samples from base distribution Args: num_samples: Number of samples to draw from the distribution Returns: Samples drawn from the distribution \"\"\" z , _ = self .
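
# --- Aside: implementing a custom BaseDistribution ---
# The interface above only requires `forward` (draw samples and return their
# log probability) and `log_prob`. A minimal sketch: a hand-written standard
# 2D Gaussian base, usable wherever the package expects a base distribution.
import numpy as np
import torch
import normflows as nf

class StandardGaussian2D(nf.distributions.BaseDistribution):
    def __init__(self):
        super().__init__()
        self.d = 2

    def forward(self, num_samples=1):
        z = torch.randn(num_samples, self.d)
        return z, self.log_prob(z)

    def log_prob(self, z):
        return -0.5 * self.d * np.log(2 * np.pi) - 0.5 * torch.sum(z ** 2, dim=1)

q0 = StandardGaussian2D()
z = q0.sample(num_samples=4)   # BaseDistribution.sample calls forward internally
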
ClassCondDiagGaussian

Bases: `BaseDistribution`

Class conditional multivariate Gaussian distribution with diagonal covariance matrix.

Source code in normflows/distributions/base.py:

```python
class ClassCondDiagGaussian(BaseDistribution):
    """
    Class conditional multivariate Gaussian distribution with diagonal covariance matrix
    """

    def __init__(self, shape, num_classes):
        """Constructor

        Args:
          shape: Tuple with shape of data, if int shape has one dimension
          num_classes: Number of classes to condition on
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.perm = [self.n_dim] + list(range(self.n_dim))
        self.d = np.prod(shape)
        self.num_classes = num_classes
        self.loc = nn.Parameter(torch.zeros(*self.shape, num_classes))
        self.log_scale = nn.Parameter(torch.zeros(*self.shape, num_classes))
        self.temperature = None  # Temperature parameter for annealed sampling

    def forward(self, num_samples=1, y=None):
        if y is not None:
            num_samples = len(y)
        else:
            y = torch.randint(self.num_classes, (num_samples,), device=self.loc.device)
        if y.dim() == 1:
            y_onehot = torch.zeros(
                (self.num_classes, num_samples),
                dtype=self.loc.dtype,
                device=self.loc.device,
            )
            y_onehot.scatter_(0, y[None], 1)
            y = y_onehot
        else:
            y = y.t()
        eps = torch.randn(
            (num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device
        )
        loc = (self.loc @ y).permute(*self.perm)
        log_scale = (self.log_scale @ y).permute(*self.perm)
        if self.temperature is not None:
            log_scale = np.log(self.temperature) + log_scale
        z = loc + torch.exp(log_scale) * eps
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))
        )
        return z, log_p

    def log_prob(self, z, y):
        if y.dim() == 1:
            y_onehot = torch.zeros(
                (self.num_classes, len(y)), dtype=self.loc.dtype, device=self.loc.device
            )
            y_onehot.scatter_(0, y[None], 1)
            y = y_onehot
        else:
            y = y.t()
        loc = (self.loc @ y).permute(*self.perm)
        log_scale = (self.log_scale @ y).permute(*self.perm)
        if self.temperature is not None:
            log_scale = np.log(self.temperature) + log_scale
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow((z - loc) / torch.exp(log_scale), 2),
            list(range(1, self.n_dim + 1)),
        )
        return log_p
```

__init__(shape, num_classes)

Constructor

Parameters:

- `shape`: Tuple with shape of data; if int, shape has one dimension (required)
- `num_classes`: Number of classes to condition on (required)
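A minimal usage sketch (class count and dimensionality chosen for illustration):

```python
import torch
import normflows as nf

# 10-class conditional Gaussian base over 2D variables
base = nf.distributions.ClassCondDiagGaussian(2, num_classes=10)

y = torch.randint(10, (64,))       # class labels
z, log_p = base(y=y)               # one sample per label
log_p_check = base.log_prob(z, y)  # density under the same labels
```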
ConditionalDiagGaussian

Bases: `BaseDistribution`

Conditional multivariate Gaussian distribution with diagonal covariance matrix; parameters are obtained from a context encoder, where "context" is the variable to condition on.

Source code in normflows/distributions/base.py:

```python
class ConditionalDiagGaussian(BaseDistribution):
    """
    Conditional multivariate Gaussian distribution with diagonal
    covariance matrix, parameters are obtained by a context encoder,
    context meaning the variable to condition on
    """

    def __init__(self, shape, context_encoder):
        """Constructor

        Args:
          shape: Tuple with shape of data, if int shape has one dimension
          context_encoder: Computes mean and log of the standard deviation
            of the Gaussian, mean is the first half of the last dimension
            of the encoder output, log of the standard deviation the second half
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.d = np.prod(shape)
        self.context_encoder = context_encoder

    def forward(self, num_samples=1, context=None):
        encoder_output = self.context_encoder(context)
        split_ind = encoder_output.shape[-1] // 2
        mean = encoder_output[..., :split_ind]
        log_scale = encoder_output[..., split_ind:]
        eps = torch.randn(
            (num_samples,) + self.shape, dtype=mean.dtype, device=mean.device
        )
        z = mean + torch.exp(log_scale) * eps
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))
        )
        return z, log_p

    def log_prob(self, z, context=None):
        encoder_output = self.context_encoder(context)
        split_ind = encoder_output.shape[-1] // 2
        mean = encoder_output[..., :split_ind]
        log_scale = encoder_output[..., split_ind:]
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow((z - mean) / torch.exp(log_scale), 2),
            list(range(1, self.n_dim + 1)),
        )
        return log_p
```

__init__(shape, context_encoder)

Constructor

Parameters:

- `shape`: Tuple with shape of data; if int, shape has one dimension (required)
- `context_encoder`: Computes mean and log of the standard deviation; the mean is the first half of the last dimension of the encoder output, the log standard deviation the second half (required)
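A minimal usage sketch; the linear context encoder here is an illustrative assumption (any module whose last output dimension is twice the data dimension works):

```python
import torch
from torch import nn
import normflows as nf

# Encoder mapping a 3D context to mean and log std of a 2D Gaussian
# (first 2 outputs = mean, last 2 = log std)
context_encoder = nn.Linear(3, 4)
base = nf.distributions.ConditionalDiagGaussian(2, context_encoder)

context = torch.randn(16, 3)
z, log_p = base(num_samples=16, context=context)
log_p_check = base.log_prob(z, context=context)
```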
DiagGaussian

Bases: `BaseDistribution`

Multivariate Gaussian distribution with diagonal covariance matrix.

Source code in normflows/distributions/base.py:

```python
class DiagGaussian(BaseDistribution):
    """
    Multivariate Gaussian distribution with diagonal covariance matrix
    """

    def __init__(self, shape, trainable=True):
        """Constructor

        Args:
          shape: Tuple with shape of data, if int shape has one dimension
          trainable: Flag whether to use trainable or fixed parameters
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.d = np.prod(shape)
        if trainable:
            self.loc = nn.Parameter(torch.zeros(1, *self.shape))
            self.log_scale = nn.Parameter(torch.zeros(1, *self.shape))
        else:
            self.register_buffer("loc", torch.zeros(1, *self.shape))
            self.register_buffer("log_scale", torch.zeros(1, *self.shape))
        self.temperature = None  # Temperature parameter for annealed sampling

    def forward(self, num_samples=1, context=None):
        eps = torch.randn(
            (num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device
        )
        if self.temperature is None:
            log_scale = self.log_scale
        else:
            log_scale = self.log_scale + np.log(self.temperature)
        z = self.loc + torch.exp(log_scale) * eps
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))
        )
        return z, log_p

    def log_prob(self, z, context=None):
        if self.temperature is None:
            log_scale = self.log_scale
        else:
            log_scale = self.log_scale + np.log(self.temperature)
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow((z - self.loc) / torch.exp(log_scale), 2),
            list(range(1, self.n_dim + 1)),
        )
        return log_p
```

__init__(shape, trainable=True)

Constructor

Parameters:

- `shape`: Tuple with shape of data; if int, shape has one dimension (required)
- `trainable`: Flag whether to use trainable or fixed parameters (default: True)
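A minimal usage sketch, mirroring how this class serves as the base `q0` in the training example earlier in these docs:

```python
import normflows as nf

# Trainable diagonal Gaussian base over 4D variables
q0 = nf.distributions.DiagGaussian(4)

z, log_p = q0(num_samples=256)   # draw samples with their log density
q0.temperature = 0.7             # optional: annealed (low-temperature) sampling
z_cool = q0.sample(256)
```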
GaussianMixture

Bases: `BaseDistribution`

Mixture of Gaussians with diagonal covariance matrix.

Source code in normflows/distributions/base.py:

```python
class GaussianMixture(BaseDistribution):
    """
    Mixture of Gaussians with diagonal covariance matrix
    """

    def __init__(
        self, n_modes, dim, loc=None, scale=None, weights=None, trainable=True
    ):
        """Constructor

        Args:
          n_modes: Number of modes of the mixture model
          dim: Number of dimensions of each Gaussian
          loc: List of mean values
          scale: List of diagonals of the covariance matrices
          weights: List of mode probabilities
          trainable: Flag, if true parameters will be optimized during training
        """
        super().__init__()
        self.n_modes = n_modes
        self.dim = dim

        if loc is None:
            loc = np.random.randn(self.n_modes, self.dim)
        loc = np.array(loc)[None, ...]
        if scale is None:
            scale = np.ones((self.n_modes, self.dim))
        scale = np.array(scale)[None, ...]
        if weights is None:
            weights = np.ones(self.n_modes)
        weights = np.array(weights)[None, ...]
        weights /= weights.sum(1)

        if trainable:
            self.loc = nn.Parameter(torch.tensor(1.0 * loc))
            self.log_scale = nn.Parameter(torch.tensor(np.log(1.0 * scale)))
            self.weight_scores = nn.Parameter(torch.tensor(np.log(1.0 * weights)))
        else:
            self.register_buffer("loc", torch.tensor(1.0 * loc))
            self.register_buffer("log_scale", torch.tensor(np.log(1.0 * scale)))
            self.register_buffer("weight_scores", torch.tensor(np.log(1.0 * weights)))

    def forward(self, num_samples=1):
        # Get weights
        weights = torch.softmax(self.weight_scores, 1)

        # Sample mode indices
        mode = torch.multinomial(weights[0, :], num_samples, replacement=True)
        mode_1h = nn.functional.one_hot(mode, self.n_modes)
        mode_1h = mode_1h[..., None]

        # Get samples
        eps_ = torch.randn(
            num_samples, self.dim, dtype=self.loc.dtype, device=self.loc.device
        )
        scale_sample = torch.sum(torch.exp(self.log_scale) * mode_1h, 1)
        loc_sample = torch.sum(self.loc * mode_1h, 1)
        z = eps_ * scale_sample + loc_sample

        # Compute log probability
        eps = (z[:, None, :] - self.loc) / torch.exp(self.log_scale)
        log_p = (
            -0.5 * self.dim * np.log(2 * np.pi)
            + torch.log(weights)
            - 0.5 * torch.sum(torch.pow(eps, 2), 2)
            - torch.sum(self.log_scale, 2)
        )
        log_p = torch.logsumexp(log_p, 1)

        return z, log_p

    def log_prob(self, z):
        # Get weights
        weights = torch.softmax(self.weight_scores, 1)

        # Compute log probability
        eps = (z[:, None, :] - self.loc) / torch.exp(self.log_scale)
        log_p = (
            -0.5 * self.dim * np.log(2 * np.pi)
            + torch.log(weights)
            - 0.5 * torch.sum(torch.pow(eps, 2), 2)
            - torch.sum(self.log_scale, 2)
        )
        log_p = torch.logsumexp(log_p, 1)

        return log_p
```
__init__(n_modes, dim, loc=None, scale=None, weights=None, trainable=True)

Constructor

Parameters:

- `n_modes`: Number of modes of the mixture model (required)
- `dim`: Number of dimensions of each Gaussian (required)
- `loc`: List of mean values (default: None)
- `scale`: List of diagonals of the covariance matrices (default: None)
- `weights`: List of mode probabilities (default: None)
- `trainable`: Flag, if true parameters will be optimized during training (default: True)
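A minimal usage sketch; when `loc`, `scale`, and `weights` are omitted, means are initialized randomly and scales and weights uniformly:

```python
import normflows as nf

# 2D mixture with 5 trainable modes
base = nf.distributions.GaussianMixture(n_modes=5, dim=2)

z, log_p = base(num_samples=512)
log_p_check = base.log_prob(z)
```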
GaussianPCA

Bases: `BaseDistribution`

Gaussian distribution resulting from linearly mapping a normally distributed latent variable describing the "content of the target".

Source code in normflows/distributions/base.py:

```python
class GaussianPCA(BaseDistribution):
    """
    Gaussian distribution resulting from linearly mapping a normal
    distributed latent variable describing the "content of the target"
    """

    def __init__(self, dim, latent_dim=None, sigma=0.1):
        """Constructor

        Args:
          dim: Number of dimensions of the flow variables
          latent_dim: Number of dimensions of the latent "content" variable;
                      if None it is set equal to dim
          sigma: Noise level
        """
        super().__init__()

        self.dim = dim
        if latent_dim is None:
            self.latent_dim = dim
        else:
            self.latent_dim = latent_dim

        self.loc = nn.Parameter(torch.zeros(1, dim))
        self.W = nn.Parameter(torch.randn(latent_dim, dim))
        self.log_sigma = nn.Parameter(torch.tensor(np.log(sigma)))

    def forward(self, num_samples=1):
        eps = torch.randn(
            num_samples, self.latent_dim, dtype=self.loc.dtype, device=self.loc.device
        )
        z_ = torch.matmul(eps, self.W)
        z = z_ + self.loc

        Sig = torch.matmul(self.W.T, self.W) + torch.exp(
            self.log_sigma * 2
        ) * torch.eye(self.dim, dtype=self.loc.dtype, device=self.loc.device)
        log_p = (
            self.dim / 2 * np.log(2 * np.pi)
            - 0.5 * torch.det(Sig)
            - 0.5 * torch.sum(z_ * torch.matmul(z_, torch.inverse(Sig)), 1)
        )

        return z, log_p

    def log_prob(self, z):
        z_ = z - self.loc

        Sig = torch.matmul(self.W.T, self.W) + torch.exp(
            self.log_sigma * 2
        ) * torch.eye(self.dim, dtype=self.loc.dtype, device=self.loc.device)
        log_p = (
            self.dim / 2 * np.log(2 * np.pi)
            - 0.5 * torch.det(Sig)
            - 0.5 * torch.sum(z_ * torch.matmul(z_, torch.inverse(Sig)), 1)
        )

        return log_p
```

__init__(dim, latent_dim=None, sigma=0.1)

Constructor

Parameters:

- `dim`: Number of dimensions of the flow variables (required)
- `latent_dim`: Number of dimensions of the latent "content" variable; if None it is set equal to dim (default: None)
- `sigma`: Noise level (default: 0.1)
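A minimal usage sketch, passing an explicit `latent_dim` (values illustrative):

```python
import normflows as nf

# 10D Gaussian whose covariance is W^T W + sigma^2 I, with a 2D latent "content"
base = nf.distributions.GaussianPCA(10, latent_dim=2, sigma=0.1)

z, log_p = base(num_samples=32)
```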
GlowBase

Bases: `BaseDistribution`

Base distribution of the Glow model, i.e. a diagonal Gaussian with one mean and log scale for each channel.

Source code in normflows/distributions/base.py:

```python
class GlowBase(BaseDistribution):
    """
    Base distribution of the Glow model, i.e. Diagonal Gaussian with one mean and
    log scale for each channel
    """

    def __init__(self, shape, num_classes=None, logscale_factor=3.0):
        """Constructor

        Args:
          shape: Shape of the variables
          num_classes: Number of classes if the base is class conditional, None otherwise
          logscale_factor: Scaling factor for mean and log variance
        """
        super().__init__()
        # Save shape and related statistics
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.num_pix = np.prod(shape[1:])
        self.d = np.prod(shape)
        self.sum_dim = list(range(1, self.n_dim + 1))
        self.num_classes = num_classes
        self.class_cond = num_classes is not None
        self.logscale_factor = logscale_factor
        # Set up parameters
        self.loc = nn.Parameter(
            torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))
        )
        self.loc_logs = nn.Parameter(
            torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))
        )
        self.log_scale = nn.Parameter(
            torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))
        )
        self.log_scale_logs = nn.Parameter(
            torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))
        )
        # Class conditional parameter if needed
        if self.class_cond:
            self.loc_cc = nn.Parameter(torch.zeros(self.num_classes, self.shape[0]))
            self.log_scale_cc = nn.Parameter(
                torch.zeros(self.num_classes, self.shape[0])
            )
        # Temperature parameter for annealed sampling
        self.temperature = None

    def forward(self, num_samples=1, y=None):
        # Prepare parameter
        loc = self.loc * torch.exp(self.loc_logs * self.logscale_factor)
        log_scale = self.log_scale * torch.exp(
            self.log_scale_logs * self.logscale_factor
        )
        if self.class_cond:
            if y is not None:
                num_samples = len(y)
            else:
                y = torch.randint(
                    self.num_classes, (num_samples,), device=self.loc.device
                )
            if y.dim() == 1:
                y_onehot = torch.zeros(
                    (len(y), self.num_classes),
                    dtype=self.loc.dtype,
                    device=self.loc.device,
                )
                y_onehot.scatter_(1, y[:, None], 1)
                y = y_onehot
            loc = loc + (y @ self.loc_cc).view(
                y.size(0), self.shape[0], *((self.n_dim - 1) * [1])
            )
            log_scale = log_scale + (y @ self.log_scale_cc).view(
                y.size(0), self.shape[0], *((self.n_dim - 1) * [1])
            )
        if self.temperature is not None:
            log_scale = log_scale + np.log(self.temperature)
        # Sample
        eps = torch.randn(
            (num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device
        )
        z = loc + torch.exp(log_scale) * eps
        # Get log prob
        log_p = (
            -0.5 * self.d * np.log(2 * np.pi)
            - self.num_pix * torch.sum(log_scale, dim=self.sum_dim)
            - 0.5 * torch.sum(torch.pow(eps, 2), dim=self.sum_dim)
        )
        return z, log_p

    def log_prob(self, z, y=None):
        # Prepare parameter
        loc = self.loc * torch.exp(self.loc_logs * self.logscale_factor)
        log_scale = self.log_scale * torch.exp(
            self.log_scale_logs * self.logscale_factor
        )
        if self.class_cond:
            if y.dim() == 1:
                y_onehot = torch.zeros(
                    (len(y), self.num_classes),
                    dtype=self.loc.dtype,
                    device=self.loc.device,
                )
                y_onehot.scatter_(1, y[:, None], 1)
                y = y_onehot
            loc = loc + (y @ self.loc_cc).view(
                y.size(0), self.shape[0], *((self.n_dim - 1) * [1])
            )
            log_scale = log_scale + (y @ self.log_scale_cc).view(
                y.size(0), self.shape[0], *((self.n_dim - 1) * [1])
            )
        if self.temperature is not None:
            log_scale = log_scale + np.log(self.temperature)
        # Get log prob
        log_p = (
            -0.5 * self.d * np.log(2 * np.pi)
            - self.num_pix * torch.sum(log_scale, dim=self.sum_dim)
            - 0.5 * torch.sum(
                torch.pow((z - loc) / torch.exp(log_scale), 2), dim=self.sum_dim
            )
        )
        return log_p
```
__init__(shape, num_classes=None, logscale_factor=3.0)

Constructor

Parameters:

- `shape`: Shape of the variables (required)
- `num_classes`: Number of classes if the base is class conditional, None otherwise (default: None)
- `logscale_factor`: Scaling factor for mean and log variance (default: 3.0)
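A minimal usage sketch for an image-shaped, class-conditional base (shapes illustrative):

```python
import normflows as nf

# Channel-wise Gaussian base for 3x32x32 images, conditional on 10 classes
base = nf.distributions.GlowBase((3, 32, 32), num_classes=10)

z, log_p = base(num_samples=8)   # labels are drawn uniformly when y is not given
base.temperature = 0.8           # annealed sampling, as commonly used with Glow
z_cool, _ = base(num_samples=8)
```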
Uniform

Bases: `BaseDistribution`

Multivariate uniform distribution.

Source code in normflows/distributions/base.py:

```python
class Uniform(BaseDistribution):
    """
    Multivariate uniform distribution
    """

    def __init__(self, shape, low=-1.0, high=1.0):
        """Constructor

        Args:
          shape: Tuple with shape of data, if int shape has one dimension
          low: Lower bound of uniform distribution
          high: Upper bound of uniform distribution
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.d = np.prod(shape)
        self.low = torch.tensor(low)
        self.high = torch.tensor(high)
        self.log_prob_val = -self.d * np.log(self.high - self.low)

    def forward(self, num_samples=1, context=None):
        eps = torch.rand(
            (num_samples,) + self.shape, dtype=self.low.dtype, device=self.low.device
        )
        z = self.low + (self.high - self.low) * eps
        log_p = self.log_prob_val * torch.ones(num_samples, device=self.low.device)
        return z, log_p

    def log_prob(self, z, context=None):
        log_p = self.log_prob_val * torch.ones(z.shape[0], device=z.device)
        out_range = torch.logical_or(z < self.low, z > self.high)
        ind_inf = torch.any(torch.reshape(out_range, (z.shape[0], -1)), dim=-1)
        log_p[ind_inf] = -np.inf
        return log_p
```

__init__(shape, low=-1.0, high=1.0)

Constructor

Parameters:

- `shape`: Tuple with shape of data; if int, shape has one dimension (required)
- `low`: Lower bound of uniform distribution (default: -1.0)
- `high`: Upper bound of uniform distribution (default: 1.0)
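A minimal usage sketch:

```python
import normflows as nf

# Uniform base over [-1, 1]^2 with constant log density
base = nf.distributions.Uniform(2, low=-1.0, high=1.0)

z, log_p = base(num_samples=100)    # log_p is constant: -2 * log(2)
log_p_out = base.log_prob(2.0 * z)  # points outside the support get -inf
```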
UniformGaussian

Bases: `BaseDistribution`

Distribution of a 1D random variable with some entries having a uniform and others a Gaussian distribution.

Source code in normflows/distributions/base.py:

```python
class UniformGaussian(BaseDistribution):
    """
    Distribution of a 1D random variable with some entries having a uniform and
    others a Gaussian distribution
    """

    def __init__(self, ndim, ind, scale=None):
        """Constructor

        Args:
          ndim: Int, number of dimensions
          ind: Iterable, indices of uniformly distributed entries
          scale: Iterable, standard deviation of Gaussian or width of uniform distribution
        """
        super().__init__()
        self.ndim = ndim
        if isinstance(ind, int):
            ind = [ind]

        # Set up indices and permutations
        self.ndim = ndim
        if torch.is_tensor(ind):
            self.register_buffer("ind", torch._cast_Long(ind))
        else:
            self.register_buffer("ind", torch.tensor(ind, dtype=torch.long))

        ind_ = []
        for i in range(self.ndim):
            if not i in self.ind:
                ind_ += [i]
        self.register_buffer("ind_", torch.tensor(ind_, dtype=torch.long))

        perm_ = torch.cat((self.ind, self.ind_))
        inv_perm_ = torch.zeros_like(perm_)
        for i in range(self.ndim):
            inv_perm_[perm_[i]] = i
        self.register_buffer("inv_perm", inv_perm_)

        if scale is None:
            self.register_buffer("scale", torch.ones(self.ndim))
        else:
            self.register_buffer("scale", scale)

    def forward(self, num_samples=1, context=None):
        z = self.sample(num_samples)
        return z, self.log_prob(z)

    def sample(self, num_samples=1, context=None):
        eps_u = (
            torch.rand(
                (num_samples, len(self.ind)),
                dtype=self.scale.dtype,
                device=self.scale.device,
            )
            - 0.5
        )
        eps_g = torch.randn(
            (num_samples, len(self.ind_)),
            dtype=self.scale.dtype,
            device=self.scale.device,
        )
        z = torch.cat((eps_u, eps_g), -1)
        z = z[..., self.inv_perm]
        return self.scale * z

    def log_prob(self, z, context=None):
        log_p_u = torch.broadcast_to(-torch.log(self.scale[self.ind]), (len(z), -1))
        log_p_g = (
            -0.5 * np.log(2 * np.pi)
            - torch.log(self.scale[self.ind_])
            - 0.5 * torch.pow(z[..., self.ind_] / self.scale[self.ind_], 2)
        )
        return torch.sum(log_p_u, -1) + torch.sum(log_p_g, -1)
```

__init__(ndim, ind, scale=None)

Constructor

Parameters:

- `ndim`: Int, number of dimensions (required)
- `ind`: Iterable, indices of uniformly distributed entries (required)
- `scale`: Iterable, standard deviation of Gaussian or width of uniform distribution (default: None)
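A minimal usage sketch (dimension split chosen for illustration):

```python
import torch
import normflows as nf

# 3D base: entry 0 uniform, entries 1-2 Gaussian, all with unit scale
base = nf.distributions.UniformGaussian(3, ind=[0], scale=torch.ones(3))

z = base.sample(100)
log_p = base.log_prob(z)
```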
base_test

decoder

BaseDecoder

Bases: `Module`

Source code in normflows/distributions/decoder.py:

```python
class BaseDecoder(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, z):
        """Decodes z to x

        Args:
          z: latent variable

        Returns:
          x, std of x
        """
        raise NotImplementedError

    def log_prob(self, x, z):
        """Log probability

        Args:
          x: observable
          z: latent variable

        Returns:
          log(p) of x given z
        """
        raise NotImplementedError
```

forward(z)

Decodes z to x.

Parameters:

- `z`: latent variable (required)

Returns: x, std of x

log_prob(x, z)

Log probability.

Parameters:

- `x`: observable (required)
- `z`: latent variable (required)

Returns: log(p) of x given z

NNBernoulliDecoder

Bases: `BaseDecoder`

BaseDecoder representing a Bernoulli distribution with mean parametrized by a NN.

Source code in normflows/distributions/decoder.py:

```python
class NNBernoulliDecoder(BaseDecoder):
    """
    BaseDecoder representing a Bernoulli distribution with mean parametrized by a NN
    """

    def __init__(self, net):
        """Constructor

        Args:
          net: neural network parametrizing mean Bernoulli (mean = sigmoid(nn_out))
        """
        super().__init__()
        self.net = net

    def forward(self, z):
        mean = torch.sigmoid(self.net(z))
        return mean

    def log_prob(self, x, z):
        score = self.net(z)
        if len(z) > len(x):
            x = x.unsqueeze(1)
            x = x.repeat(1, z.size()[0] // x.size()[0], *((x.dim() - 2) * [1])).view(
                -1, *x.size()[2:]
            )
        log_sig = lambda a: -torch.relu(-a) - torch.log(1 + torch.exp(-torch.abs(a)))
        log_p = torch.sum(
            x * log_sig(score) + (1 - x) * log_sig(-score), list(range(1, x.dim()))
        )
        return log_p
```

__init__(net)

Constructor

Parameters:

- `net`: neural network parametrizing the Bernoulli mean (mean = sigmoid(nn_out)) (required)

NNDiagGaussianDecoder

Bases: `BaseDecoder`

BaseDecoder representing a diagonal Gaussian distribution with mean and std parametrized by a NN.

Source code in normflows/distributions/decoder.py:

```python
class NNDiagGaussianDecoder(BaseDecoder):
    """
    BaseDecoder representing a diagonal Gaussian distribution with mean and std
    parametrized by a NN
    """

    def __init__(self, net):
        """Constructor

        Args:
          net: neural network parametrizing mean and standard deviation of diagonal Gaussian
        """
        super().__init__()
        self.net = net

    def forward(self, z):
        mean_std = self.net(z)
        n_hidden = mean_std.size()[1] // 2
        mean = mean_std[:, :n_hidden, ...]
        std = torch.exp(0.5 * mean_std[:, n_hidden:, ...])
        return mean, std

    def log_prob(self, x, z):
        mean_std = self.net(z)
        n_hidden = mean_std.size()[1] // 2
        mean = mean_std[:, :n_hidden, ...]
        var = torch.exp(mean_std[:, n_hidden:, ...])
        if len(z) > len(x):
            x = x.unsqueeze(1)
            x = x.repeat(1, z.size()[0] // x.size()[0], *((x.dim() - 2) * [1])).view(
                -1, *x.size()[2:]
            )
        log_p = -0.5 * torch.prod(torch.tensor(z.size()[1:])) * np.log(
            2 * np.pi
        ) - 0.5 * torch.sum(
            torch.log(var) + (x - mean) ** 2 / var, list(range(1, z.dim()))
        )
        return log_p
```

__init__(net)

Constructor

Parameters:

- `net`: neural network parametrizing mean and standard deviation of the diagonal Gaussian (required)
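A minimal usage sketch for the Gaussian decoder; the small MLP is an illustrative assumption (the net's second dimension must split evenly into mean and log-variance):

```python
import torch
from torch import nn
import normflows as nf

# Decoder net mapping a 2D latent to mean and log-variance of a 4D observable
net = nn.Sequential(nn.Linear(2, 32), nn.ReLU(), nn.Linear(32, 8))
decoder = nf.distributions.NNDiagGaussianDecoder(net)

z = torch.randn(16, 2)
mean, std = decoder(z)
x = mean + std * torch.randn_like(std)
log_p = decoder.log_prob(x, z)
```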
decoder_test

distribution_test

DistributionTest

Bases: `TestCase`

Generic test case for distribution modules.

Source code in normflows/distributions/distribution_test.py:

```python
class DistributionTest(unittest.TestCase):
    """
    Generic test case for distribution modules
    """

    def assertClose(self, actual, expected, atol=None, rtol=None):
        assert_close(actual, expected, atol=atol, rtol=rtol)

    def checkForward(self, distribution, num_samples=1, **kwargs):
        # Do forward
        outputs, log_p = distribution(num_samples, **kwargs)
        # Check type
        assert outputs.dtype == log_p.dtype
        # Check shape
        assert log_p.shape[0] == num_samples
        assert outputs.shape[0] == num_samples
        # Check dim
        assert outputs.dim() > log_p.dim()
        # Return results
        return outputs, log_p

    def checkLogProb(self, distribution, inputs, **kwargs):
        # Compute log prob
        log_p = distribution.log_prob(inputs, **kwargs)
        # Check type
        assert log_p.dtype == inputs.dtype
        # Check shape
        assert log_p.shape[0] == inputs.shape[0]
        # Return results
        return log_p

    def checkSample(self, distribution, num_samples=1, **kwargs):
        # Do forward
        outputs = distribution.sample(num_samples, **kwargs)
        # Check shape
        assert outputs.shape[0] == num_samples
        # Check dim
        assert outputs.dim() > 1
        # Return results
        return outputs

    def checkForwardLogProb(
        self, distribution, num_samples=1, atol=None, rtol=None, **kwargs
    ):
        # Check forward
        outputs, log_p = self.checkForward(distribution, num_samples, **kwargs)
        # Check log prob
        log_p_ = self.checkLogProb(distribution, outputs, **kwargs)
        # Check consistency
        self.assertClose(log_p_, log_p, atol, rtol)
```

encoder

BaseEncoder

Bases: `Module`

Base distribution of a flow-based variational autoencoder. Parameters of the distribution depend on the target variable x.

Source code in normflows/distributions/encoder.py:

```python
class BaseEncoder(nn.Module):
    """
    Base distribution of a flow-based variational autoencoder

    Parameters of the distribution depend on the target variable x
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, num_samples=1):
        """
        Args:
          x: Variable to condition on, first dimension is batch size
          num_samples: number of samples to draw per element of mini-batch

        Returns:
          sample of z for x, log probability for sample
        """
        raise NotImplementedError

    def log_prob(self, z, x):
        """
        Args:
          z: Primary random variable, first dimension is batch size
          x: Variable to condition on, first dimension is batch size

        Returns:
          log probability of z given x
        """
        raise NotImplementedError
```

forward(x, num_samples=1)

Parameters:

- `x`: Variable to condition on, first dimension is batch size (required)
- `num_samples`: number of samples to draw per element of mini-batch (default: 1)

Returns: sample of z for x, log probability for sample

log_prob(z, x)

Parameters:

- `z`: Primary random variable, first dimension is batch size (required)
- `x`: Variable to condition on, first dimension is batch size (required)

Returns: log probability of z given x
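A sketch of how these test helpers might be used; the test class here is hypothetical, and the import path follows the module layout shown above:

```python
import unittest
import normflows as nf
from normflows.distributions.distribution_test import DistributionTest

class DiagGaussianTest(DistributionTest):
    def test_diag_gaussian(self):
        base = nf.distributions.DiagGaussian(2)
        # Checks shapes/dtypes and that forward's log_p matches log_prob
        self.checkForwardLogProb(base, num_samples=8)

if __name__ == "__main__":
    unittest.main()
```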
ConstDiagGaussian

Bases: `BaseEncoder`

Source code in normflows/distributions/encoder.py:

```python
class ConstDiagGaussian(BaseEncoder):
    def __init__(self, loc, scale):
        """Multivariate Gaussian distribution with diagonal covariance and
        parameters being constant wrt x

        Args:
          loc: mean vector of the distribution
          scale: vector of the standard deviations on the diagonal of the covariance matrix
        """
        super().__init__()
        self.d = len(loc)
        if not torch.is_tensor(loc):
            loc = torch.tensor(loc)
        if not torch.is_tensor(scale):
            scale = torch.tensor(scale)
        self.loc = nn.Parameter(loc.reshape((1, 1, self.d)))
        self.scale = nn.Parameter(scale)

    def forward(self, x=None, num_samples=1):
        """
        Args:
          x: Variable to condition on, will only be used to determine the batch size
          num_samples: number of samples to draw per element of mini-batch

        Returns:
          sample of z for x, log probability for sample
        """
        if x is not None:
            batch_size = len(x)
        else:
            batch_size = 1
        eps = torch.randn((batch_size, num_samples, self.d), device=x.device)
        z = self.loc + self.scale * eps
        log_q = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            torch.log(self.scale) + 0.5 * torch.pow(eps, 2), 2
        )
        return z, log_q

    def log_prob(self, z, x):
        """
        Args:
          z: Primary random variable, first dimension is batch dimension
          x: Variable to condition on, first dimension is batch dimension

        Returns:
          log probability of z given x
        """
        if z.dim() == 1:
            z = z.unsqueeze(0)
        if z.dim() == 2:
            z = z.unsqueeze(0)
        log_q = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            torch.log(self.scale) + 0.5 * ((z - self.loc) / self.scale) ** 2, 2
        )
        return log_q
```

__init__(loc, scale)

Multivariate Gaussian distribution with diagonal covariance and parameters being constant wrt x.

Parameters:

- `loc`: mean vector of the distribution (required)
- `scale`: vector of the standard deviations on the diagonal of the covariance matrix (required)

forward(x=None, num_samples=1)

Parameters:

- `x`: Variable to condition on, will only be used to determine the batch size (default: None)
- `num_samples`: number of samples to draw per element of mini-batch (default: 1)

Returns: sample of z for x, log probability for sample

log_prob(z, x)

Parameters:

- `z`: Primary random variable, first dimension is batch dimension (required)
- `x`: Variable to condition on, first dimension is batch dimension (required)

Returns: log probability of z given x
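A minimal usage sketch (dimensions illustrative):

```python
import torch
import normflows as nf

# Encoder q(z|x) that ignores x: a fixed 2D Gaussian
encoder = nf.distributions.ConstDiagGaussian(
    loc=torch.zeros(2), scale=torch.ones(2)
)

x = torch.randn(16, 3)                 # only the batch size of x is used
z, log_q = encoder(x, num_samples=4)   # z has shape (16, 4, 2)
log_q_check = encoder.log_prob(z, x)
```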
NNDiagGaussian

Bases: `BaseEncoder`

Diagonal Gaussian distribution with mean and variance determined by a neural network.

Source code in normflows/distributions/encoder.py:

```python
class NNDiagGaussian(BaseEncoder):
    """
    Diagonal Gaussian distribution with mean and variance determined by a neural network
    """

    def __init__(self, net):
        """Constructor

        Args:
          net: net computing mean (first n / 2 outputs), standard deviation (second n / 2 outputs)
        """
        super().__init__()
        self.net = net

    def forward(self, x, num_samples=1):
        """
        Args:
          x: Variable to condition on
          num_samples: number of samples to draw per element of mini-batch

        Returns:
          sample of z for x, log probability for sample
        """
        batch_size = len(x)
        mean_std = self.net(x)
        n_hidden = mean_std.size()[1] // 2
        mean = mean_std[:, :n_hidden, ...].unsqueeze(1)
        std = torch.exp(0.5 * mean_std[:, n_hidden:(2 * n_hidden), ...].unsqueeze(1))
        eps = torch.randn(
            (batch_size, num_samples) + tuple(mean.size()[2:]), device=x.device
        )
        z = mean + std * eps
        log_q = -0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(
            2 * np.pi
        ) - torch.sum(
            torch.log(std) + 0.5 * torch.pow(eps, 2), list(range(2, z.dim()))
        )
        return z, log_q

    def log_prob(self, z, x):
        """
        Args:
          z: Primary random variable, first dimension is batch dimension
          x: Variable to condition on, first dimension is batch dimension

        Returns:
          log probability of z given x
        """
        if z.dim() == 1:
            z = z.unsqueeze(0)
        if z.dim() == 2:
            z = z.unsqueeze(0)
        mean_std = self.net(x)
        n_hidden = mean_std.size()[1] // 2
        mean = mean_std[:, :n_hidden, ...].unsqueeze(1)
        var = torch.exp(mean_std[:, n_hidden:(2 * n_hidden), ...].unsqueeze(1))
        log_q = -0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(
            2 * np.pi
        ) - 0.5 * torch.sum(torch.log(var) + (z - mean) ** 2 / var, 2)
        return log_q
```
__init__(net)

Constructor

Parameters:

- `net`: net computing the mean (first n/2 outputs) and the standard deviation (second n/2 outputs) (required)

forward(x, num_samples=1)

Parameters:

- `x`: Variable to condition on (required)
- `num_samples`: number of samples to draw per element of mini-batch (default: 1)

Returns: sample of z for x, log probability for sample

log_prob(z, x)

Parameters:

- `z`: Primary random variable, first dimension is batch dimension (required)
- `x`: Variable to condition on, first dimension is batch dimension (required)

Returns: log probability of z given x
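A minimal usage sketch of a VAE-style encoder; the MLP is an illustrative assumption:

```python
import torch
from torch import nn
import normflows as nf

# Net outputs 2D mean and 2D log-variance for a 2D latent
net = nn.Sequential(nn.Linear(5, 64), nn.ReLU(), nn.Linear(64, 4))
encoder = nf.distributions.NNDiagGaussian(net)

x = torch.randn(16, 5)
z, log_q = encoder(x, num_samples=4)   # z has shape (16, 4, 2)
log_q_check = encoder.log_prob(z, x)
```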
encoder_test

linear_interpolation

LinearInterpolation

Linear interpolation of two distributions in the log space.

Source code in normflows/distributions/linear_interpolation.py:

```python
class LinearInterpolation:
    """
    Linear interpolation of two distributions in the log space
    """

    def __init__(self, dist1, dist2, alpha):
        """Constructor

        Interpolation parameter alpha:

            log_p = alpha * log_p_1 + (1 - alpha) * log_p_2

        Args:
          dist1: First distribution
          dist2: Second distribution
          alpha: Interpolation parameter
        """
        self.alpha = alpha
        self.dist1 = dist1
        self.dist2 = dist2

    def log_prob(self, z):
        return self.alpha * self.dist1.log_prob(z) + (
            1 - self.alpha
        ) * self.dist2.log_prob(z)
```

__init__(dist1, dist2, alpha)

Constructor

Interpolation parameter alpha:

    log_p = alpha * log_p_1 + (1 - alpha) * log_p_2

Parameters:

- `dist1`: First distribution (required)
- `dist2`: Second distribution (required)
- `alpha`: Interpolation parameter (required)
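A minimal usage sketch, e.g. as an annealed bridge between an easy reference density and a target (assuming `LinearInterpolation` is exported from `normflows.distributions`):

```python
import torch
import normflows as nf
from normflows.distributions import LinearInterpolation

p0 = nf.distributions.DiagGaussian(2, trainable=False)  # reference
p1 = nf.distributions.TwoMoons()                        # target
bridge = LinearInterpolation(p1, p0, alpha=0.5)

z = torch.randn(8, 2)
log_p = bridge.log_prob(z)  # 0.5 * log p1(z) + 0.5 * log p0(z)
```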
mh_proposal

DiagGaussianProposal

Bases: `MHProposal`

Diagonal Gaussian distribution with the previous value as mean, used as a proposal for the Metropolis-Hastings algorithm.

Source code in normflows/distributions/mh_proposal.py:

```python
class DiagGaussianProposal(MHProposal):
    """
    Diagonal Gaussian distribution with previous value as mean
    as a proposal for Metropolis Hastings algorithm
    """

    def __init__(self, shape, scale):
        """Constructor

        Args:
          shape: Shape of variables to sample
          scale: Standard deviation of distribution
        """
        super().__init__()
        self.shape = shape
        self.scale_cpu = torch.tensor(scale)
        self.register_buffer("scale", self.scale_cpu.unsqueeze(0))

    def sample(self, z):
        num_samples = len(z)
        eps = torch.randn((num_samples,) + self.shape, dtype=z.dtype, device=z.device)
        z_ = eps * self.scale + z
        return z_

    def log_prob(self, z_, z):
        log_p = -0.5 * np.prod(self.shape) * np.log(2 * np.pi) - torch.sum(
            torch.log(self.scale) + 0.5 * torch.pow((z_ - z) / self.scale, 2),
            list(range(1, z.dim())),
        )
        return log_p

    def forward(self, z):
        num_samples = len(z)
        eps = torch.randn((num_samples,) + self.shape, dtype=z.dtype, device=z.device)
        z_ = eps * self.scale + z
        log_p_diff = torch.zeros(num_samples, dtype=z.dtype, device=z.device)
        return z_, log_p_diff
```

__init__(shape, scale)

Constructor

Parameters:

- `shape`: Shape of variables to sample (required)
- `scale`: Standard deviation of distribution (required)

MHProposal

Bases: `Module`

Proposal distribution for the Metropolis-Hastings algorithm.

Source code in normflows/distributions/mh_proposal.py:

```python
class MHProposal(nn.Module):
    """
    Proposal distribution for the Metropolis Hastings algorithm
    """

    def __init__(self):
        super().__init__()

    def sample(self, z):
        """
        Sample new value based on previous z
        """
        raise NotImplementedError

    def log_prob(self, z_, z):
        """
        Args:
          z_: Potential new sample
          z: Previous sample

        Returns:
          Log probability of proposal distribution
        """
        raise NotImplementedError

    def forward(self, z):
        """Draw samples given z and compute log probability difference

            log(p(z | z_new)) - log(p(z_new | z))

        Args:
          z: Previous samples

        Returns:
          Proposal, difference of log probability ratio
        """
        raise NotImplementedError
```

forward(z)

Draws samples given z and computes the log probability difference

    log(p(z | z_new)) - log(p(z_new | z))

Parameters:

- `z`: Previous samples (required)

Returns: Proposal, difference of log probability ratio

log_prob(z_, z)

Parameters:

- `z_`: Potential new sample (required)
- `z`: Previous sample (required)

Returns: Log probability of proposal distribution

sample(z)

Samples a new value based on the previous z.
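The proposal's forward pass returns both the proposed point and the log ratio of proposal densities, which is exactly what a Metropolis-Hastings accept/reject step needs. A minimal MH sketch under these assumptions (the target is any object with a `log_prob` method; the import path follows the module layout shown above):

```python
import torch
import normflows as nf
from normflows.distributions.mh_proposal import DiagGaussianProposal

target = nf.distributions.TwoMoons()
proposal = DiagGaussianProposal((2,), scale=0.5)

z = torch.zeros(128, 2)  # 128 parallel chains
for _ in range(1000):
    # Symmetric Gaussian proposal: log_p_diff is zero here
    z_, log_p_diff = proposal(z)
    log_accept = target.log_prob(z_) - target.log_prob(z) + log_p_diff
    accept = torch.rand(len(z)) < torch.exp(log_accept)
    z = torch.where(accept[:, None], z_, z)
```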
prior

ImagePrior

Bases: Module

Intensities of an image determine the probability density of the prior.

Parameters:

- image: image as np matrix
- x_range: x range to position image at (default: [-3, 3])
- y_range: y range to position image at (default: [-3, 3])
- eps: small value to add to image to avoid log(0) problems (default: 1e-10)

Source code in normflows/distributions/prior.py:

```python
class ImagePrior(nn.Module):
    """
    Intensities of an image determine probability density of prior
    """

    def __init__(self, image, x_range=[-3, 3], y_range=[-3, 3], eps=1.0e-10):
        super().__init__()
        image_ = np.flip(image, 0).transpose() + eps
        self.image_cpu = torch.tensor(image_ / np.max(image_))
        self.image_size_cpu = self.image_cpu.size()
        self.x_range = torch.tensor(x_range)
        self.y_range = torch.tensor(y_range)

        self.register_buffer("image", self.image_cpu)
        self.register_buffer(
            "image_size", torch.tensor(self.image_size_cpu).unsqueeze(0)
        )
        self.register_buffer(
            "density", torch.log(self.image_cpu / torch.sum(self.image_cpu))
        )
        self.register_buffer(
            "scale",
            torch.tensor(
                [[self.x_range[1] - self.x_range[0], self.y_range[1] - self.y_range[0]]]
            ),
        )
        self.register_buffer(
            "shift", torch.tensor([[self.x_range[0], self.y_range[0]]])
        )

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        z_ = torch.clamp((z - self.shift) / self.scale, max=1, min=0)
        ind = (z_ * (self.image_size - 1)).long()
        return self.density[ind[:, 0], ind[:, 1]]

    def rejection_sampling(self, num_steps=1):
        """Perform rejection sampling on image distribution

        Args:
          num_steps: Number of rejection sampling steps to perform

        Returns:
          Accepted samples
        """
        z_ = torch.rand(
            (num_steps, 2), dtype=self.image.dtype, device=self.image.device
        )
        prob = torch.rand(num_steps, dtype=self.image.dtype, device=self.image.device)
        ind = (z_ * (self.image_size - 1)).long()
        intensity = self.image[ind[:, 0], ind[:, 1]]
        accept = intensity > prob
        z = z_[accept, :] * self.scale + self.shift
        return z

    def sample(self, num_samples=1):
        """Sample from image distribution through rejection sampling

        Args:
          num_samples: Number of samples to draw

        Returns:
          Samples
        """
        z = torch.ones((0, 2), dtype=self.image.dtype, device=self.image.device)
        while len(z) < num_samples:
            z_ = self.rejection_sampling(num_samples)
            ind = np.min([len(z_), num_samples - len(z)])
            z = torch.cat([z, z_[:ind, :]], 0)
        return z
```
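A minimal usage sketch, assuming ImagePrior is re-exported under nf.distributions as the source path suggests; the synthetic image is a placeholder for any non-negative 2d array, e.g. a grayscale picture:

```python
import numpy as np
import normflows as nf

# Any non-negative 2d array serves as an unnormalized density
xx, yy = np.meshgrid(np.linspace(-1, 1, 64), np.linspace(-1, 1, 64))
image = np.exp(-4 * (xx**2 + yy**2))  # bright blob in the center

prior = nf.distributions.ImagePrior(image, x_range=[-3, 3], y_range=[-3, 3])
z = prior.sample(256)      # rejection sampling inside x_range x y_range
log_p = prior.log_prob(z)  # log density looked up from pixel intensities
```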
PriorDistribution

Base class for prior distributions defined through their log density.

Source code in normflows/distributions/prior.py:

```python
class PriorDistribution:
    def __init__(self):
        raise NotImplementedError

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        raise NotImplementedError
```

Sinusoidal

Bases: PriorDistribution

Distribution in 2d with sinusoidal density given by

```
w_1(z) = sin(2*pi / period * z[0])
log(p) = - 1/2 * ((z[1] - w_1(z)) / scale) ** 2
```

A quartic envelope term, - 1/2 * (||z||_4 / (20 * scale)) ** 4, is added in the code so that p(z) is normalizable.

Parameters:

- scale: scale of the distribution, see formula
- period: period of the sinusoidal

Source code in normflows/distributions/prior.py:

````python
class Sinusoidal(PriorDistribution):
    def __init__(self, scale, period):
        """Distribution 2d with sinusoidal density given by

        ```
        w_1(z) = sin(2*pi / period * z[0])
        log(p) = - 1/2 * ((z[1] - w_1(z)) / scale) ** 2
        ```

        Args:
          scale: scale of the distribution, see formula
          period: period of the sinusoidal
        """
        self.scale = scale
        self.period = period

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        if z.dim() > 1:
            z_ = z.permute((z.dim() - 1,) + tuple(range(0, z.dim() - 1)))
        else:
            z_ = z
        w_1 = lambda x: torch.sin(2 * np.pi / self.period * z_[0])
        log_prob = (
            -0.5 * ((z_[1] - w_1(z_)) / (self.scale)) ** 2
            - 0.5 * (torch.norm(z_, dim=0, p=4) / (20 * self.scale)) ** 4
        )  # add Gaussian envelope for valid p(z)
        return log_prob
````
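These priors only implement log_prob, so a quick way to inspect any of them (Sinusoidal, Smiley, TwoModes, ...) is to evaluate the log density on a grid; a small sketch, assuming the classes are re-exported under nf.distributions:

```python
import torch
import normflows as nf

prior = nf.distributions.Sinusoidal(scale=0.5, period=4.0)

# Evaluate log p on a 2d grid, e.g. for a contour plot
grid = torch.linspace(-3, 3, 200)
xx, yy = torch.meshgrid(grid, grid, indexing="ij")
z = torch.stack([xx.flatten(), yy.flatten()], dim=1)  # shape (40000, 2)
log_p = prior.log_prob(z).reshape(200, 200)
```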
Sinusoidal_gap

Bases: PriorDistribution

Distribution in 2d with sinusoidal density with a gap, given by

```
w_1(z) = sin(2*pi / period * z[0])
w_2(z) = 3 * exp(-0.5 * ((z[0] - 1) / 0.6) ** 2)
log(p) = log(exp(-0.5 * ((z[1] - w_1(z)) / scale) ** 2)
         + exp(-0.5 * ((z[1] - w_1(z) + w_2(z)) / scale) ** 2))
```

plus the same quartic envelope term as for Sinusoidal.

Parameters:

- scale: scale of the distribution, see formula
- period: period of the sinusoidal

Source code in normflows/distributions/prior.py:

````python
class Sinusoidal_gap(PriorDistribution):
    def __init__(self, scale, period):
        """Distribution 2d with sinusoidal density with gap given by

        ```
        w_1(z) = sin(2*pi / period * z[0])
        w_2(z) = 3 * exp(-0.5 * ((z[0] - 1) / 0.6) ** 2)
        log(p) = log(exp(-0.5 * ((z[1] - w_1(z)) / scale) ** 2)
                 + exp(-0.5 * ((z[1] - w_1(z) + w_2(z)) / scale) ** 2))
        ```

        Args:
          scale: scale of the distribution, see formula
          period: period of the sinusoidal
        """
        self.scale = scale
        self.period = period
        self.w2_scale = 0.6
        self.w2_amp = 3.0
        self.w2_mu = 1.0

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        if z.dim() > 1:
            z_ = z.permute((z.dim() - 1,) + tuple(range(0, z.dim() - 1)))
        else:
            z_ = z
        w_1 = lambda x: torch.sin(2 * np.pi / self.period * z_[0])
        w_2 = lambda x: self.w2_amp * torch.exp(
            -0.5 * ((z_[0] - self.w2_mu) / self.w2_scale) ** 2
        )
        eps = torch.abs(w_2(z_) / 2)
        a = torch.abs(z_[1] - w_1(z_) + w_2(z_) / 2)
        log_prob = (
            -0.5 * ((a - eps) / self.scale) ** 2
            + torch.log(1 + torch.exp(-2 * (eps * a) / self.scale**2))
            - 0.5 * (torch.norm(z_, dim=0, p=4) / (20 * self.scale)) ** 4
        )
        return log_prob
````
Sinusoidal_split

Bases: PriorDistribution

Distribution in 2d with sinusoidal density with a split, given by

```
w_1(z) = sin(2*pi / period * z[0])
w_3(z) = 3 * sigmoid((z[0] - 1) / 0.3)
log(p) = log(exp(-0.5 * ((z[1] - w_1(z)) / scale) ** 2)
         + exp(-0.5 * ((z[1] - w_1(z) + w_3(z)) / scale) ** 2))
```

plus the same quartic envelope term as for Sinusoidal.

Parameters:

- scale: scale of the distribution, see formula
- period: period of the sinusoidal

Source code in normflows/distributions/prior.py:

````python
class Sinusoidal_split(PriorDistribution):
    def __init__(self, scale, period):
        """Distribution 2d with sinusoidal density with split given by

        ```
        w_1(z) = sin(2*pi / period * z[0])
        w_3(z) = 3 * sigmoid((z[0] - 1) / 0.3)
        log(p) = log(exp(-0.5 * ((z[1] - w_1(z)) / scale) ** 2)
                 + exp(-0.5 * ((z[1] - w_1(z) + w_3(z)) / scale) ** 2))
        ```

        Args:
          scale: scale of the distribution, see formula
          period: period of the sinusoidal
        """
        self.scale = scale
        self.period = period
        self.w3_scale = 0.3
        self.w3_amp = 3.0
        self.w3_mu = 1.0

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        if z.dim() > 1:
            z_ = z.permute((z.dim() - 1,) + tuple(range(0, z.dim() - 1)))
        else:
            z_ = z
        w_1 = lambda x: torch.sin(2 * np.pi / self.period * z_[0])
        w_3 = lambda x: self.w3_amp * torch.sigmoid(
            (z_[0] - self.w3_mu) / self.w3_scale
        )
        eps = torch.abs(w_3(z_) / 2)
        a = torch.abs(z_[1] - w_1(z_) + w_3(z_) / 2)
        log_prob = (
            -0.5 * ((a - eps) / (self.scale)) ** 2
            + torch.log(1 + torch.exp(-2 * (eps * a) / self.scale**2))
            - 0.5 * (torch.norm(z_, dim=0, p=4) / (20 * self.scale)) ** 4
        )
        return log_prob
````
Smiley

Bases: PriorDistribution

Distribution in 2d shaped like a smiley :)

Parameters:

- scale: scale of the smiley

Source code in normflows/distributions/prior.py:

```python
class Smiley(PriorDistribution):
    def __init__(self, scale):
        """Distribution 2d of a smiley :)

        Args:
          scale: scale of the smiley
        """
        self.scale = scale
        self.loc = 2.0

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        if z.dim() > 1:
            z_ = z.permute((z.dim() - 1,) + tuple(range(0, z.dim() - 1)))
        else:
            z_ = z
        log_prob = (
            -0.5 * ((torch.norm(z_, dim=0) - self.loc) / (2 * self.scale)) ** 2
            - 0.5 * ((torch.abs(z_[1] + 0.8) - 1.2) / (2 * self.scale)) ** 2
        )
        return log_prob
```
TwoModes

Bases: PriorDistribution

Distribution in 2d with two modes at z[0] = -loc and z[0] = loc, following the density

```
log(p) = - 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2
         + log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2)
         + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2))
```

Parameters:

- loc: distance of modes from the origin
- scale: scale of modes

Source code in normflows/distributions/prior.py:

````python
class TwoModes(PriorDistribution):
    def __init__(self, loc, scale):
        """Distribution 2d with two modes

        Distribution 2d with two modes at ```z[0] = -loc``` and
        ```z[0] = loc``` following the density

        ```
        log(p) = - 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2
                 + log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2)
                 + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2))
        ```

        Args:
          loc: distance of modes from the origin
          scale: scale of modes
        """
        self.loc = loc
        self.scale = scale

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        a = torch.abs(z[:, 0])
        eps = torch.abs(torch.tensor(self.loc))

        log_prob = (
            -0.5 * ((torch.norm(z, dim=1) - self.loc) / (2 * self.scale)) ** 2
            - 0.5 * ((a - eps) / (3 * self.scale)) ** 2
            + torch.log(1 + torch.exp(-2 * (a * eps) / (3 * self.scale) ** 2))
        )
        return log_prob
````
prior_test

target

CircularGaussianMixture

Bases: Module

Two-dimensional Gaussian mixture arranged in a circle.

Parameters:

- n_modes: Number of modes (default: 8)

Source code in normflows/distributions/target.py:

```python
class CircularGaussianMixture(nn.Module):
    """
    Two-dimensional Gaussian mixture arranged in a circle
    """

    def __init__(self, n_modes=8):
        """Constructor

        Args:
          n_modes: Number of modes
        """
        super(CircularGaussianMixture, self).__init__()
        self.n_modes = n_modes
        self.register_buffer(
            "scale", torch.tensor(2 / 3 * np.sin(np.pi / self.n_modes)).float()
        )

    def log_prob(self, z):
        d = torch.zeros((len(z), 0), dtype=z.dtype, device=z.device)
        for i in range(self.n_modes):
            d_ = (
                (z[:, 0] - 2 * np.sin(2 * np.pi / self.n_modes * i)) ** 2
                + (z[:, 1] - 2 * np.cos(2 * np.pi / self.n_modes * i)) ** 2
            ) / (2 * self.scale**2)
            d = torch.cat((d, d_[:, None]), 1)
        log_p = -torch.log(
            2 * np.pi * self.scale**2 * self.n_modes
        ) + torch.logsumexp(-d, 1)
        return log_p

    def sample(self, num_samples=1):
        eps = torch.randn(
            (num_samples, 2), dtype=self.scale.dtype, device=self.scale.device
        )
        phi = (
            2 * np.pi / self.n_modes
            * torch.randint(0, self.n_modes, (num_samples,), device=self.scale.device)
        )
        loc = torch.stack((2 * torch.sin(phi), 2 * torch.cos(phi)), 1).type(eps.dtype)
        return eps * self.scale + loc
```
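A short sampling sketch; the export path nf.distributions.CircularGaussianMixture is assumed to follow the pattern of the other target distributions:

```python
import normflows as nf

# Eight Gaussians on a circle of radius 2
target = nf.distributions.CircularGaussianMixture(n_modes=8)
x = target.sample(num_samples=1024)  # shape (1024, 2)
log_p = target.log_prob(x)           # exact mixture log density
```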
Source code in normflows/distributions/target.py:

```python
class ConditionalDiagGaussian(Target):
    """
    Gaussian distribution conditioned on its mean and standard
    deviation

    The first half of the entries of the condition, also called context,
    are the mean, while the second half are the standard deviation.
    """

    def log_prob(self, z, context=None):
        d = z.shape[-1]
        loc = context[:, :d]
        scale = context[:, d:]
        log_p = -0.5 * d * np.log(2 * np.pi) - torch.sum(
            torch.log(scale) + 0.5 * torch.pow((z - loc) / scale, 2), dim=-1
        )
        return log_p

    def sample(self, num_samples=1, context=None):
        d = context.shape[-1] // 2
        loc = context[:, :d]
        scale = context[:, d:]
        eps = torch.randn(
            (num_samples, d), dtype=context.dtype, device=context.device
        )
        z = loc + scale * eps
        return z
```

RingMixture

Bases: Target

Mixture of ring distributions in two dimensions.

Source code in normflows/distributions/target.py:

```python
class RingMixture(Target):
    """
    Mixture of ring distributions in two dimensions
    """

    def __init__(self, n_rings=2):
        super().__init__()
        self.n_dims = 2
        self.max_log_prob = 0.0
        self.n_rings = n_rings
        self.scale = 1 / 4 / self.n_rings

    def log_prob(self, z):
        d = torch.zeros((len(z), 0), dtype=z.dtype, device=z.device)
        for i in range(self.n_rings):
            d_ = ((torch.norm(z, dim=1) - 2 / self.n_rings * (i + 1)) ** 2) / (
                2 * self.scale**2
            )
            d = torch.cat((d, d_[:, None]), 1)
        return torch.logsumexp(-d, 1)
```
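A hedged usage sketch for ConditionalDiagGaussian: the context concatenates mean and standard deviation along the feature axis, and its batch size should match num_samples so the broadcast in sample works out:

```python
import torch
import normflows as nf

target = nf.distributions.ConditionalDiagGaussian()

# Context = [mean | std]; here a 2d Gaussian with mean 0 and std 1
context = torch.cat([torch.zeros(5, 2), torch.ones(5, 2)], dim=1)  # (5, 4)
z = target.sample(num_samples=5, context=context)  # one sample per context row
log_p = target.log_prob(z, context)
```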
Target

Bases: Module

Sample target distributions to test models. Subclasses implement log_prob and set the attributes n_dims and max_log_prob, which the built-in rejection sampler relies on.

Parameters:

- prop_scale: Scale for the uniform proposal (default: torch.tensor(6.0))
- prop_shift: Shift for the uniform proposal (default: torch.tensor(-3.0))

Source code in normflows/distributions/target.py:

```python
class Target(nn.Module):
    """
    Sample target distributions to test models
    """

    def __init__(self, prop_scale=torch.tensor(6.0), prop_shift=torch.tensor(-3.0)):
        """Constructor

        Args:
          prop_scale: Scale for the uniform proposal
          prop_shift: Shift for the uniform proposal
        """
        super().__init__()
        self.register_buffer("prop_scale", prop_scale)
        self.register_buffer("prop_shift", prop_shift)

    def log_prob(self, z):
        """
        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        raise NotImplementedError("The log probability is not implemented yet.")

    def rejection_sampling(self, num_steps=1):
        """Perform rejection sampling on the target distribution

        Args:
          num_steps: Number of rejection sampling steps to perform

        Returns:
          Accepted samples
        """
        eps = torch.rand(
            (num_steps, self.n_dims),
            dtype=self.prop_scale.dtype,
            device=self.prop_scale.device,
        )
        z_ = self.prop_scale * eps + self.prop_shift
        prob = torch.rand(
            num_steps, dtype=self.prop_scale.dtype, device=self.prop_scale.device
        )
        prob_ = torch.exp(self.log_prob(z_) - self.max_log_prob)
        accept = prob_ > prob
        z = z_[accept, :]
        return z

    def sample(self, num_samples=1):
        """Sample from the target distribution through rejection sampling

        Args:
          num_samples: Number of samples to draw

        Returns:
          Samples
        """
        z = torch.zeros(
            (0, self.n_dims), dtype=self.prop_scale.dtype, device=self.prop_scale.device
        )
        while len(z) < num_samples:
            z_ = self.rejection_sampling(num_samples)
            ind = np.min([len(z_), num_samples - len(z)])
            z = torch.cat([z, z_[:ind, :]], 0)
        return z
```
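Putting the pieces together, a custom target only needs log_prob plus the two attributes above; the Cross class below is our own illustration, not part of the library:

```python
import torch
import normflows as nf

class Cross(nf.distributions.Target):
    """Illustrative 2d target: two perpendicular Gaussian ridges."""

    def __init__(self):
        super().__init__()  # default uniform proposal on [-3, 3]^2
        self.n_dims = 2
        self.max_log_prob = 0.0

    def log_prob(self, z):
        # Unnormalized log density; the -1 offset keeps it below max_log_prob,
        # so the acceptance probability exp(log_prob - max_log_prob) stays <= 1
        ridge1 = torch.exp(-0.5 * (z[:, 0] / 0.3) ** 2 - 0.5 * (z[:, 1] / 2.0) ** 2)
        ridge2 = torch.exp(-0.5 * (z[:, 1] / 0.3) ** 2 - 0.5 * (z[:, 0] / 2.0) ** 2)
        return torch.log(ridge1 + ridge2) - 1.0

samples = Cross().sample(1000)  # drawn via the inherited rejection sampler
```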
TwoIndependent

Bases: Target

Target distribution that combines two independent distributions of equal size into one distribution. This is needed for Augmented Normalizing Flows, see https://arxiv.org/abs/2002.07101

Source code in normflows/distributions/target.py:

```python
class TwoIndependent(Target):
    """
    Target distribution that combines two independent distributions of
    equal size into one distribution. This is needed for Augmented
    Normalizing Flows, see https://arxiv.org/abs/2002.07101
    """

    def __init__(self, target1, target2):
        super().__init__()
        self.target1 = target1
        self.target2 = target2
        self.split = Split(mode="channel")

    def log_prob(self, z):
        z1, z2 = self.split(z)[0]
        return self.target1.log_prob(z1) + self.target2.log_prob(z2)

    def sample(self, num_samples=1):
        z1 = self.target1.sample(num_samples)
        z2 = self.target2.sample(num_samples)
        return self.split.inverse([z1, z2])[0]
```

TwoMoons

Bases: Target

Bimodal two-dimensional distribution.

Source code in normflows/distributions/target.py:

````python
class TwoMoons(Target):
    """
    Bimodal two-dimensional distribution
    """

    def __init__(self):
        super().__init__()
        self.n_dims = 2
        self.max_log_prob = 0.0

    def log_prob(self, z):
        """
        ```
        log(p) = - 1/2 * ((norm(z) - 2) / 0.2) ** 2
                 + log(exp(-1/2 * ((z[0] - 2) / 0.3) ** 2)
                 + exp(-1/2 * ((z[0] + 2) / 0.3) ** 2))
        ```

        Args:
          z: value or batch of latent variable

        Returns:
          log probability of the distribution for z
        """
        a = torch.abs(z[:, 0])
        log_prob = (
            -0.5 * ((torch.norm(z, dim=1) - 2) / 0.2) ** 2
            - 0.5 * ((a - 2) / 0.3) ** 2
            + torch.log(1 + torch.exp(-4 * a / 0.09))
        )
        return log_prob
````

target_test

flows

affine

autoregressive
Autoregressive

Bases: Flow

Transforms each input variable with an invertible elementwise transformation. The parameters of each elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.

NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.

Source code in normflows/flows/affine/autoregressive.py:

```python
class Autoregressive(Flow):
    """Transforms each input variable with an invertible elementwise transformation.

    The parameters of each invertible elementwise transformation can be functions
    of previous input variables, but they must not depend on the current or any
    following input variables.

    **NOTE** Calculating the inverse transform is D times slower than calculating the
    forward transform, where D is the dimensionality of the input to the transform.
    """

    def __init__(self, autoregressive_net):
        super(Autoregressive, self).__init__()
        self.autoregressive_net = autoregressive_net

    def forward(self, inputs, context=None):
        autoregressive_params = self.autoregressive_net(inputs, context)
        outputs, logabsdet = self._elementwise_forward(inputs, autoregressive_params)
        return outputs, logabsdet

    def inverse(self, inputs, context=None):
        num_inputs = np.prod(inputs.shape[1:])
        outputs = torch.zeros_like(inputs)
        logabsdet = None
        for _ in range(num_inputs):
            autoregressive_params = self.autoregressive_net(outputs, context)
            outputs, logabsdet = self._elementwise_inverse(
                inputs, autoregressive_params
            )
        return outputs, logabsdet

    def _output_dim_multiplier(self):
        raise NotImplementedError()

    def _elementwise_forward(self, inputs, autoregressive_params):
        raise NotImplementedError()

    def _elementwise_inverse(self, inputs, autoregressive_params):
        raise NotImplementedError()
```

MaskedAffineAutoregressive

Bases: Autoregressive

Masked affine autoregressive flow, mostly referred to as Masked Autoregressive Flow (MAF), see arXiv 1705.07057 (https://arxiv.org/abs/1705.07057).

Parameters:

- features: Number of features/input dimensions
- hidden_features: Number of hidden units in the MADE network
- context_features: Number of context/conditional features (default: None)
- num_blocks: Number of blocks in the MADE network (default: 2)
- use_residual_blocks: Flag whether residual blocks should be used (default: True)
- random_mask: Flag whether to use random masks (default: False)
- activation: Activation function to be used in the MADE network (default: F.relu)
- dropout_probability: Dropout probability in the MADE network (default: 0.0)
- use_batch_norm: Flag whether batch normalization should be used (default: False)

Source code in normflows/flows/affine/autoregressive.py:

```python
class MaskedAffineAutoregressive(Autoregressive):
    """Masked affine autoregressive flow, mostly referred to as
    Masked Autoregressive Flow (MAF), see
    [arXiv 1705.07057](https://arxiv.org/abs/1705.07057).
    """

    def __init__(
        self,
        features,
        hidden_features,
        context_features=None,
        num_blocks=2,
        use_residual_blocks=True,
        random_mask=False,
        activation=F.relu,
        dropout_probability=0.0,
        use_batch_norm=False,
    ):
        self.features = features
        made = made_module.MADE(
            features=features,
            hidden_features=hidden_features,
            context_features=context_features,
            num_blocks=num_blocks,
            output_multiplier=self._output_dim_multiplier(),
            use_residual_blocks=use_residual_blocks,
            random_mask=random_mask,
            activation=activation,
            dropout_probability=dropout_probability,
            use_batch_norm=use_batch_norm,
        )
        super(MaskedAffineAutoregressive, self).__init__(made)

    def _output_dim_multiplier(self):
        return 2

    def _elementwise_forward(self, inputs, autoregressive_params):
        unconstrained_scale, shift = self._unconstrained_scale_and_shift(
            autoregressive_params
        )
        scale = torch.sigmoid(unconstrained_scale + 2.0) + 1e-3
        log_scale = torch.log(scale)
        outputs = scale * inputs + shift
        logabsdet = utils.sum_except_batch(log_scale, num_batch_dims=1)
        return outputs, logabsdet

    def _elementwise_inverse(self, inputs, autoregressive_params):
        unconstrained_scale, shift = self._unconstrained_scale_and_shift(
            autoregressive_params
        )
        scale = torch.sigmoid(unconstrained_scale + 2.0) + 1e-3
        log_scale = torch.log(scale)
        outputs = (inputs - shift) / scale
        logabsdet = -utils.sum_except_batch(log_scale, num_batch_dims=1)
        return outputs, logabsdet

    def _unconstrained_scale_and_shift(self, autoregressive_params):
        autoregressive_params = autoregressive_params.view(
            -1, self.features, self._output_dim_multiplier()
        )
        unconstrained_scale = autoregressive_params[..., 0]
        shift = autoregressive_params[..., 1]
        return unconstrained_scale, shift
```
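As a usage sketch (hyperparameter choices are arbitrary), a MAF model for 4-dimensional data could be assembled as follows; in practice one typically inserts permutations between the autoregressive layers so every variable is eventually conditioned on the others:

```python
import normflows as nf

latent_size = 4
flows = [
    nf.flows.MaskedAffineAutoregressive(
        features=latent_size, hidden_features=64, num_blocks=2
    )
    for _ in range(8)
]
base = nf.distributions.DiagGaussian(latent_size)
model = nf.NormalizingFlow(q0=base, flows=flows)
```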
autoregressive_test

coupling

AffineConstFlow

Bases: Flow

Scales and shifts with learned constants per dimension. In the NICE paper there is a scaling layer, which is a special case of this where t is None.

Parameters:

- shape: Shape of the coupling layer
- scale: Flag whether to apply scaling (default: True)
- shift: Flag whether to apply shift (default: True)

Source code in normflows/flows/affine/coupling.py:

```python
class AffineConstFlow(Flow):
    """
    scales and shifts with learned constants per dimension. In the NICE paper there is a
    scaling layer which is a special case of this where t is None
    """

    def __init__(self, shape, scale=True, shift=True):
        """Constructor

        Args:
          shape: Shape of the coupling layer
          scale: Flag whether to apply scaling
          shift: Flag whether to apply shift
        """
        super().__init__()
        if scale:
            self.s = nn.Parameter(torch.zeros(shape)[None])
        else:
            self.register_buffer("s", torch.zeros(shape)[None])
        if shift:
            self.t = nn.Parameter(torch.zeros(shape)[None])
        else:
            self.register_buffer("t", torch.zeros(shape)[None])
        self.n_dim = self.s.dim()
        self.batch_dims = torch.nonzero(
            torch.tensor(self.s.shape) == 1, as_tuple=False
        )[:, 0].tolist()

    def forward(self, z):
        z_ = z * torch.exp(self.s) + self.t
        if len(self.batch_dims) > 1:
            prod_batch_dims = np.prod([z.size(i) for i in self.batch_dims[1:]])
        else:
            prod_batch_dims = 1
        log_det = prod_batch_dims * torch.sum(self.s)
        return z_, log_det

    def inverse(self, z):
        z_ = (z - self.t) * torch.exp(-self.s)
        if len(self.batch_dims) > 1:
            prod_batch_dims = np.prod([z.size(i) for i in self.batch_dims[1:]])
        else:
            prod_batch_dims = 1
        log_det = -prod_batch_dims * torch.sum(self.s)
        return z_, log_det
```
AffineCoupling

Bases: Flow

Affine coupling layer as introduced in the RealNVP paper, see arXiv: 1605.08803.

Parameters:

- param_map: Maps features to shift and scale parameter (if applicable)
- scale: Flag whether scale shall be applied (default: True)
- scale_map: Map to be applied to the scale parameter; can be 'exp' as in RealNVP or 'sigmoid' as in Glow, while 'sigmoid_inv' uses a multiplicative sigmoid scale when sampling from the model (default: 'exp')

forward(z): z is a list [z1, z2]; z1 is left constant and the affine map is applied to z2 with parameters depending on z1.

Source code in normflows/flows/affine/coupling.py:

```python
class AffineCoupling(Flow):
    """
    Affine Coupling layer as introduced in the RealNVP paper, see arXiv: 1605.08803
    """

    def __init__(self, param_map, scale=True, scale_map="exp"):
        super().__init__()
        self.add_module("param_map", param_map)
        self.scale = scale
        self.scale_map = scale_map

    def forward(self, z):
        """
        z is a list of z1 and z2; z = [z1, z2]

        z1 is left constant and the affine map is applied to z2 with
        parameters depending on z1
        """
        z1, z2 = z
        param = self.param_map(z1)
        if self.scale:
            shift = param[:, 0::2, ...]
            scale_ = param[:, 1::2, ...]
            if self.scale_map == "exp":
                z2 = z2 * torch.exp(scale_) + shift
                log_det = torch.sum(scale_, dim=list(range(1, shift.dim())))
            elif self.scale_map == "sigmoid":
                scale = torch.sigmoid(scale_ + 2)
                z2 = z2 / scale + shift
                log_det = -torch.sum(torch.log(scale), dim=list(range(1, shift.dim())))
            elif self.scale_map == "sigmoid_inv":
                scale = torch.sigmoid(scale_ + 2)
                z2 = z2 * scale + shift
                log_det = torch.sum(torch.log(scale), dim=list(range(1, shift.dim())))
            else:
                raise NotImplementedError("This scale map is not implemented.")
        else:
            z2 = z2 + param
            log_det = zero_log_det_like_z(z2)
        return [z1, z2], log_det

    def inverse(self, z):
        z1, z2 = z
        param = self.param_map(z1)
        if self.scale:
            shift = param[:, 0::2, ...]
            scale_ = param[:, 1::2, ...]
            if self.scale_map == "exp":
                z2 = (z2 - shift) * torch.exp(-scale_)
                log_det = -torch.sum(scale_, dim=list(range(1, shift.dim())))
            elif self.scale_map == "sigmoid":
                scale = torch.sigmoid(scale_ + 2)
                z2 = (z2 - shift) * scale
                log_det = torch.sum(torch.log(scale), dim=list(range(1, shift.dim())))
            elif self.scale_map == "sigmoid_inv":
                scale = torch.sigmoid(scale_ + 2)
                z2 = (z2 - shift) / scale
                log_det = -torch.sum(torch.log(scale), dim=list(range(1, shift.dim())))
            else:
                raise NotImplementedError("This scale map is not implemented.")
        else:
            z2 = z2 - param
            log_det = zero_log_det_like_z(z2)
        return [z1, z2], log_det
```
AffineCouplingBlock

Bases: Flow

Affine coupling layer including the split and merge operations, so it can be applied directly to a full feature tensor.

Parameters:

- param_map: Maps features to shift and scale parameter (if applicable)
- scale: Flag whether scale shall be applied (default: True)
- scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow (default: 'exp')
- split_mode: Splitting mode, for possible values see Split class (default: 'channel')

Source code in normflows/flows/affine/coupling.py:

```python
class AffineCouplingBlock(Flow):
    """
    Affine Coupling layer including split and merge operation
    """

    def __init__(self, param_map, scale=True, scale_map="exp", split_mode="channel"):
        super().__init__()
        self.flows = nn.ModuleList([])
        # Split layer
        self.flows += [Split(split_mode)]
        # Affine coupling layer
        self.flows += [AffineCoupling(param_map, scale, scale_map)]
        # Merge layer
        self.flows += [Merge(split_mode)]

    def forward(self, z):
        log_det_tot = torch.zeros(z.shape[0], dtype=z.dtype, device=z.device)
        for flow in self.flows:
            z, log_det = flow(z)
            log_det_tot += log_det
        return z, log_det_tot

    def inverse(self, z):
        log_det_tot = torch.zeros(z.shape[0], dtype=z.dtype, device=z.device)
        for i in range(len(self.flows) - 1, -1, -1):
            z, log_det = self.flows[i].inverse(z)
            log_det_tot += log_det
        return z, log_det_tot
```
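For instance, a small Real NVP-style stack for vector data could be built from coupling blocks; the hyperparameters here are arbitrary, and the Permute layer (assumed available as nf.flows.Permute with a 'swap' mode) alternates which half of the variables gets transformed:

```python
import normflows as nf

latent_size = 4
flows = []
for _ in range(8):
    # param_map takes z1 (latent_size // 2 dims) and outputs interleaved
    # shift and scale for z2: 2 * (latent_size // 2) = latent_size values
    param_map = nf.nets.MLP([latent_size // 2, 32, latent_size], init_zeros=True)
    flows.append(nf.flows.AffineCouplingBlock(param_map, scale=True, scale_map="exp"))
    flows.append(nf.flows.Permute(latent_size, mode="swap"))
```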
CCAffineConst

Bases: Flow

Affine constant flow layer with class-conditional parameters. The forward and inverse maps take a batch of class encodings y (e.g. one-hot vectors of size num_classes) in addition to z.

Parameters:

- shape: Shape of the layer (an int is promoted to a one-element tuple)
- num_classes: Number of classes conditioned on

Source code in normflows/flows/affine/coupling.py:

```python
class CCAffineConst(Flow):
    """
    Affine constant flow layer with class-conditional parameters
    """

    def __init__(self, shape, num_classes):
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        self.shape = shape
        self.s = nn.Parameter(torch.zeros(shape)[None])
        self.t = nn.Parameter(torch.zeros(shape)[None])
        self.s_cc = nn.Parameter(torch.zeros(num_classes, np.prod(shape)))
        self.t_cc = nn.Parameter(torch.zeros(num_classes, np.prod(shape)))
        self.n_dim = self.s.dim()
        self.batch_dims = torch.nonzero(
            torch.tensor(self.s.shape) == 1, as_tuple=False
        )[:, 0].tolist()

    def forward(self, z, y):
        s = self.s + (y @ self.s_cc).view(-1, *self.shape)
        t = self.t + (y @ self.t_cc).view(-1, *self.shape)
        z_ = z * torch.exp(s) + t
        if len(self.batch_dims) > 1:
            prod_batch_dims = np.prod([z.size(i) for i in self.batch_dims[1:]])
        else:
            prod_batch_dims = 1
        log_det = prod_batch_dims * torch.sum(s, dim=list(range(1, self.n_dim)))
        return z_, log_det

    def inverse(self, z, y):
        s = self.s + (y @ self.s_cc).view(-1, *self.shape)
        t = self.t + (y @ self.t_cc).view(-1, *self.shape)
        z_ = (z - t) * torch.exp(-s)
        if len(self.batch_dims) > 1:
            prod_batch_dims = np.prod([z.size(i) for i in self.batch_dims[1:]])
        else:
            prod_batch_dims = 1
        log_det = -prod_batch_dims * torch.sum(s, dim=list(range(1, self.n_dim)))
        return z_, log_det
```
MaskedAffineFlow

Bases: Flow

RealNVP as introduced in arXiv: 1605.08803. Masked affine flow:

```
f(z) = b * z + (1 - b) * (z * exp(s(b * z)) + t)
```

- class AffineHalfFlow(Flow): is MaskedAffineFlow with alternating bit mask
- NICE is AffineFlow with only shifts (volume preserving)

Parameters:

- b: mask for features, i.e. tensor of the same size as a latent data point, filled with 0s and 1s
- t: translation mapping, i.e. a neural network whose first input dimension is the batch dim; if None, no translation is applied (default: None)
- s: scale mapping, i.e. a neural network whose first input dimension is the batch dim; if None, no scale is applied (default: None)

Source code in normflows/flows/affine/coupling.py:

````python
class MaskedAffineFlow(Flow):
    """RealNVP as introduced in [arXiv: 1605.08803](https://arxiv.org/abs/1605.08803)

    Masked affine flow:

    ```
    f(z) = b * z + (1 - b) * (z * exp(s(b * z)) + t)
    ```

    - class AffineHalfFlow(Flow): is MaskedAffineFlow with alternating bit mask
    - NICE is AffineFlow with only shifts (volume preserving)
    """

    def __init__(self, b, t=None, s=None):
        super().__init__()
        self.b_cpu = b.view(1, *b.size())
        self.register_buffer("b", self.b_cpu)

        if s is None:
            self.s = torch.zeros_like
        else:
            self.add_module("s", s)

        if t is None:
            self.t = torch.zeros_like
        else:
            self.add_module("t", t)

    def forward(self, z):
        z_masked = self.b * z
        scale = self.s(z_masked)
        nan = torch.tensor(np.nan, dtype=z.dtype, device=z.device)
        scale = torch.where(torch.isfinite(scale), scale, nan)
        trans = self.t(z_masked)
        trans = torch.where(torch.isfinite(trans), trans, nan)
        z_ = z_masked + (1 - self.b) * (z * torch.exp(scale) + trans)
        log_det = torch.sum((1 - self.b) * scale, dim=list(range(1, self.b.dim())))
        return z_, log_det

    def inverse(self, z):
        z_masked = self.b * z
        scale = self.s(z_masked)
        nan = torch.tensor(np.nan, dtype=z.dtype, device=z.device)
        scale = torch.where(torch.isfinite(scale), scale, nan)
        trans = self.t(z_masked)
        trans = torch.where(torch.isfinite(trans), trans, nan)
        z_ = z_masked + (1 - self.b) * (z - trans) * torch.exp(-scale)
        log_det = -torch.sum((1 - self.b) * scale, dim=list(range(1, self.b.dim())))
        return z_, log_det
````
batch_dims [ 1 :]]) else : prod_batch_dims = 1 log_det = - prod_batch_dims * torch . sum ( s , dim = list ( range ( 1 , self . n_dim ))) return z_ , log_det MaskedAffineFlow Bases: Flow RealNVP as introduced in arXiv: 1605.08803 Masked affine flow: f(z) = b * z + (1 - b) * (z * exp(s(b * z)) + t) class AffineHalfFlow(Flow): is MaskedAffineFlow with alternating bit mask NICE is AffineFlow with only shifts (volume preserving) Source code in normflows/flows/affine/coupling.py 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 class MaskedAffineFlow ( Flow ): \"\"\"RealNVP as introduced in [arXiv: 1605.08803](https://arxiv.org/abs/1605.08803) Masked affine flow: ``` f(z) = b * z + (1 - b) * (z * exp(s(b * z)) + t) ``` - class AffineHalfFlow(Flow): is MaskedAffineFlow with alternating bit mask - NICE is AffineFlow with only shifts (volume preserving) \"\"\" def __init__ ( self , b , t = None , s = None ): \"\"\"Constructor Args: b: mask for features, i.e. tensor of same size as latent data point filled with 0s and 1s t: translation mapping, i.e. neural network, where first input dimension is batch dim, if None no translation is applied s: scale mapping, i.e. neural network, where first input dimension is batch dim, if None no scale is applied \"\"\" super () . __init__ () self . b_cpu = b . view ( 1 , * b . size ()) self . register_buffer ( \"b\" , self . b_cpu ) if s is None : self . s = torch . zeros_like else : self . add_module ( \"s\" , s ) if t is None : self . t = torch . zeros_like else : self . add_module ( \"t\" , t ) def forward ( self , z ): z_masked = self . b * z scale = self . s ( z_masked ) nan = torch . tensor ( np . nan , dtype = z . dtype , device = z . device ) scale = torch . where ( torch . isfinite ( scale ), scale , nan ) trans = self . t ( z_masked ) trans = torch . where ( torch . isfinite ( trans ), trans , nan ) z_ = z_masked + ( 1 - self . b ) * ( z * torch . exp ( scale ) + trans ) log_det = torch . sum (( 1 - self . b ) * scale , dim = list ( range ( 1 , self . b . dim ()))) return z_ , log_det def inverse ( self , z ): z_masked = self . b * z scale = self . s ( z_masked ) nan = torch . tensor ( np . nan , dtype = z . dtype , device = z . device ) scale = torch . where ( torch . isfinite ( scale ), scale , nan ) trans = self . t ( z_masked ) trans = torch . where ( torch . isfinite ( trans ), trans , nan ) z_ = z_masked + ( 1 - self . b ) * ( z - trans ) * torch . exp ( - scale ) log_det = - torch . sum (( 1 - self . b ) * scale , dim = list ( range ( 1 , self . b . dim ()))) return z_ , log_det __init__ ( b , t = None , s = None ) Constructor Parameters: Name Type Description Default b mask for features, i.e. tensor of same size as latent data point filled with 0s and 1s required t translation mapping, i.e. neural network, where first input dimension is batch dim, if None no translation is applied None s scale mapping, i.e. neural network, where first input dimension is batch dim, if None no scale is applied None Source code in normflows/flows/affine/coupling.py 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 def __init__ ( self , b , t = None , s = None ): \"\"\"Constructor Args: b: mask for features, i.e. tensor of same size as latent data point filled with 0s and 1s t: translation mapping, i.e. 
coupling_test

glow

GlowBlock

Bases: Flow

Glow: Generative Flow with Invertible 1×1 Convolutions, arXiv: 1807.03039

One block of the Glow model, comprised of

- MaskedAffineFlow (affine coupling layer)
- Invertible1x1Conv (dropped if there is only one channel)
- ActNorm (first batch used for initialization)

Source code in normflows/flows/affine/glow.py

class GlowBlock(Flow):
    """Glow: Generative Flow with Invertible 1x1 Convolutions,
    [arXiv: 1807.03039](https://arxiv.org/abs/1807.03039)

    One Block of the Glow model, comprised of

    - MaskedAffineFlow (affine coupling layer)
    - Invertible1x1Conv (dropped if there is only one channel)
    - ActNorm (first batch used for initialization)
    """

    def __init__(
        self,
        channels,
        hidden_channels,
        scale=True,
        scale_map="sigmoid",
        split_mode="channel",
        leaky=0.0,
        init_zeros=True,
        use_lu=True,
        net_actnorm=False,
    ):
        """Constructor

        Args:
          channels: Number of channels of the data
          hidden_channels: Number of channels in the hidden layer of the ConvNet
          scale: Flag, whether to include scale in affine coupling layer
          scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow
          split_mode: Splitting mode, for possible values see Split class
          leaky: Leaky parameter of LeakyReLUs of ConvNet2d
          init_zeros: Flag whether to initialize last conv layer with zeros
          use_lu: Flag whether to parametrize weights through the LU decomposition in invertible 1x1 convolution layers
          net_actnorm: Flag whether to use activation normalization in the ConvNet
        """
        super().__init__()
        self.flows = nn.ModuleList([])
        # Coupling layer
        kernel_size = (3, 1, 3)
        num_param = 2 if scale else 1
        if "channel" == split_mode:
            channels_ = ((channels + 1) // 2,) + 2 * (hidden_channels,)
            channels_ += (num_param * (channels // 2),)
        elif "channel_inv" == split_mode:
            channels_ = (channels // 2,) + 2 * (hidden_channels,)
            channels_ += (num_param * ((channels + 1) // 2),)
        elif "checkerboard" in split_mode:
            channels_ = (channels,) + 2 * (hidden_channels,)
            channels_ += (num_param * channels,)
        else:
            raise NotImplementedError("Mode " + split_mode + " is not implemented.")
        param_map = nets.ConvNet2d(
            channels_, kernel_size, leaky, init_zeros, actnorm=net_actnorm
        )
        self.flows += [AffineCouplingBlock(param_map, scale, scale_map, split_mode)]
        # Invertible 1x1 convolution
        if channels > 1:
            self.flows += [Invertible1x1Conv(channels, use_lu)]
        # Activation normalization
        self.flows += [ActNorm((channels,) + (1, 1))]

    def forward(self, z):
        log_det_tot = torch.zeros(z.shape[0], dtype=z.dtype, device=z.device)
        for flow in self.flows:
            z, log_det = flow(z)
            log_det_tot += log_det
        return z, log_det_tot

    def inverse(self, z):
        log_det_tot = torch.zeros(z.shape[0], dtype=z.dtype, device=z.device)
        for i in range(len(self.flows) - 1, -1, -1):
            z, log_det = self.flows[i].inverse(z)
            log_det_tot += log_det
        return z, log_det_tot
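A usage sketch under the assumption that GlowBlock is exported under nf.flows; the batch and spatial sizes are arbitrary. Note that the ActNorm inside the block does its data-dependent initialization on the first batch it sees.

# One Glow block on 3-channel 8x8 inputs (NCHW).
import torch
import normflows as nf

block = nf.flows.GlowBlock(channels=3, hidden_channels=32)

x = torch.randn(10, 3, 8, 8)
z, log_det = block(x)                    # first call also initializes ActNorm
x_back, _ = block.inverse(z)
print(z.shape, log_det.shape)            # torch.Size([10, 3, 8, 8]) torch.Size([10])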
glow_test

base

Composite

Bases: Flow

Composes several flows into one, in the order they are given.

Source code in normflows/flows/base.py

class Composite(Flow):
    """
    Composes several flows into one, in the order they are given.
    """

    def __init__(self, flows):
        """Constructor

        Args:
          flows: Iterable of flows to composite
        """
        super().__init__()
        self._flows = nn.ModuleList(flows)

    @staticmethod
    def _cascade(inputs, funcs):
        batch_size = inputs.shape[0]
        outputs = inputs
        total_logabsdet = torch.zeros(batch_size)
        for func in funcs:
            outputs, logabsdet = func(outputs)
            total_logabsdet += logabsdet
        return outputs, total_logabsdet

    def forward(self, inputs):
        funcs = self._flows
        return self._cascade(inputs, funcs)

    def inverse(self, inputs):
        funcs = (flow.inverse for flow in self._flows[::-1])
        return self._cascade(inputs, funcs)

Flow

Bases: Module

Generic class for flow functions.

Source code in normflows/flows/base.py

class Flow(nn.Module):
    """
    Generic class for flow functions
    """

    def __init__(self):
        super().__init__()

    def forward(self, z):
        """
        Args:
          z: input variable, first dimension is batch dim

        Returns:
          transformed z and log of absolute determinant
        """
        raise NotImplementedError("Forward pass has not been implemented.")

    def inverse(self, z):
        raise NotImplementedError("This flow has no algebraic inverse.")

Reverse

Bases: Flow

Switches the forward transform of a flow layer with its inverse and vice versa.

Source code in normflows/flows/base.py

class Reverse(Flow):
    """
    Switches the forward transform of a flow layer with its inverse and vice versa
    """

    def __init__(self, flow):
        """Constructor

        Args:
          flow: Flow layer to be reversed
        """
        super().__init__()
        self.flow = flow

    def forward(self, z):
        return self.flow.inverse(z)

    def inverse(self, z):
        return self.flow.forward(z)
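A short sketch of Composite and Reverse together, assuming both are exported under nf.flows; it reuses the Permute and ActNorm layers documented elsewhere on this page. Since Reverse just swaps forward and inverse, applying it to the composite runs the chain backwards.

# Chain two flows, then undo them via Reverse.
import torch
import normflows as nf

inner = [nf.flows.Permute(4, mode="swap"), nf.flows.ActNorm(4)]
flow = nf.flows.Composite(inner)

z = torch.randn(6, 4)
z_, log_det = flow(z)                 # applies Permute, then ActNorm
rev = nf.flows.Reverse(flow)          # rev(z) now runs the chain backwards
z_back, _ = rev(z_)
print(torch.allclose(z, z_back, atol=1e-5))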
base_test

flow_test

FlowTest

Bases: TestCase

Generic test case for flow modules.

Source code in normflows/flows/flow_test.py

class FlowTest(unittest.TestCase):
    """
    Generic test case for flow modules
    """

    def assertClose(self, actual, expected, atol=None, rtol=None):
        assert_close(actual, expected, atol=atol, rtol=rtol)

    def checkForward(self, flow, inputs, context=None):
        # Do forward transform
        if context is None:
            outputs, log_det = flow(inputs)
        else:
            outputs, log_det = flow(inputs, context)
        # Check type
        assert outputs.dtype == inputs.dtype
        # Check shape
        assert outputs.shape == inputs.shape
        # Return results
        return outputs, log_det

    def checkInverse(self, flow, inputs, context=None):
        # Do inverse transform
        if context is None:
            outputs, log_det = flow.inverse(inputs)
        else:
            outputs, log_det = flow.inverse(inputs, context)
        # Check type
        assert outputs.dtype == inputs.dtype
        # Check shape
        assert outputs.shape == inputs.shape
        # Return results
        return outputs, log_det

    def checkForwardInverse(self, flow, inputs, context=None, atol=None, rtol=None):
        # Check forward
        outputs, log_det = self.checkForward(flow, inputs, context)
        # Check inverse
        input_, log_det_ = self.checkInverse(flow, outputs, context)
        # Check identity
        self.assertClose(input_, inputs, atol, rtol)
        ld_id = log_det + log_det_
        self.assertClose(ld_id, torch.zeros_like(ld_id), atol, rtol)

mixing

Invertible1x1Conv

Bases: Flow

Invertible 1x1 convolution introduced in the Glow paper. Assumes 4d input/output tensors of the form NCHW.

Source code in normflows/flows/mixing.py

class Invertible1x1Conv(Flow):
    """
    Invertible 1x1 convolution introduced in the Glow paper
    Assumes 4d input/output tensors of the form NCHW
    """

    def __init__(self, num_channels, use_lu=False):
        """Constructor

        Args:
          num_channels: Number of channels of the data
          use_lu: Flag whether to parametrize weights through the LU decomposition
        """
        super().__init__()
        self.num_channels = num_channels
        self.use_lu = use_lu
        Q, _ = torch.linalg.qr(torch.randn(self.num_channels, self.num_channels))
        if use_lu:
            P, L, U = torch.lu_unpack(*Q.lu())
            self.register_buffer("P", P)  # remains fixed during optimization
            self.L = nn.Parameter(L)  # lower triangular portion
            S = U.diag()  # "crop out" the diagonal to its own parameter
            self.register_buffer("sign_S", torch.sign(S))
            self.log_S = nn.Parameter(torch.log(torch.abs(S)))
            self.U = nn.Parameter(
                torch.triu(U, diagonal=1)
            )  # "crop out" diagonal, stored in S
            self.register_buffer("eye", torch.diag(torch.ones(self.num_channels)))
        else:
            self.W = nn.Parameter(Q)

    def _assemble_W(self, inverse=False):
        # assemble W from its components (P, L, U, S)
        L = torch.tril(self.L, diagonal=-1) + self.eye
        U = torch.triu(self.U, diagonal=1) + torch.diag(
            self.sign_S * torch.exp(self.log_S)
        )
        if inverse:
            if self.log_S.dtype == torch.float64:
                L_inv = torch.inverse(L)
                U_inv = torch.inverse(U)
            else:
                L_inv = torch.inverse(L.double()).type(self.log_S.dtype)
                U_inv = torch.inverse(U.double()).type(self.log_S.dtype)
            W = U_inv @ L_inv @ self.P.t()
        else:
            W = self.P @ L @ U
        return W

    def forward(self, z):
        if self.use_lu:
            W = self._assemble_W(inverse=True)
            log_det = -torch.sum(self.log_S)
        else:
            W_dtype = self.W.dtype
            if W_dtype == torch.float64:
                W = torch.inverse(self.W)
            else:
                W = torch.inverse(self.W.double()).type(W_dtype)
            W = W.view(*W.size(), 1, 1)
            log_det = -torch.slogdet(self.W)[1]
        W = W.view(self.num_channels, self.num_channels, 1, 1)
        z_ = torch.nn.functional.conv2d(z, W)
        log_det = log_det * z.size(2) * z.size(3)
        return z_, log_det

    def inverse(self, z):
        if self.use_lu:
            W = self._assemble_W()
            log_det = torch.sum(self.log_S)
        else:
            W = self.W
            log_det = torch.slogdet(self.W)[1]
        W = W.view(self.num_channels, self.num_channels, 1, 1)
        z_ = torch.nn.functional.conv2d(z, W)
        log_det = log_det * z.size(2) * z.size(3)
        return z_, log_det
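A quick invertibility sketch: as the source above shows, the log-determinant is slogdet(W) scaled by the number of spatial positions, so forward and inverse contributions cancel exactly.

# Invertible 1x1 convolution on NCHW data.
import torch
import normflows as nf

conv = nf.flows.Invertible1x1Conv(num_channels=3, use_lu=True)

x = torch.randn(5, 3, 8, 8)
z, log_det = conv(x)
x_back, log_det_inv = conv.inverse(z)
print(torch.allclose(x, x_back, atol=1e-4), (log_det + log_det_inv).abs().max())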
InvertibleAffine

Bases: Flow

Invertible affine transformation without shift, i.e. the one-dimensional version of the invertible 1x1 convolutions.

Source code in normflows/flows/mixing.py

class InvertibleAffine(Flow):
    """
    Invertible affine transformation without shift, i.e. one-dimensional
    version of the invertible 1x1 convolutions
    """

    def __init__(self, num_channels, use_lu=True):
        """Constructor

        Args:
          num_channels: Number of channels of the data
          use_lu: Flag whether to parametrize weights through the LU decomposition
        """
        super().__init__()
        self.num_channels = num_channels
        self.use_lu = use_lu
        Q, _ = torch.linalg.qr(torch.randn(self.num_channels, self.num_channels))
        if use_lu:
            P, L, U = torch.lu_unpack(*Q.lu())
            self.register_buffer("P", P)  # remains fixed during optimization
            self.L = nn.Parameter(L)  # lower triangular portion
            S = U.diag()  # "crop out" the diagonal to its own parameter
            self.register_buffer("sign_S", torch.sign(S))
            self.log_S = nn.Parameter(torch.log(torch.abs(S)))
            self.U = nn.Parameter(
                torch.triu(U, diagonal=1)
            )  # "crop out" diagonal, stored in S
            self.register_buffer("eye", torch.diag(torch.ones(self.num_channels)))
        else:
            self.W = nn.Parameter(Q)

    def _assemble_W(self, inverse=False):
        # assemble W from its components (P, L, U, S)
        L = torch.tril(self.L, diagonal=-1) + self.eye
        U = torch.triu(self.U, diagonal=1) + torch.diag(
            self.sign_S * torch.exp(self.log_S)
        )
        if inverse:
            if self.log_S.dtype == torch.float64:
                L_inv = torch.inverse(L)
                U_inv = torch.inverse(U)
            else:
                L_inv = torch.inverse(L.double()).type(self.log_S.dtype)
                U_inv = torch.inverse(U.double()).type(self.log_S.dtype)
            W = U_inv @ L_inv @ self.P.t()
        else:
            W = self.P @ L @ U
        return W

    def forward(self, z, context=None):
        if self.use_lu:
            W = self._assemble_W(inverse=True)
            log_det = -torch.sum(self.log_S)
        else:
            W_dtype = self.W.dtype
            if W_dtype == torch.float64:
                W = torch.inverse(self.W)
            else:
                W = torch.inverse(self.W.double()).type(W_dtype)
            log_det = -torch.slogdet(self.W)[1]
        z_ = z @ W
        return z_, log_det

    def inverse(self, z, context=None):
        if self.use_lu:
            W = self._assemble_W()
            log_det = torch.sum(self.log_S)
        else:
            W = self.W
            log_det = torch.slogdet(self.W)[1]
        z_ = z @ W
        return z_, log_det
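The same idea on plain feature vectors; a minimal sketch assuming the nf.flows export path. With use_lu=True the log-determinant is just the sum of log_S, a 0-dim tensor shared across the batch.

# Invertible linear map (no shift) on 6D feature vectors.
import torch
import normflows as nf

lin = nf.flows.InvertibleAffine(num_channels=6)

z = torch.randn(12, 6)
z_, log_det = lin(z)                 # forward multiplies by W^{-1}
z_back, _ = lin.inverse(z_)
print(torch.allclose(z, z_back, atol=1e-4), log_det.item())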
LULinearPermute

Bases: Flow

Fixed permutation combined with a linear transformation parametrized using the LU decomposition, used in https://arxiv.org/abs/1906.04032

Source code in normflows/flows/mixing.py

class LULinearPermute(Flow):
    """
    Fixed permutation combined with a linear transformation parametrized
    using the LU decomposition, used in https://arxiv.org/abs/1906.04032
    """

    def __init__(self, num_channels, identity_init=True):
        """Constructor

        Args:
          num_channels: Number of dimensions of the data
          identity_init: Flag, whether to initialize linear transform as identity matrix
        """
        # Initialize
        super().__init__()

        # Define modules
        self.permutation = _RandomPermutation(num_channels)
        self.linear = _LULinear(num_channels, identity_init=identity_init)

    def forward(self, z, context=None):
        z, log_det = self.linear.inverse(z, context=context)
        z, _ = self.permutation.inverse(z, context=context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, _ = self.permutation(z, context=context)
        z, log_det = self.linear(z, context=context)
        return z, log_det.view(-1)

Permute

Bases: Flow

Permutes features along the channel dimension.

Source code in normflows/flows/mixing.py

class Permute(Flow):
    """
    Permutes features along the channel dimension
    """

    def __init__(self, num_channels, mode="shuffle"):
        """Constructor

        Args:
          num_channels: Number of channels
          mode: Mode of permuting features, can be shuffle for random permutation or swap for interchanging upper and lower part
        """
        super().__init__()
        self.mode = mode
        self.num_channels = num_channels
        if self.mode == "shuffle":
            perm = torch.randperm(self.num_channels)
            inv_perm = torch.empty_like(perm).scatter_(
                dim=0, index=perm, src=torch.arange(self.num_channels)
            )
            self.register_buffer("perm", perm)
            self.register_buffer("inv_perm", inv_perm)

    def forward(self, z, context=None):
        if self.mode == "shuffle":
            z = z[:, self.perm, ...]
        elif self.mode == "swap":
            z1 = z[:, : self.num_channels // 2, ...]
            z2 = z[:, self.num_channels // 2 :, ...]
            z = torch.cat([z2, z1], dim=1)
        else:
            raise NotImplementedError("The mode " + self.mode + " is not implemented.")
        log_det = torch.zeros(len(z), device=z.device)
        return z, log_det

    def inverse(self, z, context=None):
        if self.mode == "shuffle":
            z = z[:, self.inv_perm, ...]
        elif self.mode == "swap":
            z1 = z[:, : (self.num_channels + 1) // 2, ...]
            z2 = z[:, (self.num_channels + 1) // 2 :, ...]
            z = torch.cat([z2, z1], dim=1)
        else:
            raise NotImplementedError("The mode " + self.mode + " is not implemented.")
        log_det = torch.zeros(len(z), device=z.device)
        return z, log_det
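Channel permutations are volume preserving, so their log-determinant is identically zero; LULinearPermute adds a learnable LU-parametrized linear map on top of a fixed random permutation. A small sketch, assuming both classes are exported under nf.flows:

# Permutation and LU-linear mixing layers on 4D features.
import torch
import normflows as nf

perm = nf.flows.Permute(4, mode="shuffle")
z = torch.randn(3, 4)
z_, log_det = perm(z)
z_back, _ = perm.inverse(z_)
print(torch.equal(z, z_back), log_det)   # True, tensor of zeros

lu = nf.flows.LULinearPermute(4)          # identity_init=True by default
z_, log_det = lu(z)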
mixing_test

neural_spline

autoregressive

Implementations of autoregressive transforms. Code taken from https://github.com/bayesiains/nsf

autoregressive_test

Tests for the autoregressive transforms. Code partially taken from https://github.com/bayesiains/nsf

coupling

Implementations of various coupling layers. Code taken from https://github.com/bayesiains/nsf

Coupling

Bases: Flow

A base class for coupling layers. Supports 2D inputs (NxD), as well as 4D inputs for images (NxCxHxW). For images the splitting is done on the channel dimension, using the provided 1D mask.

Source code in normflows/flows/neural_spline/coupling.py

class Coupling(Flow):
    """A base class for coupling layers. Supports 2D inputs (NxD), as well as
    4D inputs for images (NxCxHxW). For images the splitting is done on the
    channel dimension, using the provided 1D mask."""

    def __init__(self, mask, transform_net_create_fn, unconditional_transform=None):
        """Constructor.

        mask: a 1-dim tensor, tuple or list. It indexes inputs as follows:

        - if `mask[i] > 0`, `input[i]` will be transformed.
        - if `mask[i] <= 0`, `input[i]` will be passed unchanged.
        """
        mask = torch.as_tensor(mask)
        if mask.dim() != 1:
            raise ValueError("Mask must be a 1-dim tensor.")
        if mask.numel() <= 0:
            raise ValueError("Mask can't be empty.")

        super().__init__()
        self.features = len(mask)
        features_vector = torch.arange(self.features)

        self.register_buffer(
            "identity_features", features_vector.masked_select(mask <= 0)
        )
        self.register_buffer(
            "transform_features", features_vector.masked_select(mask > 0)
        )

        assert self.num_identity_features + self.num_transform_features == self.features

        self.transform_net = transform_net_create_fn(
            self.num_identity_features,
            self.num_transform_features * self._transform_dim_multiplier(),
        )

        if unconditional_transform is None:
            self.unconditional_transform = None
        else:
            self.unconditional_transform = unconditional_transform(
                features=self.num_identity_features
            )

    @property
    def num_identity_features(self):
        return len(self.identity_features)

    @property
    def num_transform_features(self):
        return len(self.transform_features)

    def forward(self, inputs, context=None):
        if inputs.dim() not in [2, 4]:
            raise ValueError("Inputs must be a 2D or a 4D tensor.")
        if inputs.shape[1] != self.features:
            raise ValueError(
                "Expected features = {}, got {}.".format(self.features, inputs.shape[1])
            )

        identity_split = inputs[:, self.identity_features, ...]
        transform_split = inputs[:, self.transform_features, ...]

        transform_params = self.transform_net(identity_split, context)
        transform_split, logabsdet = self._coupling_transform_forward(
            inputs=transform_split, transform_params=transform_params
        )

        if self.unconditional_transform is not None:
            identity_split, logabsdet_identity = self.unconditional_transform(
                identity_split, context
            )
            logabsdet += logabsdet_identity

        outputs = torch.empty_like(inputs)
        outputs[:, self.identity_features, ...] = identity_split
        outputs[:, self.transform_features, ...] = transform_split

        return outputs, logabsdet

    def inverse(self, inputs, context=None):
        if inputs.dim() not in [2, 4]:
            raise ValueError("Inputs must be a 2D or a 4D tensor.")
        if inputs.shape[1] != self.features:
            raise ValueError(
                "Expected features = {}, got {}.".format(self.features, inputs.shape[1])
            )

        identity_split = inputs[:, self.identity_features, ...]
        transform_split = inputs[:, self.transform_features, ...]

        logabsdet = 0.0
        if self.unconditional_transform is not None:
            identity_split, logabsdet = self.unconditional_transform.inverse(
                identity_split, context
            )

        transform_params = self.transform_net(identity_split, context)
        transform_split, logabsdet_split = self._coupling_transform_inverse(
            inputs=transform_split, transform_params=transform_params
        )
        logabsdet += logabsdet_split

        outputs = torch.empty_like(inputs)
        outputs[:, self.identity_features] = identity_split
        outputs[:, self.transform_features] = transform_split

        return outputs, logabsdet

    def _transform_dim_multiplier(self):
        """Number of features to output for each transform dimension."""
        raise NotImplementedError()

    def _coupling_transform_forward(self, inputs, transform_params):
        """Forward pass of the coupling transform."""
        raise NotImplementedError()

    def _coupling_transform_inverse(self, inputs, transform_params):
        """Inverse of the coupling transform."""
        raise NotImplementedError()
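The mask convention above in plain terms: entries greater than zero are transformed, entries less than or equal to zero pass through and condition the transform net. A self-contained sketch of how the feature split works, using only the operations from the source above:

import torch

mask = torch.tensor([1, -1, 1, -1])
features_vector = torch.arange(len(mask))
identity_features = features_vector.masked_select(mask <= 0)    # tensor([1, 3])
transform_features = features_vector.masked_select(mask > 0)    # tensor([0, 2])

inputs = torch.randn(2, 4)
identity_split = inputs[:, identity_features]    # conditions the transform net
transform_split = inputs[:, transform_features]  # gets transformed elementwise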
coupling_test

Tests for the coupling transforms. Code partially taken from https://github.com/bayesiains/nsf

wrapper

AutoregressiveRationalQuadraticSpline

Bases: Flow

Neural spline flow layer, wrapper for the implementation of Durkan et al., see sources (https://github.com/bayesiains/nsf)

Source code in normflows/flows/neural_spline/wrapper.py

class AutoregressiveRationalQuadraticSpline(Flow):
    """
    Neural spline flow coupling layer, wrapper for the implementation
    of Durkan et al., see [sources](https://github.com/bayesiains/nsf)
    """

    def __init__(
        self,
        num_input_channels,
        num_blocks,
        num_hidden_channels,
        num_context_channels=None,
        num_bins=8,
        tail_bound=3,
        activation=nn.ReLU,
        dropout_probability=0.0,
        permute_mask=False,
        init_identity=True,
    ):
        """Constructor

        Args:
          num_input_channels (int): Flow dimension
          num_blocks (int): Number of residual blocks of the parameter NN
          num_hidden_channels (int): Number of hidden units of the NN
          num_context_channels (int): Number of context/conditional channels
          num_bins (int): Number of bins
          tail_bound (int): Bound of the spline tails
          activation (torch.nn.Module): Activation function
          dropout_probability (float): Dropout probability of the NN
          permute_mask (bool): Flag, permutes the mask of the NN
          init_identity (bool): Flag, initialize transform as identity
        """
        super().__init__()

        self.mprqat = MaskedPiecewiseRationalQuadraticAutoregressive(
            features=num_input_channels,
            hidden_features=num_hidden_channels,
            context_features=num_context_channels,
            num_bins=num_bins,
            tails="linear",
            tail_bound=tail_bound,
            num_blocks=num_blocks,
            use_residual_blocks=True,
            random_mask=False,
            permute_mask=permute_mask,
            activation=activation(),
            dropout_probability=dropout_probability,
            use_batch_norm=False,
            init_identity=init_identity,
        )

    def forward(self, z, context=None):
        z, log_det = self.mprqat.inverse(z, context=context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, log_det = self.mprqat(z, context=context)
        return z, log_det.view(-1)
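A minimal sketch, assuming the nf.flows export path; the widths are arbitrary. With init_identity left at its default, the freshly constructed layer should act as the identity up to numerical error.

# A 4D autoregressive rational-quadratic spline layer.
import torch
import normflows as nf

flow = nf.flows.AutoregressiveRationalQuadraticSpline(
    num_input_channels=4, num_blocks=2, num_hidden_channels=32
)

z = torch.randn(16, 4)
z_, log_det = flow(z)
print(torch.allclose(z, z_, atol=1e-4), log_det.shape)   # identity at initialization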
CircularAutoregressiveRationalQuadraticSpline

Bases: Flow

Neural spline flow layer with circular coordinates, wrapper for the implementation of Durkan et al., see sources (https://github.com/bayesiains/nsf)

Source code in normflows/flows/neural_spline/wrapper.py

class CircularAutoregressiveRationalQuadraticSpline(Flow):
    """
    Neural spline flow coupling layer, wrapper for the implementation
    of Durkan et al., see [sources](https://github.com/bayesiains/nsf)
    """

    def __init__(
        self,
        num_input_channels,
        num_blocks,
        num_hidden_channels,
        ind_circ,
        num_context_channels=None,
        num_bins=8,
        tail_bound=3,
        activation=nn.ReLU,
        dropout_probability=0.0,
        permute_mask=True,
        init_identity=True,
    ):
        """Constructor

        Args:
          num_input_channels (int): Flow dimension
          num_blocks (int): Number of residual blocks of the parameter NN
          num_hidden_channels (int): Number of hidden units of the NN
          ind_circ (Iterable): Indices of the circular coordinates
          num_context_channels (int): Number of context/conditional channels
          num_bins (int): Number of bins
          tail_bound (int): Bound of the spline tails
          activation (torch module): Activation function
          dropout_probability (float): Dropout probability of the NN
          permute_mask (bool): Flag, permutes the mask of the NN
          init_identity (bool): Flag, initialize transform as identity
        """
        super().__init__()

        tails = [
            "circular" if i in ind_circ else "linear"
            for i in range(num_input_channels)
        ]

        self.mprqat = MaskedPiecewiseRationalQuadraticAutoregressive(
            features=num_input_channels,
            hidden_features=num_hidden_channels,
            context_features=num_context_channels,
            num_bins=num_bins,
            tails=tails,
            tail_bound=tail_bound,
            num_blocks=num_blocks,
            use_residual_blocks=True,
            random_mask=False,
            permute_mask=permute_mask,
            activation=activation(),
            dropout_probability=dropout_probability,
            use_batch_norm=False,
            init_identity=init_identity,
        )

    def forward(self, z, context=None):
        z, log_det = self.mprqat.inverse(z, context=context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, log_det = self.mprqat(z, context=context)
        return z, log_det.view(-1)
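A usage sketch under the same export-path assumption; ind_circ marks which coordinates get circular spline tails, the rest stay linear.

# Spline flow where coordinate 0 is periodic (circular tails).
import torch
import normflows as nf

flow = nf.flows.CircularAutoregressiveRationalQuadraticSpline(
    num_input_channels=3, num_blocks=2, num_hidden_channels=32, ind_circ=[0]
)

z = torch.rand(8, 3) * 2 - 1     # keep the circular coordinate inside the tail bound
z_, log_det = flow(z)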
CircularCoupledRationalQuadraticSpline

Bases: Flow

Neural spline flow coupling layer with circular coordinates.

Source code in normflows/flows/neural_spline/wrapper.py

class CircularCoupledRationalQuadraticSpline(Flow):
    """
    Neural spline flow coupling layer with circular coordinates
    """

    def __init__(
        self,
        num_input_channels,
        num_blocks,
        num_hidden_channels,
        ind_circ,
        num_context_channels=None,
        num_bins=8,
        tail_bound=3.0,
        activation=nn.ReLU,
        dropout_probability=0.0,
        reverse_mask=False,
        mask=None,
        init_identity=True,
    ):
        """Constructor

        Args:
          num_input_channels (int): Flow dimension
          num_blocks (int): Number of residual blocks of the parameter NN
          num_hidden_channels (int): Number of hidden units of the NN
          num_context_channels (int): Number of context/conditional channels
          ind_circ (Iterable): Indices of the circular coordinates
          num_bins (int): Number of bins
          tail_bound (float or Iterable): Bound of the spline tails
          activation (torch module): Activation function
          dropout_probability (float): Dropout probability of the NN
          reverse_mask (bool): Flag whether the reverse mask should be used
          mask (torch tensor): Mask to be used, alternating mask is generated if None
          init_identity (bool): Flag, initialize transform as identity
        """
        super().__init__()

        if mask is None:
            mask = create_alternating_binary_mask(num_input_channels, even=reverse_mask)
        features_vector = torch.arange(num_input_channels)
        identity_features = features_vector.masked_select(mask <= 0)
        ind_circ = torch.tensor(ind_circ)
        ind_circ_id = []
        for i, id in enumerate(identity_features):
            if id in ind_circ:
                ind_circ_id += [i]

        if torch.is_tensor(tail_bound):
            scale_pf = np.pi / tail_bound[ind_circ_id]
        else:
            scale_pf = np.pi / tail_bound

        def transform_net_create_fn(in_features, out_features):
            if len(ind_circ_id) > 0:
                pf = PeriodicFeaturesElementwise(in_features, ind_circ_id, scale_pf)
            else:
                pf = None
            net = ResidualNet(
                in_features=in_features,
                out_features=out_features,
                context_features=num_context_channels,
                hidden_features=num_hidden_channels,
                num_blocks=num_blocks,
                activation=activation(),
                dropout_probability=dropout_probability,
                use_batch_norm=False,
                preprocessing=pf,
            )
            if init_identity:
                torch.nn.init.constant_(net.final_layer.weight, 0.0)
                torch.nn.init.constant_(
                    net.final_layer.bias, np.log(np.exp(1 - DEFAULT_MIN_DERIVATIVE) - 1)
                )
            return net

        tails = [
            "circular" if i in ind_circ else "linear"
            for i in range(num_input_channels)
        ]

        self.prqct = PiecewiseRationalQuadraticCoupling(
            mask=mask,
            transform_net_create_fn=transform_net_create_fn,
            num_bins=num_bins,
            tails=tails,
            tail_bound=tail_bound,
            apply_unconditional_transform=True,
        )

    def forward(self, z, context=None):
        z, log_det = self.prqct.inverse(z, context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, log_det = self.prqct(z, context)
        return z, log_det.view(-1)
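The coupling-based analogue of the previous layer; as the source shows, circular features of the conditioner are encoded internally with PeriodicFeaturesElementwise. A brief sketch under the nf.flows export-path assumption:

import torch
import normflows as nf

flow = nf.flows.CircularCoupledRationalQuadraticSpline(
    num_input_channels=4, num_blocks=2, num_hidden_channels=32, ind_circ=[0, 1]
)
z = torch.rand(8, 4) * 2 - 1
z_, log_det = flow(z)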
CoupledRationalQuadraticSpline

Bases: Flow

Neural spline flow coupling layer, wrapper for the implementation of Durkan et al., see source (https://github.com/bayesiains/nsf)

Source code in normflows/flows/neural_spline/wrapper.py

class CoupledRationalQuadraticSpline(Flow):
    """
    Neural spline flow coupling layer, wrapper for the implementation
    of Durkan et al., see [source](https://github.com/bayesiains/nsf)
    """

    def __init__(
        self,
        num_input_channels,
        num_blocks,
        num_hidden_channels,
        num_context_channels=None,
        num_bins=8,
        tails="linear",
        tail_bound=3.0,
        activation=nn.ReLU,
        dropout_probability=0.0,
        reverse_mask=False,
        init_identity=True,
    ):
        """Constructor

        Args:
          num_input_channels (int): Flow dimension
          num_blocks (int): Number of residual blocks of the parameter NN
          num_hidden_channels (int): Number of hidden units of the NN
          num_context_channels (int): Number of context/conditional channels
          num_bins (int): Number of bins
          tails (str): Behaviour of the tails of the distribution, can be linear, circular for periodic distribution, or None for distribution on the compact interval
          tail_bound (float): Bound of the spline tails
          activation (torch module): Activation function
          dropout_probability (float): Dropout probability of the NN
          reverse_mask (bool): Flag whether the reverse mask should be used
          init_identity (bool): Flag, initialize transform as identity
        """
        super().__init__()

        def transform_net_create_fn(in_features, out_features):
            net = ResidualNet(
                in_features=in_features,
                out_features=out_features,
                context_features=num_context_channels,
                hidden_features=num_hidden_channels,
                num_blocks=num_blocks,
                activation=activation(),
                dropout_probability=dropout_probability,
                use_batch_norm=False,
            )
            if init_identity:
                torch.nn.init.constant_(net.final_layer.weight, 0.0)
                torch.nn.init.constant_(
                    net.final_layer.bias, np.log(np.exp(1 - DEFAULT_MIN_DERIVATIVE) - 1)
                )
            return net

        self.prqct = PiecewiseRationalQuadraticCoupling(
            mask=create_alternating_binary_mask(num_input_channels, even=reverse_mask),
            transform_net_create_fn=transform_net_create_fn,
            num_bins=num_bins,
            tails=tails,
            tail_bound=tail_bound,
            # Setting True corresponds to equations (4), (5), (6) in the NSF paper:
            apply_unconditional_transform=True,
        )

    def forward(self, z, context=None):
        z, log_det = self.prqct.inverse(z, context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, log_det = self.prqct(z, context)
        return z, log_det.view(-1)
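A sketch of a typical stack built from this layer, in the spirit of the model construction at the top of this page: spline coupling layers with alternating masks, interleaved with the LU-parametrized permutations documented above. The depth and widths are arbitrary choices.

import normflows as nf

latent_size = 4
flows = []
for i in range(8):
    flows += [nf.flows.CoupledRationalQuadraticSpline(
        latent_size, num_blocks=2, num_hidden_channels=64,
        reverse_mask=(i % 2 == 1),
    )]
    flows += [nf.flows.LULinearPermute(latent_size)]

q0 = nf.distributions.DiagGaussian(latent_size)
model = nf.NormalizingFlow(q0=q0, flows=flows)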
wrapper_test

normalization

ActNorm

Bases: AffineConstFlow

An AffineConstFlow but with a data-dependent initialization, where on the very first batch we cleverly initialize s and t so that the output is unit Gaussian, as described in the Glow paper.

Source code in normflows/flows/normalization.py

class ActNorm(AffineConstFlow):
    """
    An AffineConstFlow but with a data-dependent initialization, where on the
    very first batch we cleverly initialize the s, t so that the output is
    unit Gaussian. As described in the Glow paper.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.data_dep_init_done_cpu = torch.tensor(0.0)
        self.register_buffer("data_dep_init_done", self.data_dep_init_done_cpu)

    def forward(self, z):
        # first batch is used for initialization, c.f. batchnorm
        if not self.data_dep_init_done > 0.0:
            assert self.s is not None and self.t is not None
            s_init = -torch.log(z.std(dim=self.batch_dims, keepdim=True) + 1e-6)
            self.s.data = s_init.data
            self.t.data = (
                -z.mean(dim=self.batch_dims, keepdim=True) * torch.exp(self.s)
            ).data
            self.data_dep_init_done = torch.tensor(1.0)
        return super().forward(z)

    def inverse(self, z):
        # first batch is used for initialization, c.f. batchnorm
        if not self.data_dep_init_done:
            assert self.s is not None and self.t is not None
            s_init = torch.log(z.std(dim=self.batch_dims, keepdim=True) + 1e-6)
            self.s.data = s_init.data
            self.t.data = z.mean(dim=self.batch_dims, keepdim=True).data
            self.data_dep_init_done = torch.tensor(1.0)
        return super().inverse(z)
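This is why the example at the top of this page draws a warm-up batch of samples before training: the first batch fixes s and t. A minimal demonstration of the data-dependent initialization:

# ActNorm normalizes its very first batch to zero mean and unit std.
import torch
import normflows as nf

act = nf.flows.ActNorm(2)
z = 3.0 + 2.0 * torch.randn(512, 2)
z_, _ = act(z)                        # first batch triggers initialization
print(z_.mean(0), z_.std(0))          # approximately zero mean, unit std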
BatchNorm

Bases: Flow

Batch normalization without considering the derivatives of the batch statistics, see arXiv: 1605.08803 (https://arxiv.org/abs/1605.08803)

Source code in normflows/flows/normalization.py

class BatchNorm(Flow):
    """
    Batch normalization without considering the derivatives of the batch
    statistics, see [arXiv: 1605.08803](https://arxiv.org/abs/1605.08803)
    """

    def __init__(self, eps=1.0e-10):
        super().__init__()
        self.eps_cpu = torch.tensor(eps)
        self.register_buffer("eps", self.eps_cpu)

    def forward(self, z):
        """
        Do batch norm over batch and sample dimension
        """
        mean = torch.mean(z, dim=0, keepdims=True)
        std = torch.std(z, dim=0, keepdims=True)
        z_ = (z - mean) / torch.sqrt(std**2 + self.eps)
        log_det = torch.log(1 / torch.prod(torch.sqrt(std**2 + self.eps))).repeat(
            z.size()[0]
        )
        return z_, log_det

forward(z)

Do batch norm over batch and sample dimension

Source code in normflows/flows/normalization.py (see the class listing above).

periodic

PeriodicShift

Bases: Flow

Shift and wrap periodic coordinates

Source code in normflows/flows/periodic.py

class PeriodicShift(Flow):
    """
    Shift and wrap periodic coordinates
    """

    def __init__(self, ind, bound=1.0, shift=0.0):
        """Constructor

        Args:
          ind: Iterable, indices of coordinates to be mapped
          bound: Float or iterable, bound of interval
          shift: Tensor, shift to be applied
        """
        super().__init__()
        self.ind = ind
        if torch.is_tensor(bound):
            self.register_buffer("bound", bound)
        else:
            self.bound = bound
        if torch.is_tensor(shift):
            self.register_buffer("shift", shift)
        else:
            self.shift = shift

    def forward(self, z):
        z_ = z.clone()
        z_[..., self.ind] = (
            torch.remainder(z_[..., self.ind] + self.shift + self.bound, 2 * self.bound)
            - self.bound
        )
        return z_, torch.zeros(len(z), dtype=z.dtype, device=z.device)

    def inverse(self, z):
        z_ = z.clone()
        z_[..., self.ind] = (
            torch.remainder(z_[..., self.ind] - self.shift + self.bound, 2 * self.bound)
            - self.bound
        )
        return z_, torch.zeros(len(z), dtype=z.dtype, device=z.device)

__init__(ind, bound=1.0, shift=0.0)

Constructor

Parameters:
- ind: Iterable, indices of coordinates to be mapped (required)
- bound: Float or iterable, bound of interval (default: 1.0)
- shift: Tensor, shift to be applied (default: 0.0)

Source code in normflows/flows/periodic.py (see the class listing above).
register_buffer ( \"shift\" , shift ) else : self . shift = shift PeriodicWrap Bases: Flow Map periodic coordinates to fixed interval Source code in normflows/flows/periodic.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 class PeriodicWrap ( Flow ): \"\"\" Map periodic coordinates to fixed interval \"\"\" def __init__ ( self , ind , bound = 1.0 ): \"\"\"Constructor ind: Iterable, indices of coordinates to be mapped bound: Float or iterable, bound of interval \"\"\" super () . __init__ () self . ind = ind if torch . is_tensor ( bound ): self . register_buffer ( \"bound\" , bound ) else : self . bound = bound def forward ( self , z ): return z , torch . zeros ( len ( z ), dtype = z . dtype , device = z . device ) def inverse ( self , z ): z_ = z . clone () z_ [ ... , self . ind ] = ( torch . remainder ( z_ [ ... , self . ind ] + self . bound , 2 * self . bound ) - self . bound ) return z_ , torch . zeros ( len ( z ), dtype = z . dtype , device = z . device ) __init__ ( ind , bound = 1.0 ) Constructor ind: Iterable, indices of coordinates to be mapped bound: Float or iterable, bound of interval Source code in normflows/flows/periodic.py 11 12 13 14 15 16 17 18 19 20 21 22 def __init__ ( self , ind , bound = 1.0 ): \"\"\"Constructor ind: Iterable, indices of coordinates to be mapped bound: Float or iterable, bound of interval \"\"\" super () . __init__ () self . ind = ind if torch . is_tensor ( bound ): self . register_buffer ( \"bound\" , bound ) else : self . bound = bound periodic_test planar Planar Bases: Flow Planar flow as introduced in arXiv: 1505.05770 f(z) = z + u * h(w * z + b) Source code in normflows/flows/planar.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 class Planar ( Flow ): \"\"\"Planar flow as introduced in [arXiv: 1505.05770](https://arxiv.org/abs/1505.05770) ``` f(z) = z + u * h(w * z + b) ``` \"\"\" def __init__ ( self , shape , act = \"tanh\" , u = None , w = None , b = None ): \"\"\"Constructor of the planar flow Args: shape: shape of the latent variable z h: nonlinear function h of the planar flow (see definition of f above) u,w,b: optional initialization for parameters \"\"\" super () . __init__ () lim_w = np . sqrt ( 2.0 / np . prod ( shape )) lim_u = np . sqrt ( 2 ) if u is not None : self . u = nn . Parameter ( u ) else : self . u = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . u , - lim_u , lim_u ) if w is not None : self . w = nn . Parameter ( w ) else : self . w = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . w , - lim_w , lim_w ) if b is not None : self . b = nn . Parameter ( b ) else : self . b = nn . Parameter ( torch . zeros ( 1 )) self . act = act if act == \"tanh\" : self . h = torch . tanh elif act == \"leaky_relu\" : self . h = torch . nn . LeakyReLU ( negative_slope = 0.2 ) else : raise NotImplementedError ( \"Nonlinearity is not implemented.\" ) def forward ( self , z ): lin = torch . sum ( self . w * z , list ( range ( 1 , self . w . dim ())), keepdim = True ) + self . b inner = torch . sum ( self . w * self . u ) u = self . u + ( torch . log ( 1 + torch . exp ( inner )) - 1 - inner ) \\ * self . w / torch . sum ( self . w ** 2 ) # constraint w.T * u > -1 if self . act == \"tanh\" : h_ = lambda x : 1 / torch . cosh ( x ) ** 2 elif self . 
act == \"leaky_relu\" : h_ = lambda x : ( x < 0 ) * ( self . h . negative_slope - 1.0 ) + 1.0 z_ = z + u * self . h ( lin ) log_det = torch . log ( torch . abs ( 1 + torch . sum ( self . w * u ) * h_ ( lin . reshape ( - 1 )))) return z_ , log_det def inverse ( self , z ): if self . act != \"leaky_relu\" : raise NotImplementedError ( \"This flow has no algebraic inverse.\" ) lin = torch . sum ( self . w * z , list ( range ( 1 , self . w . dim ()))) + self . b a = ( lin < 0 ) * ( self . h . negative_slope - 1.0 ) + 1.0 # absorb leakyReLU slope into u inner = torch . sum ( self . w * self . u ) u = self . u + ( torch . log ( 1 + torch . exp ( inner )) - 1 - inner ) \\ * self . w / torch . sum ( self . w ** 2 ) dims = [ - 1 ] + ( u . dim () - 1 ) * [ 1 ] u = a . reshape ( * dims ) * u inner_ = torch . sum ( self . w * u , list ( range ( 1 , self . w . dim ()))) z_ = z - u * ( lin / ( 1 + inner_ )) . reshape ( * dims ) log_det = - torch . log ( torch . abs ( 1 + inner_ )) return z_ , log_det __init__ ( shape , act = 'tanh' , u = None , w = None , b = None ) Constructor of the planar flow Parameters: Name Type Description Default shape shape of the latent variable z required h nonlinear function h of the planar flow (see definition of f above) required u,w,b optional initialization for parameters required Source code in normflows/flows/planar.py 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 def __init__ ( self , shape , act = \"tanh\" , u = None , w = None , b = None ): \"\"\"Constructor of the planar flow Args: shape: shape of the latent variable z h: nonlinear function h of the planar flow (see definition of f above) u,w,b: optional initialization for parameters \"\"\" super () . __init__ () lim_w = np . sqrt ( 2.0 / np . prod ( shape )) lim_u = np . sqrt ( 2 ) if u is not None : self . u = nn . Parameter ( u ) else : self . u = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . u , - lim_u , lim_u ) if w is not None : self . w = nn . Parameter ( w ) else : self . w = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . w , - lim_w , lim_w ) if b is not None : self . b = nn . Parameter ( b ) else : self . b = nn . Parameter ( torch . zeros ( 1 )) self . act = act if act == \"tanh\" : self . h = torch . tanh elif act == \"leaky_relu\" : self . h = torch . nn . LeakyReLU ( negative_slope = 0.2 ) else : raise NotImplementedError ( \"Nonlinearity is not implemented.\" ) planar_test radial Radial Bases: Flow Radial flow as introduced in arXiv: 1505.05770 f(z) = z + beta * h(alpha, r) * (z - z_0) Source code in normflows/flows/radial.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 class Radial ( Flow ): \"\"\"Radial flow as introduced in [arXiv: 1505.05770](https://arxiv.org/abs/1505.05770) ``` f(z) = z + beta * h(alpha, r) * (z - z_0) ``` \"\"\" def __init__ ( self , shape , z_0 = None ): \"\"\"Constructor of the radial flow Args: shape: shape of the latent variable z z_0: parameter of the radial flow \"\"\" super () . __init__ () self . d_cpu = torch . prod ( torch . tensor ( shape )) self . register_buffer ( \"d\" , self . d_cpu ) self . beta = nn . Parameter ( torch . empty ( 1 )) lim = 1.0 / np . prod ( shape ) nn . init . uniform_ ( self . beta , - lim - 1.0 , lim - 1.0 ) self . alpha = nn . Parameter ( torch . empty ( 1 )) nn . init . uniform_ ( self . alpha , - lim , lim ) if z_0 is not None : self . z_0 = nn . 
planar_test

radial

Radial

Bases: Flow

Radial flow as introduced in arXiv: 1505.05770 (https://arxiv.org/abs/1505.05770)

    f(z) = z + beta * h(alpha, r) * (z - z_0)

Source code in normflows/flows/radial.py

class Radial(Flow):
    """Radial flow as introduced in [arXiv: 1505.05770](https://arxiv.org/abs/1505.05770)

    ```
    f(z) = z + beta * h(alpha, r) * (z - z_0)
    ```
    """

    def __init__(self, shape, z_0=None):
        """Constructor of the radial flow

        Args:
          shape: shape of the latent variable z
          z_0: parameter of the radial flow
        """
        super().__init__()
        self.d_cpu = torch.prod(torch.tensor(shape))
        self.register_buffer("d", self.d_cpu)
        self.beta = nn.Parameter(torch.empty(1))
        lim = 1.0 / np.prod(shape)
        nn.init.uniform_(self.beta, -lim - 1.0, lim - 1.0)
        self.alpha = nn.Parameter(torch.empty(1))
        nn.init.uniform_(self.alpha, -lim, lim)

        if z_0 is not None:
            self.z_0 = nn.Parameter(z_0)
        else:
            self.z_0 = nn.Parameter(torch.randn(shape)[None])

    def forward(self, z):
        beta = torch.log(1 + torch.exp(self.beta)) - torch.abs(self.alpha)
        dz = z - self.z_0
        r = torch.linalg.vector_norm(dz, dim=list(range(1, self.z_0.dim())), keepdim=True)
        h_arr = beta / (torch.abs(self.alpha) + r)
        h_arr_ = -beta * r / (torch.abs(self.alpha) + r) ** 2
        z_ = z + h_arr * dz
        log_det = (self.d - 1) * torch.log(1 + h_arr) + torch.log(1 + h_arr + h_arr_)
        log_det = log_det.reshape(-1)
        return z_, log_det

__init__(shape, z_0=None)

Constructor of the radial flow

Parameters:
- shape: shape of the latent variable z (required)
- z_0: parameter of the radial flow (default: None)

Source code in normflows/flows/radial.py (see the class listing above).
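A corresponding sketch for a radial layer; no algebraic inverse is provided, so only the forward map is shown:

import torch
import normflows as nf

flow = nf.flows.Radial((2,))
z = torch.randn(64, 2)
z_out, log_det = flow(z)  # log_det has shape (64,)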
radial_test

reshape

Merge

Bases: Split

Same as Split but with forward and backward pass interchanged

Source code in normflows/flows/reshape.py

class Merge(Split):
    """
    Same as Split but with forward and backward pass interchanged
    """

    def __init__(self, mode="channel"):
        super().__init__(mode)

    def forward(self, z):
        return super().inverse(z)

    def inverse(self, z):
        return super().forward(z)

Split

Bases: Flow

Split features into two sets

Source code in normflows/flows/reshape.py

class Split(Flow):
    """
    Split features into two sets
    """

    def __init__(self, mode="channel"):
        """Constructor

        The splitting mode can be:

        - channel: Splits first feature dimension, usually channels, into two halves
        - channel_inv: Same as channel, but with z1 and z2 flipped
        - checkerboard: Splits features using a checkerboard pattern (last feature dimension must be even)
        - checkerboard_inv: Same as checkerboard, but with inverted coloring

        Args:
          mode: splitting mode
        """
        super().__init__()
        self.mode = mode

    def forward(self, z):
        if self.mode == "channel":
            z1, z2 = z.chunk(2, dim=1)
        elif self.mode == "channel_inv":
            z2, z1 = z.chunk(2, dim=1)
        elif "checkerboard" in self.mode:
            n_dims = z.dim()
            cb0 = 0
            cb1 = 1
            for i in range(1, n_dims):
                cb0_ = cb0
                cb1_ = cb1
                cb0 = [cb0_ if j % 2 == 0 else cb1_ for j in range(z.size(n_dims - i))]
                cb1 = [cb1_ if j % 2 == 0 else cb0_ for j in range(z.size(n_dims - i))]
            cb = cb1 if "inv" in self.mode else cb0
            cb = torch.tensor(cb)[None].repeat(len(z), *((n_dims - 1) * [1]))
            cb = cb.to(z.device)
            z_size = z.size()
            z1 = z.reshape(-1)[torch.nonzero(cb.view(-1), as_tuple=False)].view(
                *z_size[:-1], -1
            )
            z2 = z.reshape(-1)[torch.nonzero((1 - cb).view(-1), as_tuple=False)].view(
                *z_size[:-1], -1
            )
        else:
            raise NotImplementedError("Mode " + self.mode + " is not implemented.")
        log_det = 0
        return [z1, z2], log_det

    def inverse(self, z):
        z1, z2 = z
        if self.mode == "channel":
            z = torch.cat([z1, z2], 1)
        elif self.mode == "channel_inv":
            z = torch.cat([z2, z1], 1)
        elif "checkerboard" in self.mode:
            n_dims = z1.dim()
            z_size = list(z1.size())
            z_size[-1] *= 2
            cb0 = 0
            cb1 = 1
            for i in range(1, n_dims):
                cb0_ = cb0
                cb1_ = cb1
                cb0 = [cb0_ if j % 2 == 0 else cb1_ for j in range(z_size[n_dims - i])]
                cb1 = [cb1_ if j % 2 == 0 else cb0_ for j in range(z_size[n_dims - i])]
            cb = cb1 if "inv" in self.mode else cb0
            cb = torch.tensor(cb)[None].repeat(z_size[0], *((n_dims - 1) * [1]))
            cb = cb.to(z1.device)
            z1 = z1[..., None].repeat(*(n_dims * [1]), 2).view(*z_size[:-1], -1)
            z2 = z2[..., None].repeat(*(n_dims * [1]), 2).view(*z_size[:-1], -1)
            z = cb * z1 + (1 - cb) * z2
        else:
            raise NotImplementedError("Mode " + self.mode + " is not implemented.")
        log_det = 0
        return z, log_det

__init__(mode='channel')

Constructor

The splitting mode can be:
- channel: Splits first feature dimension, usually channels, into two halves
- channel_inv: Same as channel, but with z1 and z2 flipped
- checkerboard: Splits features using a checkerboard pattern (last feature dimension must be even)
- checkerboard_inv: Same as checkerboard, but with inverted coloring

Parameters:
- mode: splitting mode (default: 'channel')

Source code in normflows/flows/reshape.py (see the class listing above).
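Split and Merge are exact inverses of each other, which the following sketch checks on a random image-shaped tensor (shapes are arbitrary):

import torch
import normflows as nf

split = nf.flows.Split(mode="channel")
merge = nf.flows.Merge(mode="channel")

z = torch.randn(8, 4, 16, 16)
(z1, z2), _ = split(z)        # two tensors with 2 channels each
z_rec, _ = merge([z1, z2])    # reassembles the original tensor
assert torch.allclose(z, z_rec)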
Squeeze

Bases: Flow

Squeeze operation of the multi-scale architecture, see the RealNVP or Glow paper

Source code in normflows/flows/reshape.py

class Squeeze(Flow):
    """
    Squeeze operation of multi-scale architecture, RealNVP or Glow paper
    """

    def __init__(self):
        """
        Constructor
        """
        super().__init__()

    def forward(self, z):
        log_det = 0
        s = z.size()
        z = z.view(s[0], s[1] // 4, 2, 2, s[2], s[3])
        z = z.permute(0, 1, 4, 2, 5, 3).contiguous()
        z = z.view(s[0], s[1] // 4, 2 * s[2], 2 * s[3])
        return z, log_det

    def inverse(self, z):
        log_det = 0
        s = z.size()
        z = z.view(*s[:2], s[2] // 2, 2, s[3] // 2, 2)
        z = z.permute(0, 1, 3, 5, 2, 4).contiguous()
        z = z.view(s[0], 4 * s[1], s[2] // 2, s[3] // 2)
        return z, log_det

__init__()

Constructor

Source code in normflows/flows/reshape.py (see the class listing above).
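A shape check clarifies the direction of the two passes (sketch; the sizes are arbitrary, spatial dims must be even for the inverse):

import torch
import normflows as nf

squeeze = nf.flows.Squeeze()
z = torch.randn(8, 4, 8, 8)
z_fwd, _ = squeeze(z)           # forward: (8, 1, 16, 16), channels / 4
z_inv, _ = squeeze.inverse(z)   # inverse: (8, 16, 4, 4), channels * 4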
residual

Residual

Bases: Flow

Invertible residual net block, wrapper to the implementation of Chen et al., see sources (https://github.com/rtqichen/residual-flows)

Source code in normflows/flows/residual.py

class Residual(Flow):
    """
    Invertible residual net block, wrapper to the implementation of Chen et al.,
    see [sources](https://github.com/rtqichen/residual-flows)
    """

    def __init__(
        self,
        net,
        reverse=True,
        reduce_memory=True,
        geom_p=0.5,
        lamb=2.0,
        n_power_series=None,
        exact_trace=False,
        brute_force=False,
        n_samples=1,
        n_exact_terms=2,
        n_dist="geometric",
    ):
        """Constructor

        Args:
          net: Neural network, must be Lipschitz continuous with L < 1
          reverse: Flag, if true the map ```f(x) = x + net(x)``` is applied in the inverse pass, otherwise it is done in forward
          reduce_memory: Flag, if true the Neumann series and precomputations for the backward pass are done in the forward pass
          geom_p: Parameter of the geometric distribution used for the power series
          lamb: Parameter of the Poisson distribution used for the power series
          n_power_series: Number of terms in the Neumann series
          exact_trace: Flag, if true the trace of the Jacobian is computed exactly
          brute_force: Flag, if true the Jacobian is computed exactly in 2D
          n_samples: Number of samples used to estimate the power series
          n_exact_terms: Number of terms always included in the power series
          n_dist: Distribution used for the power series, either "geometric" or "poisson"
        """
        super().__init__()
        self.reverse = reverse
        self.iresblock = iResBlock(
            net,
            n_samples=n_samples,
            n_exact_terms=n_exact_terms,
            neumann_grad=reduce_memory,
            grad_in_forward=reduce_memory,
            exact_trace=exact_trace,
            geom_p=geom_p,
            lamb=lamb,
            n_power_series=n_power_series,
            brute_force=brute_force,
            n_dist=n_dist,
        )

    def forward(self, z):
        if self.reverse:
            z, log_det = self.iresblock.inverse(z, 0)
        else:
            z, log_det = self.iresblock.forward(z, 0)
        return z, -log_det.view(-1)

    def inverse(self, z):
        if self.reverse:
            z, log_det = self.iresblock.forward(z, 0)
        else:
            z, log_det = self.iresblock.inverse(z, 0)
        return z, -log_det.view(-1)

__init__(net, reverse=True, reduce_memory=True, geom_p=0.5, lamb=2.0, n_power_series=None, exact_trace=False, brute_force=False, n_samples=1, n_exact_terms=2, n_dist='geometric')

Constructor

Parameters:
- net: Neural network, must be Lipschitz continuous with L < 1 (required)
- reverse: Flag, if true the map f(x) = x + net(x) is applied in the inverse pass, otherwise in forward (default: True)
- reduce_memory: Flag, if true the Neumann series and precomputations for the backward pass are done in the forward pass (default: True)
- geom_p: Parameter of the geometric distribution used for the power series (default: 0.5)
- lamb: Parameter of the Poisson distribution used for the power series (default: 2.0)
- n_power_series: Number of terms in the Neumann series (default: None)
- exact_trace: Flag, if true the trace of the Jacobian is computed exactly (default: False)
- brute_force: Flag, if true the Jacobian is computed exactly in 2D (default: False)
- n_samples: Number of samples used to estimate the power series (default: 1)
- n_exact_terms: Number of terms always included in the power series (default: 2)
- n_dist: Distribution used for the power series, either "geometric" or "poisson" (default: 'geometric')

Source code in normflows/flows/residual.py (see the class listing above).
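A sketch combining Residual with a Lipschitz-constrained net (documented further below in this reference); the layer sizes and Lipschitz constant are illustrative:

import torch
import normflows as nf

net = nf.nets.LipschitzMLP([2, 64, 64, 2], init_zeros=True, lipschitz_const=0.9)
flow = nf.flows.Residual(net, reduce_memory=True)
z = torch.randn(32, 2)
# With the default reverse=True, forward runs the fixed-point inverse of x + net(x)
z_out, log_det = flow(z)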
iResBlock

Bases: Module

Source code in normflows/flows/residual.py

class iResBlock(nn.Module):
    def __init__(
        self,
        nnet,
        geom_p=0.5,
        lamb=2.0,
        n_power_series=None,
        exact_trace=False,
        brute_force=False,
        n_samples=1,
        n_exact_terms=2,
        n_dist="geometric",
        neumann_grad=True,
        grad_in_forward=False,
    ):
        """
        Args:
            nnet: a nn.Module
            n_power_series: number of power series terms. If not None, uses a biased approximation to logdet.
            exact_trace: if False, uses a Hutchinson trace estimator. Otherwise computes the exact full Jacobian.
            brute_force: Computes the exact logdet. Only available for 2D inputs.
        """
        nn.Module.__init__(self)
        self.nnet = nnet
        self.n_dist = n_dist
        self.geom_p = nn.Parameter(torch.tensor(np.log(geom_p) - np.log(1.0 - geom_p)))
        self.lamb = nn.Parameter(torch.tensor(lamb))
        self.n_samples = n_samples
        self.n_power_series = n_power_series
        self.exact_trace = exact_trace
        self.brute_force = brute_force
        self.n_exact_terms = n_exact_terms
        self.grad_in_forward = grad_in_forward
        self.neumann_grad = neumann_grad

        # store the samples of n.
        self.register_buffer("last_n_samples", torch.zeros(self.n_samples))
        self.register_buffer("last_firmom", torch.zeros(1))
        self.register_buffer("last_secmom", torch.zeros(1))

    def forward(self, x, logpx=None):
        if logpx is None:
            y = x + self.nnet(x)
            return y
        else:
            g, logdetgrad = self._logdetgrad(x)
            return x + g, logpx - logdetgrad

    def inverse(self, y, logpy=None):
        x = self._inverse_fixed_point(y)
        if logpy is None:
            return x
        else:
            return x, logpy + self._logdetgrad(x)[1]

    def _inverse_fixed_point(self, y, atol=1e-5, rtol=1e-5):
        x, x_prev = y - self.nnet(y), y
        i = 0
        tol = atol + y.abs() * rtol
        while not torch.all((x - x_prev) ** 2 / tol < 1):
            x, x_prev = y - self.nnet(x), x
            i += 1
            if i > 1000:
                break
        return x

    def _logdetgrad(self, x):
        """Returns g(x) and ```logdet|d(x+g(x))/dx|```"""
        with torch.enable_grad():
            if (self.brute_force or not self.training) and (
                x.ndimension() == 2 and x.shape[1] == 2
            ):
                ###########################################
                # Brute-force compute Jacobian determinant.
                ###########################################
                x = x.requires_grad_(True)
                g = self.nnet(x)
                # Brute-force logdet only available for 2D.
                jac = batch_jacobian(g, x)
                batch_dets = (jac[:, 0, 0] + 1) * (jac[:, 1, 1] + 1) - jac[
                    :, 0, 1
                ] * jac[:, 1, 0]
                return g, torch.log(torch.abs(batch_dets)).view(-1, 1)

            if self.n_dist == "geometric":
                geom_p = torch.sigmoid(self.geom_p).item()
                sample_fn = lambda m: geometric_sample(geom_p, m)
                rcdf_fn = lambda k, offset: geometric_1mcdf(geom_p, k, offset)
            elif self.n_dist == "poisson":
                lamb = self.lamb.item()
                sample_fn = lambda m: poisson_sample(lamb, m)
                rcdf_fn = lambda k, offset: poisson_1mcdf(lamb, k, offset)

            if self.training:
                if self.n_power_series is None:
                    # Unbiased estimation.
                    lamb = self.lamb.item()
                    n_samples = sample_fn(self.n_samples)
                    n_power_series = max(n_samples) + self.n_exact_terms
                    coeff_fn = (
                        lambda k: 1
                        / rcdf_fn(k, self.n_exact_terms)
                        * sum(n_samples >= k - self.n_exact_terms)
                        / len(n_samples)
                    )
                else:
                    # Truncated estimation.
                    n_power_series = self.n_power_series
                    coeff_fn = lambda k: 1.0
            else:
                # Unbiased estimation with more exact terms.
                lamb = self.lamb.item()
                n_samples = sample_fn(self.n_samples)
                n_power_series = max(n_samples) + 20
                coeff_fn = (
                    lambda k: 1
                    / rcdf_fn(k, 20)
                    * sum(n_samples >= k - 20)
                    / len(n_samples)
                )

            if not self.exact_trace:
                ####################################
                # Power series with trace estimator.
                ####################################
                vareps = torch.randn_like(x)

                # Choose the type of estimator.
                if self.training and self.neumann_grad:
                    estimator_fn = neumann_logdet_estimator
                else:
                    estimator_fn = basic_logdet_estimator

                # Do backprop-in-forward to save memory.
                if self.training and self.grad_in_forward:
                    g, logdetgrad = mem_eff_wrapper(
                        estimator_fn,
                        self.nnet,
                        x,
                        n_power_series,
                        vareps,
                        coeff_fn,
                        self.training,
                    )
                else:
                    x = x.requires_grad_(True)
                    g = self.nnet(x)
                    logdetgrad = estimator_fn(
                        g, x, n_power_series, vareps, coeff_fn, self.training
                    )
            else:
                ############################################
                # Power series with exact trace computation.
                ############################################
                x = x.requires_grad_(True)
                g = self.nnet(x)
                jac = batch_jacobian(g, x)
                logdetgrad = batch_trace(jac)
                jac_k = jac
                for k in range(2, n_power_series + 1):
                    jac_k = torch.bmm(jac, jac_k)
                    logdetgrad = logdetgrad + (-1) ** (k + 1) / k * coeff_fn(
                        k
                    ) * batch_trace(jac_k)

            if self.training and self.n_power_series is None:
                self.last_n_samples.copy_(
                    torch.tensor(n_samples).to(self.last_n_samples)
                )
                estimator = logdetgrad.detach()
                self.last_firmom.copy_(torch.mean(estimator).to(self.last_firmom))
                self.last_secmom.copy_(torch.mean(estimator**2).to(self.last_secmom))
            return g, logdetgrad.view(-1, 1)

    def extra_repr(self):
        return "dist={}, n_samples={}, n_power_series={}, neumann_grad={}, exact_trace={}, brute_force={}".format(
            self.n_dist,
            self.n_samples,
            self.n_power_series,
            self.neumann_grad,
            self.exact_trace,
            self.brute_force,
        )

__init__(nnet, geom_p=0.5, lamb=2.0, n_power_series=None, exact_trace=False, brute_force=False, n_samples=1, n_exact_terms=2, n_dist='geometric', neumann_grad=True, grad_in_forward=False)

Parameters:
- nnet: a nn.Module (required)
- n_power_series: number of power series terms; if not None, uses a biased approximation to the logdet
  (default: None)
- exact_trace: if False, uses a Hutchinson trace estimator, otherwise computes the exact full Jacobian (default: False)
- brute_force: computes the exact logdet; only available for 2D inputs (default: False)

Source code in normflows/flows/residual.py (see the class listing above).

residual_test

stochastic

HamiltonianMonteCarlo

Bases: Flow

Flow layer using the HMC proposal in Stochastic Normalising Flows, see arXiv: 2002.06707 (https://arxiv.org/abs/2002.06707)

Source code in normflows/flows/stochastic.py

class HamiltonianMonteCarlo(Flow):
    """Flow layer using the HMC proposal in Stochastic Normalising Flows

    See [arXiv: 2002.06707](https://arxiv.org/abs/2002.06707)
    """

    def __init__(self, target, steps, log_step_size, log_mass, max_abs_grad=None):
        """Constructor

        Args:
          target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from.
          steps: The number of leapfrog steps
          log_step_size: The log step size used in the leapfrog integrator. shape (dim)
          log_mass: The log_mass determining the variance of the momentum samples. shape (dim)
          max_abs_grad: Maximum absolute value of the gradient of the target distribution's log probability. If set to None then no gradient clipping is applied. Useful for improving numerical stability.
        """
        super().__init__()
        self.target = target
        self.steps = steps
        self.register_parameter("log_step_size", torch.nn.Parameter(log_step_size))
        self.register_parameter("log_mass", torch.nn.Parameter(log_mass))
        self.max_abs_grad = max_abs_grad

    def forward(self, z):
        # Draw momentum
        p = torch.randn_like(z) * torch.exp(0.5 * self.log_mass)

        # Leapfrog integration
        z_new = z.clone()
        p_new = p.clone()
        step_size = torch.exp(self.log_step_size)
        for i in range(self.steps):
            p_half = p_new - (step_size / 2.0) * -self.gradlogP(z_new)
            z_new = z_new + step_size * (p_half / torch.exp(self.log_mass))
            p_new = p_half - (step_size / 2.0) * -self.gradlogP(z_new)
        # Metropolis-Hastings correction
        probabilities = torch.exp(
            self.target.log_prob(z_new)
            - self.target.log_prob(z)
            - 0.5 * torch.sum(p_new**2 / torch.exp(self.log_mass), 1)
            + 0.5 * torch.sum(p**2 / torch.exp(self.log_mass), 1)
        )
        uniforms = torch.rand_like(probabilities)
        mask = uniforms < probabilities
        z_out = torch.where(mask.unsqueeze(1), z_new, z)

        return z_out, self.target.log_prob(z) - self.target.log_prob(z_out)

    def inverse(self, z):
        return self.forward(z)

    def gradlogP(self, z):
        z_ = z.detach().requires_grad_()
        logp = self.target.log_prob(z_)
        grad = torch.autograd.grad(logp, z_, grad_outputs=torch.ones_like(logp))[0]
        if self.max_abs_grad:
            grad = torch.clamp(grad, max=self.max_abs_grad, min=-self.max_abs_grad)
        return grad

__init__(target, steps, log_step_size, log_mass, max_abs_grad=None)

Constructor

Parameters:
- target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from (required)
- steps: The number of leapfrog steps (required)
- log_step_size: The log step size used in the leapfrog integrator, shape (dim) (required)
- log_mass: The log mass determining the variance of the momentum samples, shape (dim) (required)
- max_abs_grad: Maximum absolute value of the gradient of the target distribution's log probability; if None, no gradient clipping is applied. Useful for improving numerical stability (default: None)

Source code in normflows/flows/stochastic.py (see the class listing above).
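A sketch of one HMC layer targeting the two-moons density; the step size and mass values are illustrative, not recommended settings:

import torch
import normflows as nf

target = nf.distributions.TwoMoons()
hmc = nf.flows.HamiltonianMonteCarlo(
    target=target,
    steps=10,
    log_step_size=torch.full((2,), -2.0),
    log_mass=torch.zeros(2),
)
z = torch.randn(64, 2)
z_new, log_det = hmc(z)   # Metropolis-corrected leapfrog proposal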
MetropolisHastings

Bases: Flow

Sampling through Metropolis-Hastings in Stochastic Normalizing Flows, see arXiv: 2002.06707 (https://arxiv.org/abs/2002.06707)

Source code in normflows/flows/stochastic.py

class MetropolisHastings(Flow):
    """Sampling through Metropolis-Hastings in Stochastic Normalizing Flows

    See [arXiv: 2002.06707](https://arxiv.org/abs/2002.06707)
    """

    def __init__(self, target, proposal, steps):
        """Constructor

        Args:
          target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from.
          proposal: Proposal distribution
          steps: Number of MCMC steps to perform
        """
        super().__init__()
        self.target = target
        self.proposal = proposal
        self.steps = steps

    def forward(self, z):
        # Initialize number of samples and log(det)
        num_samples = len(z)
        log_det = torch.zeros(num_samples, dtype=z.dtype, device=z.device)
        # Get log(p) for current samples
        log_p = self.target.log_prob(z)
        for i in range(self.steps):
            # Make proposal and get log(p)
            z_, log_p_diff = self.proposal(z)
            log_p_ = self.target.log_prob(z_)
            # Make acceptance decision
            w = torch.rand(num_samples, dtype=z.dtype, device=z.device)
            log_w_accept = log_p_ - log_p + log_p_diff
            w_accept = torch.clamp(torch.exp(log_w_accept), max=1)
            accept = w <= w_accept
            # Update samples, log(det), and log(p)
            z = torch.where(accept.unsqueeze(1), z_, z)
            log_det_ = log_p - log_p_
            log_det = torch.where(accept, log_det + log_det_, log_det)
            log_p = torch.where(accept, log_p_, log_p)
        return z, log_det

    def inverse(self, z):
        # Equivalent to forward pass
        return self.forward(z)

__init__(target, proposal, steps)

Constructor

Parameters:
- target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from (required)
- proposal: Proposal distribution (required)
- steps: Number of MCMC steps to perform (required)

Source code in normflows/flows/stochastic.py (see the class listing above).
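As the forward pass shows, the proposal must return a pair (z_, log_p_diff), where log_p_diff is the log ratio of proposal densities; for a symmetric random walk this ratio is zero. The RandomWalkProposal below is a hypothetical helper written for this sketch, not part of the library:

import torch
import normflows as nf

class RandomWalkProposal(torch.nn.Module):
    # Hypothetical symmetric Gaussian random-walk proposal
    def __init__(self, scale=0.1):
        super().__init__()
        self.scale = scale

    def forward(self, z):
        z_ = z + self.scale * torch.randn_like(z)
        # Symmetric proposal: the density ratio is 1, so log_p_diff = 0
        return z_, torch.zeros(len(z), dtype=z.dtype, device=z.device)

mh = nf.flows.MetropolisHastings(nf.distributions.TwoMoons(), RandomWalkProposal(), steps=20)
z, log_det = mh(torch.randn(64, 2))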
stochastic_test

nets

cnn

ConvNet2d

Bases: Module

Convolutional neural network with leaky ReLU nonlinearities

Source code in normflows/nets/cnn.py

class ConvNet2d(nn.Module):
    """
    Convolutional Neural Network with leaky ReLU nonlinearities
    """

    def __init__(
        self,
        channels,
        kernel_size,
        leaky=0.0,
        init_zeros=True,
        actnorm=False,
        weight_std=None,
    ):
        """Constructor

        Args:
          channels: List of channels of conv layers, first entry is in_channels
          kernel_size: List of kernel sizes, same for height and width
          leaky: Leaky part of ReLU
          init_zeros: Flag whether last layer shall be initialized with zeros
          actnorm: Flag whether activation normalization shall be done after each conv layer except output
          weight_std: Fixed std used to initialize every layer
        """
        super().__init__()
        # Build network
        net = nn.ModuleList([])
        for i in range(len(kernel_size) - 1):
            conv = nn.Conv2d(
                channels[i],
                channels[i + 1],
                kernel_size[i],
                padding=kernel_size[i] // 2,
                bias=(not actnorm),
            )
            if weight_std is not None:
                conv.weight.data.normal_(mean=0.0, std=weight_std)
            net.append(conv)
            if actnorm:
                net.append(utils.ActNorm((channels[i + 1],) + (1, 1)))
            net.append(nn.LeakyReLU(leaky))
        i = len(kernel_size)
        net.append(
            nn.Conv2d(
                channels[i - 1],
                channels[i],
                kernel_size[i - 1],
                padding=kernel_size[i - 1] // 2,
            )
        )
        if init_zeros:
            nn.init.zeros_(net[-1].weight)
            nn.init.zeros_(net[-1].bias)
        self.net = nn.Sequential(*net)

    def forward(self, x):
        return self.net(x)

__init__(channels, kernel_size, leaky=0.0, init_zeros=True, actnorm=False, weight_std=None)

Constructor

Parameters:
- channels: List of channels of conv layers, first entry is in_channels; needs one entry more than kernel_size (required)
- kernel_size: List of kernel sizes, same for height and width (required)
- leaky: Leaky part of ReLU (default: 0.0)
- init_zeros: Flag whether the last layer shall be initialized with zeros (default: True)
- actnorm: Flag whether activation normalization shall be done after each conv layer except the output (default: False)
- weight_std: Fixed std used to initialize every layer (default: None)

Source code in normflows/nets/cnn.py (see the class listing above).
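A quick shape check (the channel and kernel lists are arbitrary; with the default init_zeros=True the output starts at zero):

import torch
import normflows as nf

net = nf.nets.ConvNet2d(channels=[3, 16, 16, 3], kernel_size=[3, 3, 3], leaky=0.1)
x = torch.randn(4, 3, 8, 8)
y = net(x)   # padding keeps the spatial size: (4, 3, 8, 8)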
Module ): \"\"\" Convolutional neural network which is Lipschitz continuous with Lipschitz constant L < 1 \"\"\" def __init__ ( self , channels , kernel_size , lipschitz_const = 0.97 , max_lipschitz_iter = 5 , lipschitz_tolerance = None , init_zeros = True , ): \"\"\"Constructor Args: channels: Integer list with the number of channels of the layers kernel_size: Integer list of kernel sizes of the layers lipschitz_const: Maximum Lipschitz constant of each layer max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 init_zeros: Flag, whether to initialize last layer approximately with zeros \"\"\" super () . __init__ () self . n_layers = len ( kernel_size ) self . channels = channels self . kernel_size = kernel_size self . lipschitz_const = lipschitz_const self . max_lipschitz_iter = max_lipschitz_iter self . lipschitz_tolerance = lipschitz_tolerance self . init_zeros = init_zeros layers = [] for i in range ( self . n_layers ): layers += [ Swish (), InducedNormConv2d ( in_channels = channels [ i ], out_channels = channels [ i + 1 ], kernel_size = kernel_size [ i ], stride = 1 , padding = kernel_size [ i ] // 2 , bias = True , coeff = lipschitz_const , domain = 2 , codomain = 2 , n_iterations = max_lipschitz_iter , atol = lipschitz_tolerance , rtol = lipschitz_tolerance , zero_init = init_zeros if i == ( self . n_layers - 1 ) else False , ), ] self . net = nn . Sequential ( * layers ) def forward ( self , x ): return self . net ( x ) __init__ ( channels , kernel_size , lipschitz_const = 0.97 , max_lipschitz_iter = 5 , lipschitz_tolerance = None , init_zeros = True ) Constructor Parameters: Name Type Description Default channels Integer list with the number of channels of the layers required kernel_size Integer list of kernel sizes of the layers required lipschitz_const Maximum Lipschitz constant of each layer 0.97 max_lipschitz_iter Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used 5 lipschitz_tolerance Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 None init_zeros Flag, whether to initialize last layer approximately with zeros True Source code in normflows/nets/lipschitz.py 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 def __init__ ( self , channels , kernel_size , lipschitz_const = 0.97 , max_lipschitz_iter = 5 , lipschitz_tolerance = None , init_zeros = True , ): \"\"\"Constructor Args: channels: Integer list with the number of channels of the layers kernel_size: Integer list of kernel sizes of the layers lipschitz_const: Maximum Lipschitz constant of each layer max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 init_zeros: Flag, whether to initialize last layer approximately with zeros \"\"\" super () . __init__ () self . n_layers = len ( kernel_size ) self . channels = channels self . kernel_size = kernel_size self . lipschitz_const = lipschitz_const self . 
LipschitzMLP

Bases: Module

Fully connected neural net which is Lipschitz continuous with Lipschitz constant L < 1

Source code in normflows/nets/lipschitz.py

class LipschitzMLP(nn.Module):
    """Fully connected neural net which is Lipschitz continuous
    with Lipschitz constant L < 1"""

    def __init__(
        self,
        channels,
        lipschitz_const=0.97,
        max_lipschitz_iter=5,
        lipschitz_tolerance=None,
        init_zeros=True,
    ):
        """Constructor

        Args:
          channels: Integer list with the number of channels of the layers
          lipschitz_const: Maximum Lipschitz constant of each layer
          max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used
          lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3
          init_zeros: Flag, whether to initialize last layer approximately with zeros
        """
        super().__init__()
        self.n_layers = len(channels) - 1
        self.channels = channels
        self.lipschitz_const = lipschitz_const
        self.max_lipschitz_iter = max_lipschitz_iter
        self.lipschitz_tolerance = lipschitz_tolerance
        self.init_zeros = init_zeros

        layers = []
        for i in range(self.n_layers):
            layers += [
                Swish(),
                InducedNormLinear(
                    in_features=channels[i],
                    out_features=channels[i + 1],
                    coeff=lipschitz_const,
                    domain=2,
                    codomain=2,
                    n_iterations=max_lipschitz_iter,
                    atol=lipschitz_tolerance,
                    rtol=lipschitz_tolerance,
                    zero_init=init_zeros if i == (self.n_layers - 1) else False,
                ),
            ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)

__init__(channels, lipschitz_const=0.97, max_lipschitz_iter=5, lipschitz_tolerance=None, init_zeros=True)

Constructor

Parameters:
- channels: Integer list with the number of channels of the layers (required)
- lipschitz_const: Maximum Lipschitz constant of each layer (default: 0.97)
- max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than the set maximum; if None, the tolerance is used (default: 5)
- lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 (default: None)
- init_zeros: Flag whether to initialize the last layer approximately with zeros (default: True)

Source code in normflows/nets/lipschitz.py (see the class listing above).
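Both Lipschitz nets are drop-in parameter networks for residual flows; a small sketch with arbitrary sizes:

import torch
import normflows as nf

mlp = nf.nets.LipschitzMLP([2, 32, 32, 2], lipschitz_const=0.9)
cnn = nf.nets.LipschitzCNN(channels=[1, 8, 8, 1], kernel_size=[3, 3, 3])

y1 = mlp(torch.randn(10, 2))          # (10, 2)
y2 = cnn(torch.randn(10, 1, 8, 8))    # (10, 1, 8, 8), padding keeps spatial size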
projmax_(v)

In-place argmax on absolute value.

Source code in normflows/nets/lipschitz.py

def projmax_(v):
    """Inplace argmax on absolute value."""
    ind = torch.argmax(torch.abs(v))
    v.zero_()
    v[ind] = 1
    return v

made

Implementation of MADE. Code taken from https://github.com/bayesiains/nsf

MADE

Bases: Module

Implementation of MADE. It can use either feedforward blocks or residual blocks (default is residual). Optionally, it can use batch norm or dropout within blocks (default is no).

Source code in normflows/nets/made.py

class MADE(nn.Module):
    """Implementation of MADE.

    It can use either feedforward blocks or residual blocks (default is residual).
    Optionally, it can use batch norm or dropout within blocks (default is no).
    """

    def __init__(
        self,
        features,
        hidden_features,
        context_features=None,
        num_blocks=2,
        output_multiplier=1,
        use_residual_blocks=True,
        random_mask=False,
        permute_mask=False,
        activation=F.relu,
        dropout_probability=0.0,
        use_batch_norm=False,
        preprocessing=None,
    ):
        if use_residual_blocks and random_mask:
            raise ValueError("Residual blocks can't be used with random masks.")
        super().__init__()

        # Preprocessing
        if preprocessing is None:
            self.preprocessing = torch.nn.Identity()
        else:
            self.preprocessing = preprocessing

        # Initial layer.
        input_degrees_ = _get_input_degrees(features)
        if permute_mask:
            input_degrees_ = input_degrees_[torch.randperm(features)]
        self.initial_layer = MaskedLinear(
            in_degrees=input_degrees_,
            out_features=hidden_features,
            autoregressive_features=features,
            random_mask=random_mask,
            is_output=False,
        )

        if context_features is not None:
            self.context_layer = nn.Linear(context_features, hidden_features)

        # Residual blocks.
        blocks = []
        if use_residual_blocks:
            block_constructor = MaskedResidualBlock
        else:
            block_constructor = MaskedFeedforwardBlock
        prev_out_degrees = self.initial_layer.degrees
        for _ in range(num_blocks):
            blocks.append(
                block_constructor(
                    in_degrees=prev_out_degrees,
                    autoregressive_features=features,
                    context_features=context_features,
                    random_mask=random_mask,
                    activation=activation,
                    dropout_probability=dropout_probability,
                    use_batch_norm=use_batch_norm,
                )
            )
            prev_out_degrees = blocks[-1].degrees
        self.blocks = nn.ModuleList(blocks)

        # Final layer.
        self.final_layer = MaskedLinear(
            in_degrees=prev_out_degrees,
            out_features=features * output_multiplier,
            autoregressive_features=features,
            random_mask=random_mask,
            is_output=True,
            out_degrees_=input_degrees_,
        )

    def forward(self, inputs, context=None):
        outputs = self.preprocessing(inputs)
        outputs = self.initial_layer(outputs)
        if context is not None:
            outputs += self.context_layer(context)
        for block in self.blocks:
            outputs = block(outputs, context)
        outputs = self.final_layer(outputs)
        return outputs
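A sketch of MADE's autoregressive output; the sizes are arbitrary, and the import path assumes MADE is exported from normflows.nets as the module layout above suggests. With output_multiplier=2 each input dimension gets two outputs, e.g. a mean and a scale:

import torch
from normflows.nets import MADE

made = MADE(features=4, hidden_features=32, num_blocks=2, output_multiplier=2)
x = torch.randn(10, 4)
out = made(x)   # shape (10, 8); the masks ensure output block j depends only on x_<j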
MaskedFeedforwardBlock

Bases: Module

A feedforward block based on a masked linear module. NOTE: in this implementation, the number of output features is taken to be equal to the number of input features.

Source code in normflows/nets/made.py

class MaskedFeedforwardBlock(nn.Module):
    """A feedforward block based on a masked linear module.

    **NOTE** In this implementation, the number of output features is taken
    to be equal to the number of input features.
    """

    def __init__(
        self,
        in_degrees,
        autoregressive_features,
        context_features=None,
        random_mask=False,
        activation=F.relu,
        dropout_probability=0.0,
        use_batch_norm=False,
    ):
        super().__init__()
        features = len(in_degrees)

        # Batch norm.
        if use_batch_norm:
            self.batch_norm = nn.BatchNorm1d(features, eps=1e-3)
        else:
            self.batch_norm = None

        if context_features is not None:
            raise NotImplementedError()

        # Masked linear.
        self.linear = MaskedLinear(
            in_degrees=in_degrees,
            out_features=features,
            autoregressive_features=autoregressive_features,
            random_mask=random_mask,
            is_output=False,
        )
        self.degrees = self.linear.degrees

        # Activation and dropout.
        self.activation = activation
        self.dropout = nn.Dropout(p=dropout_probability)

    def forward(self, inputs, context=None):
        if context is not None:
            raise NotImplementedError()
        if self.batch_norm:
            outputs = self.batch_norm(inputs)
        else:
            outputs = inputs
        outputs = self.linear(outputs)
        outputs = self.activation(outputs)
        outputs = self.dropout(outputs)
        return outputs

MaskedLinear

Bases: Linear

A linear module with a masked weight matrix.

Source code in normflows/nets/made.py

class MaskedLinear(nn.Linear):
    """A linear module with a masked weight matrix."""

    def __init__(
        self,
        in_degrees,
        out_features,
        autoregressive_features,
        random_mask,
        is_output,
        bias=True,
        out_degrees_=None,
    ):
        super().__init__(
            in_features=len(in_degrees), out_features=out_features, bias=bias
        )
        mask, degrees = self._get_mask_and_degrees(
            in_degrees=in_degrees,
            out_features=out_features,
            autoregressive_features=autoregressive_features,
            random_mask=random_mask,
            is_output=is_output,
            out_degrees_=out_degrees_,
        )
        self.register_buffer("mask", mask)
        self.register_buffer("degrees", degrees)

    @classmethod
    def _get_mask_and_degrees(
        cls,
        in_degrees,
        out_features,
        autoregressive_features,
        random_mask,
        is_output,
        out_degrees_=None,
    ):
        if is_output:
            if out_degrees_ is None:
                out_degrees_ = _get_input_degrees(autoregressive_features)
            out_degrees = tile(out_degrees_, out_features // autoregressive_features)
            mask = (out_degrees[..., None] > in_degrees).float()
        else:
            if random_mask:
                min_in_degree = torch.min(in_degrees).item()
                min_in_degree = min(min_in_degree, autoregressive_features - 1)
                out_degrees = torch.randint(
                    low=min_in_degree,
                    high=autoregressive_features,
                    size=[out_features],
                    dtype=torch.long,
                )
            else:
                max_ = max(1, autoregressive_features - 1)
                min_ = min(1, autoregressive_features - 1)
                out_degrees = torch.arange(out_features) % max_ + min_
            mask = (out_degrees[..., None] >= in_degrees).float()
        return mask, out_degrees

    def forward(self, x):
        return F.linear(x, self.weight * self.mask, self.bias)

MaskedResidualBlock

Bases: Module

A residual block containing masked linear modules.

Source code in normflows/nets/made.py

class MaskedResidualBlock(nn.Module):
    """A residual block containing masked linear modules."""

    def __init__(
        self,
        in_degrees,
        autoregressive_features,
        context_features=None,
        random_mask=False,
        activation=F.relu,
        dropout_probability=0.0,
        use_batch_norm=False,
        zero_initialization=True,
    ):
        if random_mask:
            raise ValueError("Masked residual block can't be used with random masks.")
        super().__init__()
        features = len(in_degrees)

        if context_features is not None:
            self.context_layer = nn.Linear(context_features, features)

        # Batch norm.
        self.use_batch_norm = use_batch_norm
        if use_batch_norm:
            self.batch_norm_layers = nn.ModuleList(
                [nn.BatchNorm1d(features, eps=1e-3) for _ in range(2)]
            )

        # Masked linear.
linear_0 = MaskedLinear ( in_degrees = in_degrees , out_features = features , autoregressive_features = autoregressive_features , random_mask = False , is_output = False , ) linear_1 = MaskedLinear ( in_degrees = linear_0 . degrees , out_features = features , autoregressive_features = autoregressive_features , random_mask = False , is_output = False , ) self . linear_layers = nn . ModuleList ([ linear_0 , linear_1 ]) self . degrees = linear_1 . degrees if torch . all ( self . degrees >= in_degrees ) . item () != 1 : raise RuntimeError ( \"In a masked residual block, the output degrees can't be\" \" less than the corresponding input degrees.\" ) # Activation and dropout self . activation = activation self . dropout = nn . Dropout ( p = dropout_probability ) # Initialization. if zero_initialization : init . uniform_ ( self . linear_layers [ - 1 ] . weight , a =- 1e-3 , b = 1e-3 ) init . uniform_ ( self . linear_layers [ - 1 ] . bias , a =- 1e-3 , b = 1e-3 ) def forward ( self , inputs , context = None ): temps = inputs if self . use_batch_norm : temps = self . batch_norm_layers [ 0 ]( temps ) temps = self . activation ( temps ) temps = self . linear_layers [ 0 ]( temps ) if self . use_batch_norm : temps = self . batch_norm_layers [ 1 ]( temps ) temps = self . activation ( temps ) temps = self . dropout ( temps ) temps = self . linear_layers [ 1 ]( temps ) if context is not None : temps = F . glu ( torch . cat (( temps , self . context_layer ( context )), dim = 1 ), dim = 1 ) return inputs + temps made_test Tests for MADE. Code partially taken from https://github.com/bayesiains/nsf mlp MLP Bases: Module A multilayer perceptron with Leaky ReLU nonlinearities Source code in normflows/nets/mlp.py 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 class MLP ( nn . Module ): \"\"\" A multilayer perceptron with Leaky ReLU nonlinearities \"\"\" def __init__ ( self , layers , leaky = 0.0 , score_scale = None , output_fn = None , output_scale = None , init_zeros = False , dropout = None , ): \"\"\" layers: list of layer sizes from start to end leaky: slope of the leaky part of the ReLU, if 0.0, standard ReLU is used score_scale: Factor to apply to the scores, i.e. output before output_fn. output_fn: String, function to be applied to the output, either None, \"sigmoid\", \"relu\", \"tanh\", or \"clampexp\" output_scale: Rescale outputs if output_fn is specified, i.e. ```scale * output_fn(out / scale)``` init_zeros: Flag, if true, weights and biases of last layer are initialized with zeros (helpful for deep models, see [arXiv 1807.03039](https://arxiv.org/abs/1807.03039)) dropout: Float, if specified, dropout is done before last layer; if None, no dropout is done \"\"\" super () . __init__ () net = nn . ModuleList ([]) for k in range ( len ( layers ) - 2 ): net . append ( nn . Linear ( layers [ k ], layers [ k + 1 ])) net . append ( nn . LeakyReLU ( leaky )) if dropout is not None : net . append ( nn . Dropout ( p = dropout )) net . append ( nn . Linear ( layers [ - 2 ], layers [ - 1 ])) if init_zeros : nn . init . zeros_ ( net [ - 1 ] . weight ) nn . init . zeros_ ( net [ - 1 ] . bias ) if output_fn is not None : if score_scale is not None : net . append ( utils . ConstScaleLayer ( score_scale )) if output_fn == \"sigmoid\" : net . append ( nn . Sigmoid ()) elif output_fn == \"relu\" : net . append ( nn . ReLU ()) elif output_fn == \"tanh\" : net . append ( nn . 
Tanh ()) elif output_fn == \"clampexp\" : net . append ( utils . ClampExp ()) else : raise NotImplementedError ( \"This output function is not implemented.\" ) if output_scale is not None : net . append ( utils . ConstScaleLayer ( output_scale )) self . net = nn . Sequential ( * net ) def forward ( self , x ): return self . net ( x ) __init__ ( layers , leaky = 0.0 , score_scale = None , output_fn = None , output_scale = None , init_zeros = False , dropout = None ) layers: list of layer sizes from start to end leaky: slope of the leaky part of the ReLU, if 0.0, standard ReLU is used score_scale: Factor to apply to the scores, i.e. output before output_fn. output_fn: String, function to be applied to the output, either None, \"sigmoid\", \"relu\", \"tanh\", or \"clampexp\" output_scale: Rescale outputs if output_fn is specified, i.e. scale * output_fn(out / scale) init_zeros: Flag, if true, weights and biases of last layer are initialized with zeros (helpful for deep models, see [arXiv 1807.03039](https://arxiv.org/abs/1807.03039)) dropout: Float, if specified, dropout is done before last layer; if None, no dropout is done Source code in normflows/nets/mlp.py 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 def __init__ ( self , layers , leaky = 0.0 , score_scale = None , output_fn = None , output_scale = None , init_zeros = False , dropout = None , ): \"\"\" layers: list of layer sizes from start to end leaky: slope of the leaky part of the ReLU, if 0.0, standard ReLU is used score_scale: Factor to apply to the scores, i.e. output before output_fn. output_fn: String, function to be applied to the output, either None, \"sigmoid\", \"relu\", \"tanh\", or \"clampexp\" output_scale: Rescale outputs if output_fn is specified, i.e. ```scale * output_fn(out / scale)``` init_zeros: Flag, if true, weights and biases of last layer are initialized with zeros (helpful for deep models, see [arXiv 1807.03039](https://arxiv.org/abs/1807.03039)) dropout: Float, if specified, dropout is done before last layer; if None, no dropout is done \"\"\" super () . __init__ () net = nn . ModuleList ([]) for k in range ( len ( layers ) - 2 ): net . append ( nn . Linear ( layers [ k ], layers [ k + 1 ])) net . append ( nn . LeakyReLU ( leaky )) if dropout is not None : net . append ( nn . Dropout ( p = dropout )) net . append ( nn . Linear ( layers [ - 2 ], layers [ - 1 ])) if init_zeros : nn . init . zeros_ ( net [ - 1 ] . weight ) nn . init . zeros_ ( net [ - 1 ] . bias ) if output_fn is not None : if score_scale is not None : net . append ( utils . ConstScaleLayer ( score_scale )) if output_fn == \"sigmoid\" : net . append ( nn . Sigmoid ()) elif output_fn == \"relu\" : net . append ( nn . ReLU ()) elif output_fn == \"tanh\" : net . append ( nn . Tanh ()) elif output_fn == \"clampexp\" : net . append ( utils . ClampExp ()) else : raise NotImplementedError ( \"This output function is not implemented.\" ) if output_scale is not None : net . append ( utils . ConstScaleLayer ( output_scale )) self . net = nn . Sequential ( * net ) resnet ResidualBlock Bases: Module A general-purpose residual block. Works only with 1-dim inputs. Source code in normflows/nets/resnet.py 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 class ResidualBlock ( nn . Module ): \"\"\"A general-purpose residual block. Works only with 1-dim inputs.\"\"\" def __init__ ( self , features , context_features , activation = F .
relu , dropout_probability = 0.0 , use_batch_norm = False , zero_initialization = True , ): super () . __init__ () self . activation = activation self . use_batch_norm = use_batch_norm if use_batch_norm : self . batch_norm_layers = nn . ModuleList ( [ nn . BatchNorm1d ( features , eps = 1e-3 ) for _ in range ( 2 )] ) if context_features is not None : self . context_layer = nn . Linear ( context_features , features ) self . linear_layers = nn . ModuleList ( [ nn . Linear ( features , features ) for _ in range ( 2 )] ) self . dropout = nn . Dropout ( p = dropout_probability ) if zero_initialization : init . uniform_ ( self . linear_layers [ - 1 ] . weight , - 1e-3 , 1e-3 ) init . uniform_ ( self . linear_layers [ - 1 ] . bias , - 1e-3 , 1e-3 ) def forward ( self , inputs , context = None ): temps = inputs if self . use_batch_norm : temps = self . batch_norm_layers [ 0 ]( temps ) temps = self . activation ( temps ) temps = self . linear_layers [ 0 ]( temps ) if self . use_batch_norm : temps = self . batch_norm_layers [ 1 ]( temps ) temps = self . activation ( temps ) temps = self . dropout ( temps ) temps = self . linear_layers [ 1 ]( temps ) if context is not None : temps = F . glu ( torch . cat (( temps , self . context_layer ( context )), dim = 1 ), dim = 1 ) return inputs + temps ResidualNet Bases: Module A general-purpose residual network. Works only with 1-dim inputs. Source code in normflows/nets/resnet.py 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 class ResidualNet ( nn . Module ): \"\"\"A general-purpose residual network. Works only with 1-dim inputs.\"\"\" def __init__ ( self , in_features , out_features , hidden_features , context_features = None , num_blocks = 2 , activation = F . relu , dropout_probability = 0.0 , use_batch_norm = False , preprocessing = None , ): super () . __init__ () self . hidden_features = hidden_features self . context_features = context_features self . preprocessing = preprocessing if context_features is not None : self . initial_layer = nn . Linear ( in_features + context_features , hidden_features ) else : self . initial_layer = nn . Linear ( in_features , hidden_features ) self . blocks = nn . ModuleList ( [ ResidualBlock ( features = hidden_features , context_features = context_features , activation = activation , dropout_probability = dropout_probability , use_batch_norm = use_batch_norm , ) for _ in range ( num_blocks ) ] ) self . final_layer = nn . Linear ( hidden_features , out_features ) def forward ( self , inputs , context = None ): if self . preprocessing is None : temps = inputs else : temps = self . preprocessing ( inputs ) if context is None : temps = self . initial_layer ( temps ) else : temps = self . initial_layer ( torch . cat (( temps , context ), dim = 1 )) for block in self . blocks : temps = block ( temps , context = context ) outputs = self . final_layer ( temps ) return outputs
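A minimal usage sketch for the ResidualNet above, assuming it is exported as nf.nets.ResidualNet (the package is imported as normflows elsewhere in these docs); treat it as an illustration rather than canonical usage:

```
# Hedged usage sketch (assumes nf.nets.ResidualNet is the export of the class above)
import torch
import normflows as nf

# Map 2-dim inputs to 3 outputs, conditioned on a 5-dim context vector
net = nf.nets.ResidualNet(
    in_features=2,
    out_features=3,
    hidden_features=64,
    context_features=5,
    num_blocks=2,
)

x = torch.randn(16, 2)        # batch of 1-dim (flattened) inputs
c = torch.randn(16, 5)        # conditioning variables
out = net(x, context=c)       # shape (16, 3)
```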
sampling hais HAIS Class which performs HAIS Source code in normflows/sampling/hais.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 class HAIS : \"\"\" Class which performs HAIS \"\"\" def __init__ ( self , betas , prior , target , num_leapfrog , step_size , log_mass ): \"\"\" Args: betas: Annealing schedule, the jth target is ```f_j(x) = f_0(x)^{\\beta_j} f_n(x)^{1-\\beta_j}``` where the target is proportional to f_0 and the prior is proportional to f_n. The number of intermediate steps is inferred from the shape of betas. Should be of the form 1 = \\beta_0 > \\beta_1 > ... > \\beta_n = 0 prior: The prior distribution to start the HAIS chain. target: The target distribution from which we would like to draw weighted samples. num_leapfrog: Number of leapfrog steps in the HMC transitions. step_size: step_size to use for HMC transitions. log_mass: log_mass to use for HMC transitions. \"\"\" self . prior = prior self . target = target self . layers = [] n = betas . shape [ 0 ] - 1 for i in range ( n - 1 , 0 , - 1 ): intermediate_target = distributions . LinearInterpolation ( self . target , self . prior , betas [ i ] ) self . layers += [ flows . HamiltonianMonteCarlo ( intermediate_target , num_leapfrog , torch . log ( step_size ), log_mass ) ] def sample ( self , num_samples ): \"\"\"Run HAIS to draw samples from the target with appropriate weights. Args: num_samples: The number of samples to draw. \"\"\" samples , log_weights = self . prior . forward ( num_samples ) log_weights = - log_weights for i in range ( len ( self . layers )): samples , log_weights_addition = self . layers [ i ] . forward ( samples ) log_weights += log_weights_addition log_weights += self . target . log_prob ( samples ) return samples , log_weights __init__ ( betas , prior , target , num_leapfrog , step_size , log_mass ) Parameters: Name Type Description Default betas Annealing schedule, the jth target is f_j(x) = f_0(x)^{\beta_j} f_n(x)^{1-\beta_j} where the target is proportional to f_0 and the prior is proportional to f_n. The number of intermediate steps is inferred from the shape of betas. Should be of the form 1 = \beta_0 > \beta_1 > ... > \beta_n = 0 required prior The prior distribution to start the HAIS chain. required target The target distribution from which we would like to draw weighted samples. required num_leapfrog Number of leapfrog steps in the HMC transitions. required step_size step_size to use for HMC transitions. required log_mass log_mass to use for HMC transitions. required Source code in normflows/sampling/hais.py 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 def __init__ ( self , betas , prior , target , num_leapfrog , step_size , log_mass ): \"\"\" Args: betas: Annealing schedule, the jth target is ```f_j(x) = f_0(x)^{\\beta_j} f_n(x)^{1-\\beta_j}``` where the target is proportional to f_0 and the prior is proportional to f_n. The number of intermediate steps is inferred from the shape of betas. Should be of the form 1 = \\beta_0 > \\beta_1 > ... > \\beta_n = 0 prior: The prior distribution to start the HAIS chain. target: The target distribution from which we would like to draw weighted samples. num_leapfrog: Number of leapfrog steps in the HMC transitions. step_size: step_size to use for HMC transitions. log_mass: log_mass to use for HMC transitions. \"\"\" self . prior = prior self . target = target self . layers = [] n = betas . shape [ 0 ] - 1 for i in range ( n - 1 , 0 , - 1 ): intermediate_target = distributions . LinearInterpolation ( self . target , self . prior , betas [ i ] ) self . layers += [ flows . HamiltonianMonteCarlo ( intermediate_target , num_leapfrog , torch . log ( step_size ), log_mass ) ]
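A hedged construction sketch based on the signature above. It assumes HAIS is importable from normflows.sampling, that DiagGaussian and TwoMoons provide the prior/target interface used in the listing (forward returning samples and log probabilities, log_prob), and that step_size and log_mass are per-dimension tensors (step_size is passed through torch.log internally):

```
# Hedged sketch, not canonical usage; import path and tensor-valued
# step_size/log_mass are assumptions.
import torch
import normflows as nf
from normflows.sampling import HAIS

dim = 2
prior = nf.distributions.DiagGaussian(dim)   # start of the annealed chain
target = nf.distributions.TwoMoons()         # distribution to draw weighted samples from

# Annealing schedule: 1 = beta_0 > beta_1 > ... > beta_n = 0
betas = torch.linspace(1.0, 0.0, steps=11)

hais = HAIS(
    betas,
    prior,
    target,
    num_leapfrog=5,
    step_size=0.1 * torch.ones(dim),   # logged internally via torch.log
    log_mass=torch.zeros(dim),         # unit mass
)

samples, log_weights = hais.sample(2 ** 10)  # importance-weighted target samples
```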
sample ( num_samples ) Run HAIS to draw samples from the target with appropriate weights. Parameters: Name Type Description Default num_samples The number of samples to draw. required Source code in normflows/sampling/hais.py 37 38 39 40 41 42 43 44 45 46 47 48 49 def sample ( self , num_samples ): \"\"\"Run HAIS to draw samples from the target with appropriate weights. Args: num_samples: The number of samples to draw. \"\"\" samples , log_weights = self . prior . forward ( num_samples ) log_weights = - log_weights for i in range ( len ( self . layers )): samples , log_weights_addition = self . layers [ i ] . forward ( samples ) log_weights += log_weights_addition log_weights += self . target . log_prob ( samples ) return samples , log_weights transforms Logit Bases: Flow Logit mapping of image tensor, see RealNVP paper logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x)) Source code in normflows/transforms.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 class Logit ( flows . Flow ): \"\"\"Logit mapping of image tensor, see RealNVP paper ``` logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x)) ``` \"\"\" def __init__ ( self , alpha = 0.05 ): \"\"\"Constructor Args: alpha: Alpha parameter, see above \"\"\" super () . __init__ () self . alpha = alpha def forward ( self , z ): beta = 1 - 2 * self . alpha sum_dims = list ( range ( 1 , z . dim ())) ls = torch . sum ( torch . nn . functional . logsigmoid ( z ), dim = sum_dims ) mls = torch . sum ( torch . nn . functional . logsigmoid ( - z ), dim = sum_dims ) log_det = - np . log ( beta ) * np . prod ([ * z . shape [ 1 :]]) + ls + mls z = ( torch . sigmoid ( z ) - self . alpha ) / beta return z , log_det def inverse ( self , z ): beta = 1 - 2 * self . alpha z = self . alpha + beta * z logz = torch . log ( z ) log1mz = torch . log ( 1 - z ) z = logz - log1mz sum_dims = list ( range ( 1 , z . dim ())) log_det = ( np . log ( beta ) * np . prod ([ * z . shape [ 1 :]]) - torch . sum ( logz , dim = sum_dims ) - torch . sum ( log1mz , dim = sum_dims ) ) return z , log_det __init__ ( alpha = 0.05 ) Constructor Parameters: Name Type Description Default alpha Alpha parameter, see above 0.05 Source code in normflows/transforms.py 17 18 19 20 21 22 23 24 def __init__ ( self , alpha = 0.05 ): \"\"\"Constructor Args: alpha: Alpha parameter, see above \"\"\" super () . __init__ () self . alpha = alpha Shift Bases: Flow Shift data by a fixed constant Default is -0.5 to shift data from interval [0, 1] to [-0.5, 0.5] Source code in normflows/transforms.py 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 class Shift ( flows . Flow ): \"\"\"Shift data by a fixed constant Default is -0.5 to shift data from interval [0, 1] to [-0.5, 0.5] \"\"\" def __init__ ( self , shift =- 0.5 ): \"\"\"Constructor Args: shift: Shift to apply to the data \"\"\" super () . __init__ () self . shift = shift def forward ( self , z ): z -= self . shift log_det = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) return z , log_det def inverse ( self , z ): z += self . shift log_det = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) return z , log_det __init__ ( shift =- 0.5 ) Constructor Parameters: Name Type Description Default shift Shift to apply to the data -0.5 Source code in normflows/transforms.py 57 58 59 60 61 62 63 64 def __init__ ( self , shift =- 0.5 ): \"\"\"Constructor Args: shift: Shift to apply to the data \"\"\" super () . __init__ () self . shift = shift
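A short sketch of using the two flows above on image-style data, assuming they are exposed under nf.transforms; note that inverse maps data to the unbounded latent space while forward maps back:

```
# Hedged sketch (assumes nf.transforms.Logit and nf.transforms.Shift exports)
import torch
import normflows as nf

logit = nf.transforms.Logit(alpha=0.05)
shift = nf.transforms.Shift(shift=-0.5)

x = torch.rand(8, 12)                  # stand-in batch of flattened image data in [0, 1]
z, log_det = logit.inverse(x)          # data -> unbounded logit space
x_rec, _ = logit(z)                    # forward recovers the data
print(torch.allclose(x, x_rec, atol=1e-5))  # reconstruction check

y, _ = shift.inverse(x)                # [0, 1] -> [-0.5, 0.5]
```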
transforms_test utils eval bitsPerDim ( model , x , y = None , trans = 'logit' , trans_param = [ 0.05 ]) Computes the bits per dim for a batch of data Parameters: Name Type Description Default model Model to compute bits per dim for required x Batch of data required y Class labels for batch of data if base distribution is class conditional None trans Transformation to be applied to images during training 'logit' trans_param List of parameters of the transformation [0.05] Returns: Type Description Bits per dim for data batch under model Source code in normflows/utils/eval.py 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 def bitsPerDim ( model , x , y = None , trans = \"logit\" , trans_param = [ 0.05 ]): \"\"\"Computes the bits per dim for a batch of data Args: model: Model to compute bits per dim for x: Batch of data y: Class labels for batch of data if base distribution is class conditional trans: Transformation to be applied to images during training trans_param: List of parameters of the transformation Returns: Bits per dim for data batch under model \"\"\" dims = torch . prod ( torch . tensor ( x . size ()[ 1 :])) if trans == \"logit\" : if y is None : log_q = model . log_prob ( x ) else : log_q = model . log_prob ( x , y ) sum_dims = list ( range ( 1 , x . dim ())) ls = torch . nn . LogSigmoid () sig_ = torch . sum ( ls ( x ) / np . log ( 2 ), sum_dims ) sig_ += torch . sum ( ls ( - x ) / np . log ( 2 ), sum_dims ) b = - log_q / dims / np . log ( 2 ) - np . log2 ( 1 - trans_param [ 0 ]) + 8 b += sig_ / dims else : raise NotImplementedError ( \"The transformation \" + trans + \" is not implemented.\" ) return b bitsPerDimDataset ( model , data_loader , class_cond = True , trans = 'logit' , trans_param = [ 0.05 ]) Computes average bits per dim for an entire dataset given by a data loader Parameters: Name Type Description Default model Model to compute bits per dim for required data_loader Data loader of dataset required class_cond Flag indicating whether model is class_conditional True trans Transformation to be applied to images during training 'logit' trans_param List of parameters of the transformation [0.05] Returns: Type Description Average bits per dim for dataset Source code in normflows/utils/eval.py 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 def bitsPerDimDataset ( model , data_loader , class_cond = True , trans = \"logit\" , trans_param = [ 0.05 ] ): \"\"\"Computes average bits per dim for an entire dataset given by a data loader Args: model: Model to compute bits per dim for data_loader: Data loader of dataset class_cond: Flag indicating whether model is class_conditional trans: Transformation to be applied to images during training trans_param: List of parameters of the transformation Returns: Average bits per dim for dataset \"\"\" n = 0 b_cum = 0 with torch . no_grad (): for x , y in iter ( data_loader ): b_ = bitsPerDim ( model , x , y . to ( x . device ) if class_cond else None , trans , trans_param ) b_np = b_ . to ( \"cpu\" ) . numpy () b_cum += np . nansum ( b_np ) n += len ( x ) - np . sum ( np . isnan ( b_np )) b = b_cum / n return b
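A hedged sketch of calling bitsPerDim; the re-export as nf.utils.bitsPerDim is an assumption, and the tiny untrained flow only serves to provide a model with log_prob (in practice this would be a trained image flow evaluated on logit-transformed data):

```
# Hedged sketch; nf.utils.bitsPerDim as the export path is an assumption.
import torch
import normflows as nf

base = nf.distributions.DiagGaussian(4)
flows = [nf.flows.ActNorm(4)]
model = nf.NormalizingFlow(base, flows)   # toy stand-in for a trained image flow

x = torch.randn(8, 4)                     # stand-in batch in logit space
b = nf.utils.bitsPerDim(model, x)         # per-sample bits per dim, shape (8,)
print(b.mean().item())
```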
masks create_alternating_binary_mask ( features , even = True ) Creates a binary mask of a given dimension which alternates its masking. Parameters: Name Type Description Default features Dimension of mask. required even If True, even values are assigned 1s, odd 0s. If False, vice versa. True Returns: Type Description Alternating binary mask of type torch.Tensor. Source code in normflows/utils/masks.py 4 5 6 7 8 9 10 11 12 13 14 15 16 17 def create_alternating_binary_mask ( features , even = True ): \"\"\"Creates a binary mask of a given dimension which alternates its masking. Args: features: Dimension of mask. even: If True, even values are assigned 1s, odd 0s. If False, vice versa. Returns: Alternating binary mask of type torch.Tensor. \"\"\" mask = torch . zeros ( features ) . byte () start = 0 if even else 1 mask [ start :: 2 ] += 1 return mask create_mid_split_binary_mask ( features ) Creates a binary mask of a given dimension which splits its masking at the midpoint. Parameters: Name Type Description Default features Dimension of mask. required Returns: Type Description Binary mask split at midpoint of type torch.Tensor Source code in normflows/utils/masks.py 20 21 22 23 24 25 26 27 28 29 30 31 32 def create_mid_split_binary_mask ( features ): \"\"\"Creates a binary mask of a given dimension which splits its masking at the midpoint. Args: features: Dimension of mask. Returns: Binary mask split at midpoint of type torch.Tensor \"\"\" mask = torch . zeros ( features ) . byte () midpoint = features // 2 if features % 2 == 0 else features // 2 + 1 mask [: midpoint ] += 1 return mask create_random_binary_mask ( features , seed = None ) Creates a random binary mask of a given dimension with half of its entries randomly set to 1s. Parameters: Name Type Description Default features Dimension of mask. required seed Seed to be used None Returns: Type Description Binary mask with half of its entries set to 1s, of type torch.Tensor. Source code in normflows/utils/masks.py 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 def create_random_binary_mask ( features , seed = None ): \"\"\"Creates a random binary mask of a given dimension with half of its entries randomly set to 1s. Args: features: Dimension of mask. seed: Seed to be used Returns: Binary mask with half of its entries set to 1s, of type torch.Tensor. \"\"\" mask = torch . zeros ( features ) . byte () weights = torch . ones ( features ) . float () num_samples = features // 2 if features % 2 == 0 else features // 2 + 1 if seed is None : generator = None else : generator = torch . Generator () generator . manual_seed ( seed ) indices = torch . multinomial ( input = weights , num_samples = num_samples , replacement = False , generator = generator ) mask [ indices ] += 1 return mask nn ActNorm Bases: Module ActNorm layer with just one forward pass Source code in normflows/utils/nn.py 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 class ActNorm ( nn . Module ): \"\"\" ActNorm layer with just one forward pass \"\"\" def __init__ ( self , shape ): \"\"\"Constructor Args: shape: Same as shape in flows.ActNorm logscale_factor: Same as logscale_factor in flows.ActNorm \"\"\" super () . __init__ () self . actNorm = flows . ActNorm ( shape ) def forward ( self , input ): out , _ = self . actNorm ( input ) return out __init__ ( shape ) Constructor Parameters: Name Type Description Default shape Same as shape in flows.ActNorm required logscale_factor Same as logscale_factor in flows.ActNorm required Source code in normflows/utils/nn.py 30 31 32 33 34 35 36 37 38 39 def __init__ ( self , shape ): \"\"\"Constructor Args: shape: Same as shape in flows.ActNorm logscale_factor: Same as logscale_factor in flows.ActNorm \"\"\" super () . __init__ () self . actNorm = flows .
ActNorm ( shape ) ClampExp Bases: Module Nonlinearity min(exp(lam * x), 1) Source code in normflows/utils/nn.py 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 class ClampExp ( nn . Module ): \"\"\" Nonlinearity min(exp(lam * x), 1) \"\"\" def __init__ ( self ): \"\"\"Constructor Args: lam: Lambda parameter \"\"\" super ( ClampExp , self ) . __init__ () def forward ( self , x ): one = torch . tensor ( 1.0 , device = x . device , dtype = x . dtype ) return torch . min ( torch . exp ( x ), one ) __init__ () Constructor Parameters: Name Type Description Default lam Lambda parameter required Source code in normflows/utils/nn.py 51 52 53 54 55 56 57 def __init__ ( self ): \"\"\"Constructor Args: lam: Lambda parameter \"\"\" super ( ClampExp , self ) . __init__ () ConstScaleLayer Bases: Module Scaling features by a fixed factor Source code in normflows/utils/nn.py 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 class ConstScaleLayer ( nn . Module ): \"\"\" Scaling features by a fixed factor \"\"\" def __init__ ( self , scale = 1.0 ): \"\"\"Constructor Args: scale: Scale to apply to features \"\"\" super () . __init__ () self . scale_cpu = torch . tensor ( scale ) self . register_buffer ( \"scale\" , self . scale_cpu ) def forward ( self , input ): return input * self . scale __init__ ( scale = 1.0 ) Constructor Parameters: Name Type Description Default scale Scale to apply to features 1.0 Source code in normflows/utils/nn.py 12 13 14 15 16 17 18 19 20 def __init__ ( self , scale = 1.0 ): \"\"\"Constructor Args: scale: Scale to apply to features \"\"\" super () . __init__ () self . scale_cpu = torch . tensor ( scale ) self . register_buffer ( \"scale\" , self . scale_cpu ) PeriodicFeaturesCat Bases: Module Converts a specified part of the input to periodic features by replacing those features f with [sin(scale * f), cos(scale * f)]. Note that this decreases the number of features and their order is changed. Source code in normflows/utils/nn.py 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 class PeriodicFeaturesCat ( nn . Module ): \"\"\" Converts a specified part of the input to periodic features by replacing those features f with [sin(scale * f), cos(scale * f)]. Note that this decreases the number of features and their order is changed. \"\"\" def __init__ ( self , ndim , ind , scale = 1.0 ): \"\"\" Constructor :param ndim: Int, number of dimensions :param ind: Iterable, indices of input elements to convert to periodic features :param scale: Scalar or iterable, used to scale inputs before converting them to periodic features \"\"\" super ( PeriodicFeaturesCat , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . scale = scale def forward ( self , inputs ): inputs_ = inputs [ ... , self . ind ] inputs_ = self . scale * inputs_ inputs_sin = torch . sin ( inputs_ ) inputs_cos = torch . cos ( inputs_ ) out = torch . cat (( inputs_sin , inputs_cos , inputs [ ... , self . 
ind_ ]), - 1 ) return out __init__ ( ndim , ind , scale = 1.0 ) Constructor :param ndim: Int, number of dimensions :param ind: Iterable, indices of input elements to convert to periodic features :param scale: Scalar or iterable, used to scale inputs before converting them to periodic features Source code in normflows/utils/nn.py 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 def __init__ ( self , ndim , ind , scale = 1.0 ): \"\"\" Constructor :param ndim: Int, number of dimensions :param ind: Iterable, indices of input elements to convert to periodic features :param scale: Scalar or iterable, used to scale inputs before converting them to periodic features \"\"\" super ( PeriodicFeaturesCat , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . scale = scale PeriodicFeaturesElementwise Bases: Module Converts a specified part of the input to periodic features by replacing those features f with w1 * sin(scale * f) + w2 * cos(scale * f). Note that this operation is done elementwise and, therefore, some information about the feature can be lost. Source code in normflows/utils/nn.py 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 class PeriodicFeaturesElementwise ( nn . Module ): \"\"\" Converts a specified part of the input to periodic features by replacing those features f with w1 * sin(scale * f) + w2 * cos(scale * f). Note that this operation is done elementwise and, therefore, some information about the feature can be lost. \"\"\" def __init__ ( self , ndim , ind , scale = 1.0 , bias = False , activation = None ): \"\"\"Constructor Args: ndim (int): number of dimensions ind (iterable): indices of input elements to convert to periodic features scale: Scalar or iterable, used to scale inputs before converting them to periodic features bias: Flag, whether to add a bias activation: Function or None, activation function to be applied \"\"\" super ( PeriodicFeaturesElementwise , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) perm_ = torch . cat (( self . ind , self . ind_ )) inv_perm_ = torch . zeros_like ( perm_ ) for i in range ( self . ndim ): inv_perm_ [ perm_ [ i ]] = i self . register_buffer ( \"inv_perm\" , inv_perm_ ) self . weights = nn . Parameter ( torch . ones ( len ( self . ind ), 2 )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . scale = scale self . apply_bias = bias if self . apply_bias : self . bias = nn . Parameter ( torch . zeros ( len ( self . 
ind ))) if activation is None : self . activation = torch . nn . Identity () else : self . activation = activation def forward ( self , inputs ): inputs_ = inputs [ ... , self . ind ] inputs_ = self . scale * inputs_ inputs_ = self . weights [:, 0 ] * torch . sin ( inputs_ ) + self . weights [ :, 1 ] * torch . cos ( inputs_ ) if self . apply_bias : inputs_ = inputs_ + self . bias inputs_ = self . activation ( inputs_ ) out = torch . cat (( inputs_ , inputs [ ... , self . ind_ ]), - 1 ) return out [ ... , self . inv_perm ] __init__ ( ndim , ind , scale = 1.0 , bias = False , activation = None ) Constructor Parameters: Name Type Description Default ndim int number of dimensions required ind iterable indices of input elements to convert to periodic features required scale Scalar or iterable, used to scale inputs before converting them to periodic features 1.0 bias Flag, whether to add a bias False activation Function or None, activation function to be applied None Source code in normflows/utils/nn.py 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 def __init__ ( self , ndim , ind , scale = 1.0 , bias = False , activation = None ): \"\"\"Constructor Args: ndim (int): number of dimensions ind (iterable): indices of input elements to convert to periodic features scale: Scalar or iterable, used to scale inputs before converting them to periodic features bias: Flag, whether to add a bias activation: Function or None, activation function to be applied \"\"\" super ( PeriodicFeaturesElementwise , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) perm_ = torch . cat (( self . ind , self . ind_ )) inv_perm_ = torch . zeros_like ( perm_ ) for i in range ( self . ndim ): inv_perm_ [ perm_ [ i ]] = i self . register_buffer ( \"inv_perm\" , inv_perm_ ) self . weights = nn . Parameter ( torch . ones ( len ( self . ind ), 2 )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . scale = scale self . apply_bias = bias if self . apply_bias : self . bias = nn . Parameter ( torch . zeros ( len ( self . ind ))) if activation is None : self . activation = torch . nn . Identity () else : self . activation = activation sum_except_batch ( x , num_batch_dims = 1 ) Sums all elements of x except for the first num_batch_dims dimensions. Source code in normflows/utils/nn.py 190 191 192 193 def sum_except_batch ( x , num_batch_dims = 1 ): \"\"\"Sums all elements of `x` except for the first `num_batch_dims` dimensions.\"\"\" reduce_dims = list ( range ( num_batch_dims , x . ndimension ())) return torch . sum ( x , dim = reduce_dims ) optim clear_grad ( model ) Set gradients of model parameter to None as this speeds up training, See youtube Parameters: Name Type Description Default model Model to clear gradients of required Source code in normflows/utils/optim.py 16 17 18 19 20 21 22 23 24 25 def clear_grad ( model ): \"\"\"Set gradients of model parameter to None as this speeds up training, See [youtube](https://www.youtube.com/watch?v=9mS1fIYj1So) Args: model: Model to clear gradients of \"\"\" for param in model . 
parameters (): param . grad = None set_requires_grad ( module , flag ) Sets requires_grad flag of all parameters of a torch.nn.module Parameters: Name Type Description Default module torch.nn.module required flag Flag to set requires_grad to required Source code in normflows/utils/optim.py 4 5 6 7 8 9 10 11 12 13 def set_requires_grad ( module , flag ): \"\"\"Sets requires_grad flag of all parameters of a torch.nn.module Args: module: torch.nn.module flag: Flag to set requires_grad to \"\"\" for param in module . parameters (): param . requires_grad = flag preprocessing Jitter Transform for dataloader, adds uniform jitter noise to data Source code in normflows/utils/preprocessing.py 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 class Jitter : \"\"\"Transform for dataloader, adds uniform jitter noise to data\"\"\" def __init__ ( self , scale = 1.0 / 256 ): \"\"\"Constructor Args: scale: Scaling factor for noise \"\"\" self . scale = scale def __call__ ( self , x ): eps = torch . rand_like ( x ) * self . scale x_ = x + eps return x_ __init__ ( scale = 1.0 / 256 ) Constructor Parameters: Name Type Description Default scale Scaling factor for noise 1.0 / 256 Source code in normflows/utils/preprocessing.py 31 32 33 34 35 36 37 def __init__ ( self , scale = 1.0 / 256 ): \"\"\"Constructor Args: scale: Scaling factor for noise \"\"\" self . scale = scale Logit Transform for dataloader logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x)) Source code in normflows/utils/preprocessing.py 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 class Logit : \"\"\"Transform for dataloader ``` logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x)) ``` \"\"\" def __init__ ( self , alpha = 0 ): \"\"\"Constructor Args: alpha: see above \"\"\" self . alpha = alpha def __call__ ( self , x ): x_ = self . alpha + ( 1 - self . alpha ) * x return torch . log ( x_ / ( 1 - x_ )) def inverse ( self , x ): return ( torch . sigmoid ( x ) - self . alpha ) / ( 1 - self . alpha ) __init__ ( alpha = 0 ) Constructor Parameters: Name Type Description Default alpha see above 0 Source code in normflows/utils/preprocessing.py 12 13 14 15 16 17 18 def __init__ ( self , alpha = 0 ): \"\"\"Constructor Args: alpha: see above \"\"\" self . alpha = alpha Scale Transform for dataloader, scales data by a fixed factor Source code in normflows/utils/preprocessing.py 45 46 47 48 49 50 51 52 53 54 55 56 57 class Scale : \"\"\"Transform for dataloader, scales data by a fixed factor\"\"\" def __init__ ( self , scale = 255.0 / 256.0 ): \"\"\"Constructor Args: scale: Scaling factor applied to the data \"\"\" self . scale = scale def __call__ ( self , x ): return x * self . scale __init__ ( scale = 255.0 / 256.0 ) Constructor Parameters: Name Type Description Default scale Scaling factor applied to the data 255.0 / 256.0 Source code in normflows/utils/preprocessing.py 48 49 50 51 52 53 54 def __init__ ( self , scale = 255.0 / 256.0 ): \"\"\"Constructor Args: scale: Scaling factor applied to the data \"\"\" self . scale = scale","title":"API"},
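A hedged sketch chaining the three dataloader transforms documented above (Scale, then Jitter, then Logit), assuming they are importable from normflows.utils.preprocessing:

```
# Hedged sketch; the import path normflows.utils.preprocessing is an assumption.
import torch
from normflows.utils.preprocessing import Jitter, Logit, Scale

scale = Scale(255.0 / 256.0)   # compress [0, 1] into [0, 255/256]
jitter = Jitter(1.0 / 256)     # uniform dequantization noise
logit = Logit(alpha=0.05)      # map to unbounded logit space

x = torch.rand(4, 1, 8, 8)     # stand-in for a batch of images in [0, 1]
z = logit(jitter(scale(x)))    # unbounded representation for training
x_rec = logit.inverse(z)       # approximately recovers the dequantized input
```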
{"location":"references/#api-references","text":"","title":"API references"},{"location":"references/#normflows.core","text":"","title":"core"},{"location":"references/#normflows.core.ClassCondFlow","text":"Bases: Module Class conditional normalizing Flow model, providing the class to be conditioned on only to the base distribution, as done e.g. in [Glow](https://arxiv.org/abs/1807.03039) Source code in normflows/core.py 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 class ClassCondFlow ( nn . Module ): \"\"\" Class conditional normalizing Flow model, providing the class to be conditioned on only to the base distribution, as done e.g. in [Glow](https://arxiv.org/abs/1807.03039) \"\"\" def __init__ ( self , q0 , flows ): \"\"\"Constructor Args: q0: Base distribution flows: List of flows \"\"\" super () . __init__ () self . q0 = q0 self . flows = nn . ModuleList ( flows ) def forward_kld ( self , x , y ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z , y ) return - torch . mean ( log_q ) def sample ( self , num_samples = 1 , y = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw y: Classes to sample from, will be sampled uniformly if None Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples , y ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det return z , log_q def log_prob ( self , x , y ): \"\"\"Get log probability for batch Args: x: Batch y: Classes of x Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z , y ) return log_q def save ( self , path ): \"\"\"Save state dict of model Args: path: Path including filename where to save model \"\"\" torch . save ( self . state_dict (), path ) def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . load ( path ))","title":"ClassCondFlow"},
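A hedged sketch of assembling a ClassCondFlow, assuming ClassCondFlow is exported at the package top level and that ClassCondDiagGaussian is the class-conditional base distribution in nf.distributions:

```
# Hedged sketch; the exports nf.ClassCondFlow and
# nf.distributions.ClassCondDiagGaussian are assumptions.
import torch
import normflows as nf

num_classes = 10
latent_size = 4

q0 = nf.distributions.ClassCondDiagGaussian(latent_size, num_classes)
flows = [nf.flows.ActNorm(latent_size) for _ in range(2)]
model = nf.ClassCondFlow(q0, flows)

y = torch.randint(num_classes, (16,))       # class labels
z, log_q = model.sample(num_samples=16, y=y)
loss = model.forward_kld(z.detach(), y)     # maximum likelihood objective
```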
{"location":"references/#normflows.core.ClassCondFlow.__init__","text":"Constructor Parameters: Name Type Description Default q0 Base distribution required flows List of flows required Source code in normflows/core.py 376 377 378 379 380 381 382 383 384 385 def __init__ ( self , q0 , flows ): \"\"\"Constructor Args: q0: Base distribution flows: List of flows \"\"\" super () . __init__ () self . q0 = q0 self . flows = nn . ModuleList ( flows )","title":"__init__"},{"location":"references/#normflows.core.ClassCondFlow.forward_kld","text":"Estimates forward KL divergence, see arXiv 1912.02762 Parameters: Name Type Description Default x Batch sampled from target distribution required Returns: Type Description Estimate of forward KL divergence averaged over batch Source code in normflows/core.py 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 def forward_kld ( self , x , y ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z , y ) return - torch . mean ( log_q )","title":"forward_kld"},{"location":"references/#normflows.core.ClassCondFlow.load","text":"Load model from state dict Parameters: Name Type Description Default path Path including filename where to load model from required Source code in normflows/core.py 446 447 448 449 450 451 452 def load ( self , path ): \"\"\"Load model from state dict Args: path: Path including filename where to load model from \"\"\" self . load_state_dict ( torch . load ( path ))","title":"load"},{"location":"references/#normflows.core.ClassCondFlow.log_prob","text":"Get log probability for batch Parameters: Name Type Description Default x Batch required y Classes of x required Returns: Type Description log probability Source code in normflows/core.py 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 def log_prob ( self , x , y ): \"\"\"Get log probability for batch Args: x: Batch y: Classes of x Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_q += log_det log_q += self . q0 . log_prob ( z , y ) return log_q","title":"log_prob"},{"location":"references/#normflows.core.ClassCondFlow.sample","text":"Samples from flow-based approximate distribution Parameters: Name Type Description Default num_samples Number of samples to draw 1 y Classes to sample from, will be sampled uniformly if None None Returns: Type Description Samples, log probability Source code in normflows/core.py 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 def sample ( self , num_samples = 1 , y = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw y: Classes to sample from, will be sampled uniformly if None Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples , y ) for flow in self . flows : z , log_det = flow ( z ) log_q -= log_det return z , log_q","title":"sample"},{"location":"references/#normflows.core.ClassCondFlow.save","text":"Save state dict of model Parameters: Name Type Description Default path Path including filename where to save model required Source code in normflows/core.py 438 439 440 441 442 443 444 def save ( self , path ): \"\"\"Save state dict of model Args: path: Path including filename where to save model \"\"\" torch . save ( self .
state_dict (), path )","title":"save"},{"location":"references/#normflows.core.ConditionalNormalizingFlow","text":"Bases: NormalizingFlow Conditional normalizing flow model, providing condition, which is also called context, to both the base distribution and the flow layers Source code in normflows/core.py 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 class ConditionalNormalizingFlow ( NormalizingFlow ): \"\"\" Conditional normalizing flow model, providing condition, which is also called context, to both the base distribution and the flow layers \"\"\" def forward ( self , z , context = None ): \"\"\"Transforms latent variable z to the flow variable x Args: z: Batch in the latent space context: Batch of conditions/context Returns: Batch in the space of the target distribution \"\"\" for flow in self . flows : z , _ = flow ( z , context = context ) return z def forward_and_log_det ( self , z , context = None ): \"\"\"Transforms latent variable z to the flow variable x and computes log determinant of the Jacobian Args: z: Batch in the latent space context: Batch of conditions/context Returns: Batch in the space of the target distribution, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( z ), device = z . device ) for flow in self . flows : z , log_d = flow ( z , context = context ) log_det += log_d return z , log_det def inverse ( self , x , context = None ): \"\"\"Transforms flow variable x to the latent variable z Args: x: Batch in the space of the target distribution context: Batch of conditions/context Returns: Batch in the latent space \"\"\" for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , _ = self . flows [ i ] . inverse ( x , context = context ) return x def inverse_and_log_det ( self , x , context = None ): \"\"\"Transforms flow variable x to the latent variable z and computes log determinant of the Jacobian Args: x: Batch in the space of the target distribution context: Batch of conditions/context Returns: Batch in the latent space, log determinant of the Jacobian \"\"\" log_det = torch . zeros ( len ( x ), device = x . device ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): x , log_d = self . flows [ i ] . inverse ( x , context = context ) log_det += log_d return x , log_det def sample ( self , num_samples = 1 , context = None ): \"\"\"Samples from flow-based approximate distribution Args: num_samples: Number of samples to draw context: Batch of conditions/context Returns: Samples, log probability \"\"\" z , log_q = self . q0 ( num_samples , context = context ) for flow in self . flows : z , log_det = flow ( z , context = context ) log_q -= log_det return z , log_q def log_prob ( self , x , context = None ): \"\"\"Get log probability for batch Args: x: Batch context: Batch of conditions/context Returns: log probability \"\"\" log_q = torch . zeros ( len ( x ), dtype = x . dtype , device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . 
flows [ i ] . inverse ( z , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z , context = context ) return log_q def forward_kld ( self , x , context = None ): \"\"\"Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: x: Batch sampled from target distribution context: Batch of conditions/context Returns: Estimate of forward KL divergence averaged over batch \"\"\" log_q = torch . zeros ( len ( x ), device = x . device ) z = x for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z , context = context ) return - torch . mean ( log_q ) def reverse_kld ( self , num_samples = 1 , context = None , beta = 1.0 , score_fn = True ): \"\"\"Estimates reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762) Args: num_samples: Number of samples to draw from base distribution context: Batch of conditions/context beta: Annealing parameter, see [arXiv 1505.05770](https://arxiv.org/abs/1505.05770) score_fn: Flag whether to include score function in gradient, see [arXiv 1703.09194](https://arxiv.org/abs/1703.09194) Returns: Estimate of the reverse KL divergence averaged over latent samples \"\"\" z , log_q_ = self . q0 ( num_samples , context = context ) log_q = torch . zeros_like ( log_q_ ) log_q += log_q_ for flow in self . flows : z , log_det = flow ( z , context = context ) log_q -= log_det if not score_fn : z_ = z log_q = torch . zeros ( len ( z_ ), device = z_ . device ) utils . set_requires_grad ( self , False ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z_ , log_det = self . flows [ i ] . inverse ( z_ , context = context ) log_q += log_det log_q += self . q0 . log_prob ( z_ , context = context ) utils . set_requires_grad ( self , True ) log_p = self . p . log_prob ( z , context = context ) return torch . mean ( log_q ) - beta * torch . mean ( log_p )","title":"ConditionalNormalizingFlow"},{"location":"references/#normflows.core.ConditionalNormalizingFlow.forward","text":"Transforms latent variable z to the flow variable x Parameters: Name Type Description Default z Batch in the latent space required context Batch of conditions/context None Returns: Type Description Batch in the space of the target distribution Source code in normflows/core.py 222 223 224 225 226 227 228 229 230 231 232 233 234 def forward ( self , z , context = None ): \"\"\"Transforms latent variable z to the flow variable x Args: z: Batch in the latent space context: Batch of conditions/context Returns: Batch in the space of the target distribution \"\"\" for flow in self . 
flows: z, _ = flow(z, context=context); return z

#### normflows.core.ConditionalNormalizingFlow.forward_and_log_det

Transforms the latent variable z to the flow variable x and computes the log determinant of the Jacobian. Source code in `normflows/core.py`:

```python
def forward_and_log_det(self, z, context=None):
    """Transforms latent variable z to the flow variable x and
    computes log determinant of the Jacobian

    Args:
        z: Batch in the latent space
        context: Batch of conditions/context

    Returns:
        Batch in the space of the target distribution,
        log determinant of the Jacobian
    """
    log_det = torch.zeros(len(z), device=z.device)
    for flow in self.flows:
        z, log_d = flow(z, context=context)
        log_det += log_d
    return z, log_det
```

#### normflows.core.ConditionalNormalizingFlow.forward_kld

Estimates the forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762). Source code in `normflows/core.py`:

```python
def forward_kld(self, x, context=None):
    """Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762)

    Args:
        x: Batch sampled from target distribution
        context: Batch of conditions/context

    Returns:
        Estimate of forward KL divergence averaged over batch
    """
    log_q = torch.zeros(len(x), device=x.device)
    z = x
    for i in range(len(self.flows) - 1, -1, -1):
        z, log_det = self.flows[i].inverse(z, context=context)
        log_q += log_det
    log_q += self.q0.log_prob(z, context=context)
    return -torch.mean(log_q)
```

#### normflows.core.ConditionalNormalizingFlow.inverse

Transforms the flow variable x to the latent variable z. Source code in `normflows/core.py`:

```python
def inverse(self, x, context=None):
    """Transforms flow variable x to the latent variable z

    Args:
        x: Batch in the space of the target distribution
        context: Batch of conditions/context

    Returns:
        Batch in the latent space
    """
    for i in range(len(self.flows) - 1, -1, -1):
        x, _ = self.flows[i].inverse(x, context=context)
    return x
```

#### normflows.core.ConditionalNormalizingFlow.inverse_and_log_det

Transforms the flow variable x to the latent variable z and computes the log determinant of the Jacobian. Source code in `normflows/core.py`:

```python
def inverse_and_log_det(self, x, context=None):
    """Transforms flow variable x to the latent variable z and
    computes log determinant of the Jacobian

    Args:
        x: Batch in the space of the target distribution
        context: Batch of conditions/context

    Returns:
        Batch in the latent space, log determinant of the Jacobian
    """
    log_det = torch.zeros(len(x), device=x.device)
    for i in range(len(self.flows) - 1, -1, -1):
        x, log_d = self.flows[i].inverse(x, context=context)
        log_det += log_d
    return x, log_det
```

#### normflows.core.ConditionalNormalizingFlow.log_prob

Get log probability for a batch. Source code in `normflows/core.py`:

```python
def log_prob(self, x, context=None):
    """Get log probability for batch

    Args:
        x: Batch
        context: Batch of conditions/context

    Returns:
        log probability
    """
    log_q = torch.zeros(len(x), dtype=x.dtype, device=x.device)
    z = x
    for i in range(len(self.flows) - 1, -1, -1):
        z, log_det = self.flows[i].inverse(z, context=context)
        log_q += log_det
    log_q += self.q0.log_prob(z, context=context)
    return log_q
```

#### normflows.core.ConditionalNormalizingFlow.reverse_kld

Estimates the reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762). Source code in `normflows/core.py`:

```python
def reverse_kld(self, num_samples=1, context=None, beta=1.0, score_fn=True):
    """Estimates reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762)

    Args:
        num_samples: Number of samples to draw from base distribution
        context: Batch of conditions/context
        beta: Annealing parameter, see [arXiv 1505.05770](https://arxiv.org/abs/1505.05770)
        score_fn: Flag whether to include score function in gradient,
            see [arXiv 1703.09194](https://arxiv.org/abs/1703.09194)

    Returns:
        Estimate of the reverse KL divergence averaged over latent samples
    """
    z, log_q_ = self.q0(num_samples, context=context)
    log_q = torch.zeros_like(log_q_)
    log_q += log_q_
    for flow in self.flows:
        z, log_det = flow(z, context=context)
        log_q -= log_det
    if not score_fn:
        z_ = z
        log_q = torch.zeros(len(z_), device=z_.device)
        utils.set_requires_grad(self, False)
        for i in range(len(self.flows) - 1, -1, -1):
            z_, log_det = self.flows[i].inverse(z_, context=context)
            log_q += log_det
        log_q += self.q0.log_prob(z_, context=context)
        utils.set_requires_grad(self, True)
    log_p = self.p.log_prob(z, context=context)
    return torch.mean(log_q) - beta * torch.mean(log_p)
```

#### normflows.core.ConditionalNormalizingFlow.sample

Samples from the flow-based approximate distribution. Source code in `normflows/core.py`:

```python
def sample(self, num_samples=1, context=None):
    """Samples from flow-based approximate distribution

    Args:
        num_samples: Number of samples to draw
        context: Batch of conditions/context

    Returns:
        Samples, log probability
    """
    z, log_q = self.q0(num_samples, context=context)
    for flow in self.flows:
        z, log_det = flow(z, context=context)
        log_q -= log_det
    return z, log_q
```
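The conditional model mirrors the unconditional API, with a context batch threaded through the base distribution and every flow layer. Below is a minimal sketch of one maximum likelihood step; it assumes `ConditionalNormalizingFlow` is exported at package level and that `MaskedAffineAutoregressive` accepts a `context_features` argument and a `context` input at call time (check the flows module for the context-aware layers actually available):

```python
import torch
import normflows as nf

latent_size, context_size = 2, 1

# Flow layers that are assumed to consume the context
flows = [
    nf.flows.MaskedAffineAutoregressive(
        features=latent_size, hidden_features=32, context_features=context_size
    )
    for _ in range(4)
]

# Base distribution whose mean/log-std are predicted from the context
context_encoder = torch.nn.Linear(context_size, 2 * latent_size)
q0 = nf.distributions.ConditionalDiagGaussian(latent_size, context_encoder)

model = nf.ConditionalNormalizingFlow(q0, flows)

# One maximum likelihood step on a toy batch with matching context
x = torch.randn(64, latent_size)
context = torch.randn(64, context_size)
loss = model.forward_kld(x, context=context)
loss.backward()
```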
### normflows.core.MultiscaleFlow

Bases: `Module`

Normalizing Flow model with multiscale architecture, see the RealNVP or Glow paper. Source code in `normflows/core.py`:

```python
class MultiscaleFlow(nn.Module):
    """
    Normalizing Flow model with multiscale architecture, see RealNVP or Glow paper
    """
```

#### normflows.core.MultiscaleFlow.__init__

Constructor. Source code in `normflows/core.py`:

```python
def __init__(self, q0, flows, merges, transform=None, class_cond=True):
    """Constructor

    Args:
        q0: List of base distributions
        flows: List of lists of flows for each level
        merges: List of merge/split operations (forward pass must do merge)
        transform: Initial transformation of inputs
        class_cond: Flag indicating whether the model has class-conditional
            base distributions
    """
    super().__init__()
    self.q0 = nn.ModuleList(q0)
    self.num_levels = len(self.q0)
    self.flows = torch.nn.ModuleList([nn.ModuleList(flow) for flow in flows])
    self.merges = torch.nn.ModuleList(merges)
    self.transform = transform
    self.class_cond = class_cond
```

#### normflows.core.MultiscaleFlow.forward

Get the negative log-likelihood for maximum likelihood training. Source code in `normflows/core.py`:

```python
def forward(self, x, y=None):
    """Get negative log-likelihood for maximum likelihood training

    Args:
        x: Batch of data
        y: Batch of targets, if applicable

    Returns:
        Negative log-likelihood of the batch
    """
    return -self.log_prob(x, y)
```

#### normflows.core.MultiscaleFlow.forward_and_log_det

Get the observed variable x from a list of latent variables z. Source code in `normflows/core.py`:

```python
def forward_and_log_det(self, z):
    """Get observed variable x from list of latent variables z

    Args:
        z: List of latent variables

    Returns:
        Observed variable x, log determinant of Jacobian
    """
    log_det = torch.zeros(len(z[0]), dtype=z[0].dtype, device=z[0].device)
    for i in range(len(self.q0)):
        if i == 0:
            z_ = z[0]
        else:
            z_, log_det_ = self.merges[i - 1]([z_, z[i]])
            log_det += log_det_
        for flow in self.flows[i]:
            z_, log_det_ = flow(z_)
            log_det += log_det_
    if self.transform is not None:
        z_, log_det_ = self.transform(z_)
        log_det += log_det_
    return z_, log_det
```

#### normflows.core.MultiscaleFlow.forward_kld

Estimates the forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762). Source code in `normflows/core.py`:

```python
def forward_kld(self, x, y=None):
    """Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762)

    Args:
        x: Batch sampled from target distribution
        y: Batch of targets, if applicable

    Returns:
        Estimate of forward KL divergence averaged over batch
    """
    return -torch.mean(self.log_prob(x, y))
```

#### normflows.core.MultiscaleFlow.inverse_and_log_det

Get the latent variable z from the observed variable x. Source code in `normflows/core.py`:

```python
def inverse_and_log_det(self, x):
    """Get latent variable z from observed variable x

    Args:
        x: Observed variable

    Returns:
        List of latent variables z, log determinant of Jacobian
    """
    log_det = torch.zeros(len(x), dtype=x.dtype, device=x.device)
    if self.transform is not None:
        x, log_det_ = self.transform.inverse(x)
        log_det += log_det_
    z = [None] * len(self.q0)
    for i in range(len(self.q0) - 1, -1, -1):
        for flow in reversed(self.flows[i]):
            x, log_det_ = flow.inverse(x)
            log_det += log_det_
        if i == 0:
            z[i] = x
        else:
            [x, z[i]], log_det_ = self.merges[i - 1].inverse(x)
            log_det += log_det_
    return z, log_det
```

#### normflows.core.MultiscaleFlow.load

Load model from a state dict. Source code in `normflows/core.py`:

```python
def load(self, path):
    """Load model from state dict

    Args:
        path: Path including filename where to load model from
    """
    self.load_state_dict(torch.load(path))
```

#### normflows.core.MultiscaleFlow.log_prob

Get log probability for a batch. Source code in `normflows/core.py`:

```python
def log_prob(self, x, y):
    """Get log probability for batch

    Args:
        x: Batch
        y: Classes of x

    Returns:
        log probability
    """
    log_q = 0
    z = x
    if self.transform is not None:
        z, log_det = self.transform.inverse(z)
        log_q += log_det
    for i in range(len(self.q0) - 1, -1, -1):
        for j in range(len(self.flows[i]) - 1, -1, -1):
            z, log_det = self.flows[i][j].inverse(z)
            log_q += log_det
        if i > 0:
            [z, z_], log_det = self.merges[i - 1].inverse(z)
            log_q += log_det
        else:
            z_ = z
        if self.class_cond:
            log_q += self.q0[i].log_prob(z_, y)
        else:
            log_q += self.q0[i].log_prob(z_)
    return log_q
```

#### normflows.core.MultiscaleFlow.reset_temperature

Set the temperature values of the base distributions back to None. Source code in `normflows/core.py`:

```python
def reset_temperature(self):
    """Set temperature values of base distributions back to None"""
    self.set_temperature(None)
```

#### normflows.core.MultiscaleFlow.sample

Samples from the flow-based approximate distribution. Source code in `normflows/core.py`:

```python
def sample(self, num_samples=1, y=None, temperature=None):
    """Samples from flow-based approximate distribution

    Args:
        num_samples: Number of samples to draw
        y: Classes to sample from, will be sampled uniformly if None
        temperature: Temperature parameter for temperature-annealed sampling

    Returns:
        Samples, log probability
    """
    if temperature is not None:
        self.set_temperature(temperature)
    for i in range(len(self.q0)):
        if self.class_cond:
            z_, log_q_ = self.q0[i](num_samples, y)
        else:
            z_, log_q_ = self.q0[i](num_samples)
        if i == 0:
            log_q = log_q_
            z = z_
        else:
            log_q += log_q_
            z, log_det = self.merges[i - 1]([z, z_])
            log_q -= log_det
        for flow in self.flows[i]:
            z, log_det = flow(z)
            log_q -= log_det
    if self.transform is not None:
        z, log_det = self.transform(z)
        log_q -= log_det
    if temperature is not None:
        self.reset_temperature()
    return z, log_q
```

#### normflows.core.MultiscaleFlow.save

Save the state dict of the model. Source code in `normflows/core.py`:

```python
def save(self, path):
    """Save state dict of model

    Args:
        path: Path including filename where to save model
    """
    torch.save(self.state_dict(), path)
```

#### normflows.core.MultiscaleFlow.set_temperature

Set the temperature for temperature-annealed sampling. Source code in `normflows/core.py`:

```python
def set_temperature(self, temperature):
    """Set temperature for temperature-annealed sampling

    Args:
        temperature: Temperature parameter
    """
    for q0 in self.q0:
        if hasattr(q0, "temperature"):
            q0.temperature = temperature
        else:
            raise NotImplementedError(
                "One base function does not support temperature annealed sampling"
            )
```
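Constructing a multiscale model means supplying one base distribution per level, one list of flows per level, and a merge operation between consecutive levels. A sketch in the spirit of the package's Glow example, assuming `nf.flows.GlowBlock`, `nf.flows.Squeeze`, and `nf.flows.Merge` with the signatures used there:

```python
import torch
import normflows as nf

L, K = 2, 4                      # levels, flow steps per level
input_shape = (3, 32, 32)
hidden_channels = 128
num_classes = 10

q0, flows, merges = [], [], []
for i in range(L):
    # Flow steps for this level, followed by a squeeze operation
    flows_ = [
        nf.flows.GlowBlock(input_shape[0] * 2 ** (L + 1 - i), hidden_channels,
                           split_mode="channel", scale=True)
        for _ in range(K)
    ]
    flows_ += [nf.flows.Squeeze()]
    flows += [flows_]
    if i > 0:
        merges += [nf.flows.Merge()]
        latent_shape = (input_shape[0] * 2 ** (L - i),
                        input_shape[1] // 2 ** (L - i),
                        input_shape[2] // 2 ** (L - i))
    else:
        latent_shape = (input_shape[0] * 2 ** (L + 1),
                        input_shape[1] // 2 ** L,
                        input_shape[2] // 2 ** L)
    q0 += [nf.distributions.ClassCondDiagGaussian(latent_shape, num_classes)]

model = nf.MultiscaleFlow(q0, flows, merges)

# Class-conditional, temperature-annealed sampling
y = torch.arange(8) % num_classes
x, log_q = model.sample(num_samples=8, y=y, temperature=0.7)

# Maximum likelihood training signal on a batch
loss = model.forward_kld(x.detach(), y)
```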
### normflows.core.NormalizingFlow

Bases: `Module`

Normalizing Flow model to approximate a target distribution. Source code in `normflows/core.py`:

```python
class NormalizingFlow(nn.Module):
    """
    Normalizing Flow model to approximate target distribution
    """
```

#### normflows.core.NormalizingFlow.__init__

Constructor. Source code in `normflows/core.py`:

```python
def __init__(self, q0, flows, p=None):
    """Constructor

    Args:
        q0: Base distribution
        flows: List of flows
        p: Target distribution
    """
    super().__init__()
    self.q0 = q0
    self.flows = nn.ModuleList(flows)
    self.p = p
```

#### normflows.core.NormalizingFlow.forward

Transforms the latent variable z to the flow variable x. Source code in `normflows/core.py`:

```python
def forward(self, z):
    """Transforms latent variable z to the flow variable x

    Args:
        z: Batch in the latent space

    Returns:
        Batch in the space of the target distribution
    """
    for flow in self.flows:
        z, _ = flow(z)
    return z
```

#### normflows.core.NormalizingFlow.forward_and_log_det

Transforms the latent variable z to the flow variable x and computes the log determinant of the Jacobian. Source code in `normflows/core.py`:

```python
def forward_and_log_det(self, z):
    """Transforms latent variable z to the flow variable x and
    computes log determinant of the Jacobian

    Args:
        z: Batch in the latent space

    Returns:
        Batch in the space of the target distribution,
        log determinant of the Jacobian
    """
    log_det = torch.zeros(len(z), device=z.device)
    for flow in self.flows:
        z, log_d = flow(z)
        log_det += log_d
    return z, log_det
```

#### normflows.core.NormalizingFlow.forward_kld

Estimates the forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762). Source code in `normflows/core.py`:

```python
def forward_kld(self, x):
    """Estimates forward KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762)

    Args:
        x: Batch sampled from target distribution

    Returns:
        Estimate of forward KL divergence averaged over batch
    """
    log_q = torch.zeros(len(x), device=x.device)
    z = x
    for i in range(len(self.flows) - 1, -1, -1):
        z, log_det = self.flows[i].inverse(z)
        log_q += log_det
    log_q += self.q0.log_prob(z)
    return -torch.mean(log_q)
```

#### normflows.core.NormalizingFlow.inverse

Transforms the flow variable x to the latent variable z. Source code in `normflows/core.py`:

```python
def inverse(self, x):
    """Transforms flow variable x to the latent variable z

    Args:
        x: Batch in the space of the target distribution

    Returns:
        Batch in the latent space
    """
    for i in range(len(self.flows) - 1, -1, -1):
        x, _ = self.flows[i].inverse(x)
    return x
```

#### normflows.core.NormalizingFlow.inverse_and_log_det

Transforms the flow variable x to the latent variable z and computes the log determinant of the Jacobian. Source code in `normflows/core.py`:

```python
def inverse_and_log_det(self, x):
    """Transforms flow variable x to the latent variable z and
    computes log determinant of the Jacobian

    Args:
        x: Batch in the space of the target distribution

    Returns:
        Batch in the latent space, log determinant of the Jacobian
    """
    log_det = torch.zeros(len(x), device=x.device)
    for i in range(len(self.flows) - 1, -1, -1):
        x, log_d = self.flows[i].inverse(x)
        log_det += log_d
    return x, log_det
```

#### normflows.core.NormalizingFlow.load

Load model from a state dict. Source code in `normflows/core.py`:

```python
def load(self, path):
    """Load model from state dict

    Args:
        path: Path including filename where to load model from
    """
    self.load_state_dict(torch.load(path))
```

#### normflows.core.NormalizingFlow.log_prob

Get log probability for a batch. Source code in `normflows/core.py`:

```python
def log_prob(self, x):
    """Get log probability for batch

    Args:
        x: Batch

    Returns:
        log probability
    """
    log_q = torch.zeros(len(x), dtype=x.dtype, device=x.device)
    z = x
    for i in range(len(self.flows) - 1, -1, -1):
        z, log_det = self.flows[i].inverse(z)
        log_q += log_det
    log_q += self.q0.log_prob(z)
    return log_q
```

#### normflows.core.NormalizingFlow.reverse_alpha_div

Alpha divergence when sampling from q. Source code in `normflows/core.py`:

```python
def reverse_alpha_div(self, num_samples=1, alpha=1, dreg=False):
    """Alpha divergence when sampling from q

    Args:
        num_samples: Number of samples to draw
        alpha: Order of the alpha divergence
        dreg: Flag whether to use the Double Reparametrized Gradient estimator,
            see [arXiv 1810.04152](https://arxiv.org/abs/1810.04152)

    Returns:
        Alpha divergence
    """
    z, log_q = self.q0(num_samples)
    for flow in self.flows:
        z, log_det = flow(z)
        log_q -= log_det
    log_p = self.p.log_prob(z)
    if dreg:
        w_const = torch.exp(log_p - log_q).detach()
        z_ = z
        log_q = torch.zeros(len(z_), device=z_.device)
        utils.set_requires_grad(self, False)
        for i in range(len(self.flows) - 1, -1, -1):
            z_, log_det = self.flows[i].inverse(z_)
            log_q += log_det
        log_q += self.q0.log_prob(z_)
        utils.set_requires_grad(self, True)
        w = torch.exp(log_p - log_q)
        w_alpha = w_const ** alpha
        w_alpha = w_alpha / torch.mean(w_alpha)
        weights = (1 - alpha) * w_alpha + alpha * w_alpha ** 2
        loss = -alpha * torch.mean(weights * torch.log(w))
    else:
        loss = np.sign(alpha - 1) * torch.logsumexp(alpha * (log_p - log_q), 0)
    return loss
```

#### normflows.core.NormalizingFlow.reverse_kld

Estimates the reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762). Source code in `normflows/core.py`:

```python
def reverse_kld(self, num_samples=1, beta=1.0, score_fn=True):
    """Estimates reverse KL divergence, see [arXiv 1912.02762](https://arxiv.org/abs/1912.02762)

    Args:
        num_samples: Number of samples to draw from base distribution
        beta: Annealing parameter, see [arXiv 1505.05770](https://arxiv.org/abs/1505.05770)
        score_fn: Flag whether to include score function in gradient,
            see [arXiv 1703.09194](https://arxiv.org/abs/1703.09194)

    Returns:
        Estimate of the reverse KL divergence averaged over latent samples
    """
    z, log_q_ = self.q0(num_samples)
    log_q = torch.zeros_like(log_q_)
    log_q += log_q_
    for flow in self.flows:
        z, log_det = flow(z)
        log_q -= log_det
    if not score_fn:
        z_ = z
        log_q = torch.zeros(len(z_), device=z_.device)
        utils.set_requires_grad(self, False)
        for i in range(len(self.flows) - 1, -1, -1):
            z_, log_det = self.flows[i].inverse(z_)
            log_q += log_det
        log_q += self.q0.log_prob(z_)
        utils.set_requires_grad(self, True)
    log_p = self.p.log_prob(z)
    return torch.mean(log_q) - beta * torch.mean(log_p)
```

#### normflows.core.NormalizingFlow.sample

Samples from the flow-based approximate distribution. Source code in `normflows/core.py`:

```python
def sample(self, num_samples=1):
    """Samples from flow-based approximate distribution

    Args:
        num_samples: Number of samples to draw

    Returns:
        Samples, log probability
    """
    z, log_q = self.q0(num_samples)
    for flow in self.flows:
        z, log_det = flow(z)
        log_q -= log_det
    return z, log_q
```

#### normflows.core.NormalizingFlow.save

Save the state dict of the model. Source code in `normflows/core.py`:

```python
def save(self, path):
    """Save state dict of model

    Args:
        path: Path including filename where to save model
    """
    torch.save(self.state_dict(), path)
```
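`forward_kld` is the maximum likelihood counterpart to `reverse_kld`: it needs samples from the target rather than its density. A minimal sketch that fits a small real NVP-style flow to the `TwoMoons` target by maximum likelihood (layer sizes and iteration counts are arbitrary choices):

```python
import torch
import normflows as nf

# Small real NVP-style flow on 2-D data
latent_size = 2
b = torch.Tensor([1, 0])
flows = []
for i in range(8):
    s = nf.nets.MLP([latent_size, 32, latent_size], init_zeros=True)
    t = nf.nets.MLP([latent_size, 32, latent_size], init_zeros=True)
    mask = b if i % 2 == 0 else 1 - b
    flows += [nf.flows.MaskedAffineFlow(mask, t, s)]
q0 = nf.distributions.DiagGaussian(latent_size)
model = nf.NormalizingFlow(q0, flows)

target = nf.distributions.TwoMoons()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-6)
for _ in range(1000):
    optimizer.zero_grad()
    x = target.sample(256)            # batch from the target
    loss = model.forward_kld(x)       # negative mean log-likelihood
    if ~(torch.isnan(loss) | torch.isinf(loss)):
        loss.backward()
        optimizer.step()

model.save("two_moons_flow.pt")       # persist trained parameters
```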
### normflows.core.NormalizingFlowVAE

Bases: `Module`

VAE using normalizing flows to express the approximate distribution. Source code in `normflows/core.py`:

```python
class NormalizingFlowVAE(nn.Module):
    """
    VAE using normalizing flows to express approximate distribution
    """
```

#### normflows.core.NormalizingFlowVAE.__init__

Constructor of the normalizing flow model. Source code in `normflows/core.py`:

```python
def __init__(self, prior, q0=distributions.Dirac(), flows=None, decoder=None):
    """Constructor of normalizing flow model

    Args:
        prior: Prior distribution of the VAE, e.g. a Gaussian
        decoder: Optional decoder
        flows: Flows to transform output of base encoder
        q0: Base encoder
    """
    super().__init__()
    self.prior = prior
    self.decoder = decoder
    self.flows = nn.ModuleList(flows)
    self.q0 = q0
```

#### normflows.core.NormalizingFlowVAE.forward

Takes a data batch and draws num_samples from the base distribution for each data point. Source code in `normflows/core.py`:

```python
def forward(self, x, num_samples=1):
    """Takes data batch, samples num_samples for each data point from base distribution

    Args:
        x: Data batch
        num_samples: Number of samples to draw for each data point

    Returns:
        Latent variables for each batch and sample, log_q, and log_p
    """
    z, log_q = self.q0(x, num_samples=num_samples)
    # Flatten batch and sample dim
    z = z.view(-1, *z.size()[2:])
    log_q = log_q.view(-1, *log_q.size()[2:])
    for flow in self.flows:
        z, log_det = flow(z)
        log_q -= log_det
    log_p = self.prior.log_prob(z)
    if self.decoder is not None:
        log_p += self.decoder.log_prob(x, z)
    # Separate batch and sample dimension again
    z = z.view(-1, num_samples, *z.size()[1:])
    log_q = log_q.view(-1, num_samples, *log_q.size()[1:])
    log_p = log_p.view(-1, num_samples, *log_p.size()[1:])
    return z, log_q, log_p
```
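The returned `(z, log_q, log_p)` triple directly yields a Monte Carlo ELBO estimate. A minimal sketch, assuming the default `Dirac` base encoder simply repeats each data point `num_samples` times and omitting the decoder term:

```python
import torch
import normflows as nf

prior = nf.distributions.DiagGaussian(2)           # p(z)
flows = [nf.flows.Planar((2,)) for _ in range(4)]  # refine q per sample
vae = nf.NormalizingFlowVAE(prior, flows=flows)

x = torch.randn(8, 2)                    # stand-in data batch
z, log_q, log_p = vae(x, num_samples=5)  # shapes: (8, 5, 2), (8, 5), (8, 5)
elbo = torch.mean(log_p - log_q)         # Monte Carlo ELBO estimate
(-elbo).backward()                       # maximize the ELBO
```

### normflows.core_test

### normflows.distributions

### normflows.distributions.base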
### normflows.distributions.base.AffineGaussian

Bases: `BaseDistribution`

Diagonal Gaussian with an affine constant transformation applied to it; can be class-conditional or not. Source code in `normflows/distributions/base.py`:

```python
class AffineGaussian(BaseDistribution):
    """
    Diagonal Gaussian with an affine constant transformation applied to it,
    can be class conditional or not
    """

    def __init__(self, shape, affine_shape, num_classes=None):
        """Constructor

        Args:
            shape: Shape of the variables
            affine_shape: Shape of the parameters in the affine transformation
            num_classes: Number of classes if the base is class conditional,
                None otherwise
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.d = np.prod(shape)
        self.sum_dim = list(range(1, self.n_dim + 1))
        self.affine_shape = affine_shape
        self.num_classes = num_classes
        self.class_cond = num_classes is not None
        # Affine transformation
        if self.class_cond:
            self.transform = flows.CCAffineConst(self.affine_shape, self.num_classes)
        else:
            self.transform = flows.AffineConstFlow(self.affine_shape)
        # Temperature parameter for annealed sampling
        self.temperature = None

    def forward(self, num_samples=1, y=None):
        dtype = self.transform.s.dtype
        device = self.transform.s.device
        if self.class_cond:
            if y is not None:
                num_samples = len(y)
            else:
                y = torch.randint(self.num_classes, (num_samples,), device=device)
            if y.dim() == 1:
                y_onehot = torch.zeros(
                    (len(y), self.num_classes), dtype=dtype, device=device
                )
                y_onehot.scatter_(1, y[:, None], 1)
                y = y_onehot
        if self.temperature is not None:
            log_scale = np.log(self.temperature)
        else:
            log_scale = 0.0
        # Sample
        eps = torch.randn((num_samples,) + self.shape, dtype=dtype, device=device)
        z = np.exp(log_scale) * eps
        # Get log prob
        log_p = (
            -0.5 * self.d * np.log(2 * np.pi)
            - self.d * log_scale
            - 0.5 * torch.sum(torch.pow(eps, 2), dim=self.sum_dim)
        )
        # Apply transform
        if self.class_cond:
            z, log_det = self.transform(z, y)
        else:
            z, log_det = self.transform(z)
        log_p -= log_det
        return z, log_p

    def log_prob(self, z, y=None):
        # Prepare one-hot encoding of class if needed
        if self.class_cond:
            if y.dim() == 1:
                y_onehot = torch.zeros(
                    (len(y), self.num_classes),
                    dtype=self.transform.s.dtype,
                    device=self.transform.s.device,
                )
                y_onehot.scatter_(1, y[:, None], 1)
                y = y_onehot
        if self.temperature is not None:
            log_scale = np.log(self.temperature)
        else:
            log_scale = 0.0
        # Get log prob
        if self.class_cond:
            z, log_p = self.transform.inverse(z, y)
        else:
            z, log_p = self.transform.inverse(z)
        z = z / np.exp(log_scale)
        log_p = (
            log_p
            - self.d * log_scale
            - 0.5 * self.d * np.log(2 * np.pi)
            - 0.5 * torch.sum(torch.pow(z, 2), dim=self.sum_dim)
        )
        return log_p
```

#### normflows.distributions.base.AffineGaussian.__init__

Constructor.

Parameters:

- shape: Shape of the variables
- affine_shape: Shape of the parameters in the affine transformation
- num_classes: Number of classes if the base is class-conditional, None otherwise (default: None)

### normflows.distributions.base.BaseDistribution

Bases: `Module`

Base distribution of a flow-based model. Parameters do not depend on the target variable (as would be the case for a VAE encoder). Source code in `normflows/distributions/base.py`:

```python
class BaseDistribution(nn.Module):
    """
    Base distribution of a flow-based model
    Parameters do not depend on target variable (as is the case for a VAE encoder)
    """

    def __init__(self):
        super().__init__()
```

#### normflows.distributions.base.BaseDistribution.forward

Samples from the base distribution and calculates the log probability. Source code in `normflows/distributions/base.py`:

```python
def forward(self, num_samples=1):
    """Samples from base distribution and calculates log probability

    Args:
        num_samples: Number of samples to draw from the distribution

    Returns:
        Samples drawn from the distribution, log probability
    """
    raise NotImplementedError
```

#### normflows.distributions.base.BaseDistribution.log_prob

Calculate the log probability of a batch of samples. Source code in `normflows/distributions/base.py`:

```python
def log_prob(self, z):
    """Calculate log probability of batch of samples

    Args:
        z: Batch of random variables to determine log probability for

    Returns:
        log probability for each batch element
    """
    raise NotImplementedError
```

#### normflows.distributions.base.BaseDistribution.sample

Samples from the base distribution. Source code in `normflows/distributions/base.py`:

```python
def sample(self, num_samples=1, **kwargs):
    """Samples from base distribution

    Args:
        num_samples: Number of samples to draw from the distribution

    Returns:
        Samples drawn from the distribution
    """
    z, _ = self.forward(num_samples, **kwargs)
    return z
```
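Custom bases only need to implement `forward` (returning samples together with their log probability) and `log_prob`; `sample` then comes for free from the base class. A minimal sketch with a hypothetical uniform base on [-1, 1]^d:

```python
import numpy as np
import torch
import normflows as nf

class UniformBase(nf.distributions.BaseDistribution):
    """Hypothetical custom base: uniform on [-1, 1]^d."""

    def __init__(self, d):
        super().__init__()
        self.d = d

    def forward(self, num_samples=1):
        # Draw samples and return their (constant) log density
        z = 2 * torch.rand(num_samples, self.d) - 1
        log_p = -self.d * np.log(2.0) * torch.ones(num_samples)
        return z, log_p

    def log_prob(self, z):
        # Constant density inside the cube, zero density outside
        inside = torch.all((z >= -1) & (z <= 1), dim=1)
        log_p = torch.full((len(z),), -self.d * np.log(2.0))
        log_p[~inside] = -float("inf")
        return log_p

q0 = UniformBase(2)
z, log_p = q0(num_samples=4)  # nn.Module __call__ dispatches to forward
samples = q0.sample(6)        # provided by BaseDistribution
```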
### normflows.distributions.base.ClassCondDiagGaussian

Bases: `BaseDistribution`

Class-conditional multivariate Gaussian distribution with diagonal covariance matrix. Source code in `normflows/distributions/base.py`:

```python
class ClassCondDiagGaussian(BaseDistribution):
    """
    Class conditional multivariate Gaussian distribution with diagonal covariance matrix
    """

    def __init__(self, shape, num_classes):
        """Constructor

        Args:
            shape: Tuple with shape of data, if int shape has one dimension
            num_classes: Number of classes to condition on
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.perm = [self.n_dim] + list(range(self.n_dim))
        self.d = np.prod(shape)
        self.num_classes = num_classes
        self.loc = nn.Parameter(torch.zeros(*self.shape, num_classes))
        self.log_scale = nn.Parameter(torch.zeros(*self.shape, num_classes))
        self.temperature = None  # Temperature parameter for annealed sampling

    def forward(self, num_samples=1, y=None):
        if y is not None:
            num_samples = len(y)
        else:
            y = torch.randint(self.num_classes, (num_samples,), device=self.loc.device)
        if y.dim() == 1:
            y_onehot = torch.zeros(
                (self.num_classes, num_samples),
                dtype=self.loc.dtype,
                device=self.loc.device,
            )
            y_onehot.scatter_(0, y[None], 1)
            y = y_onehot
        else:
            y = y.t()
        eps = torch.randn(
            (num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device
        )
        loc = (self.loc @ y).permute(*self.perm)
        log_scale = (self.log_scale @ y).permute(*self.perm)
        if self.temperature is not None:
            log_scale = np.log(self.temperature) + log_scale
        z = loc + torch.exp(log_scale) * eps
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))
        )
        return z, log_p

    def log_prob(self, z, y):
        if y.dim() == 1:
            y_onehot = torch.zeros(
                (self.num_classes, len(y)), dtype=self.loc.dtype, device=self.loc.device
            )
            y_onehot.scatter_(0, y[None], 1)
            y = y_onehot
        else:
            y = y.t()
        loc = (self.loc @ y).permute(*self.perm)
        log_scale = (self.log_scale @ y).permute(*self.perm)
        if self.temperature is not None:
            log_scale = np.log(self.temperature) + log_scale
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow((z - loc) / torch.exp(log_scale), 2),
            list(range(1, self.n_dim + 1)),
        )
        return log_p
```

#### normflows.distributions.base.ClassCondDiagGaussian.__init__

Constructor.

Parameters:

- shape: Tuple with shape of data; if int, shape has one dimension
- num_classes: Number of classes to condition on

### normflows.distributions.base.ConditionalDiagGaussian

Bases: `BaseDistribution`

Conditional multivariate Gaussian distribution with diagonal covariance matrix; the parameters are obtained from a context encoder, context meaning the variable to condition on. Source code in `normflows/distributions/base.py`:

```python
class ConditionalDiagGaussian(BaseDistribution):
    """
    Conditional multivariate Gaussian distribution with diagonal
    covariance matrix, parameters are obtained by a context encoder,
    context meaning the variable to condition on
    """

    def __init__(self, shape, context_encoder):
        """Constructor

        Args:
            shape: Tuple with shape of data, if int shape has one dimension
            context_encoder: Computes mean and log of the standard deviation
                of the Gaussian; the mean is the first half of the last
                dimension of the encoder output, the log of the standard
                deviation the second half
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.d = np.prod(shape)
        self.context_encoder = context_encoder

    def forward(self, num_samples=1, context=None):
        encoder_output = self.context_encoder(context)
        split_ind = encoder_output.shape[-1] // 2
        mean = encoder_output[..., :split_ind]
        log_scale = encoder_output[..., split_ind:]
        eps = torch.randn(
            (num_samples,) + self.shape, dtype=mean.dtype, device=mean.device
        )
        z = mean + torch.exp(log_scale) * eps
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))
        )
        return z, log_p

    def log_prob(self, z, context=None):
        encoder_output = self.context_encoder(context)
        split_ind = encoder_output.shape[-1] // 2
        mean = encoder_output[..., :split_ind]
        log_scale = encoder_output[..., split_ind:]
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow((z - mean) / torch.exp(log_scale), 2),
            list(range(1, self.n_dim + 1)),
        )
        return log_p
```

#### normflows.distributions.base.ConditionalDiagGaussian.__init__

Constructor.

Parameters:

- shape: Tuple with shape of data; if int, shape has one dimension
- context_encoder: Computes the mean and the log of the standard deviation of the Gaussian; the mean is the first half of the last dimension of the encoder output, the log of the standard deviation the second half

### normflows.distributions.base.DiagGaussian

Bases: `BaseDistribution`

Multivariate Gaussian distribution with diagonal covariance matrix. Source code in `normflows/distributions/base.py`:

```python
class DiagGaussian(BaseDistribution):
    """
    Multivariate Gaussian distribution with diagonal covariance matrix
    """

    def __init__(self, shape, trainable=True):
        """Constructor

        Args:
            shape: Tuple with shape of data, if int shape has one dimension
            trainable: Flag whether to use trainable or fixed parameters
        """
        super().__init__()
        if isinstance(shape, int):
            shape = (shape,)
        if isinstance(shape, list):
            shape = tuple(shape)
        self.shape = shape
        self.n_dim = len(shape)
        self.d = np.prod(shape)
        if trainable:
            self.loc = nn.Parameter(torch.zeros(1, *self.shape))
            self.log_scale = nn.Parameter(torch.zeros(1, *self.shape))
        else:
            self.register_buffer("loc", torch.zeros(1, *self.shape))
            self.register_buffer("log_scale", torch.zeros(1, *self.shape))
        self.temperature = None  # Temperature parameter for annealed sampling

    def forward(self, num_samples=1, context=None):
        eps = torch.randn(
            (num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device
        )
        if self.temperature is None:
            log_scale = self.log_scale
        else:
            log_scale = self.log_scale + np.log(self.temperature)
        z = self.loc + torch.exp(log_scale) * eps
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))
        )
        return z, log_p

    def log_prob(self, z, context=None):
        if self.temperature is None:
            log_scale = self.log_scale
        else:
            log_scale = self.log_scale + np.log(self.temperature)
        log_p = -0.5 * self.d * np.log(2 * np.pi) - torch.sum(
            log_scale + 0.5 * torch.pow((z - self.loc) / torch.exp(log_scale), 2),
            list(range(1, self.n_dim + 1)),
        )
        return log_p
```

#### normflows.distributions.base.DiagGaussian.__init__

Constructor.

Parameters:

- shape: Tuple with shape of data; if int, shape has one dimension
- trainable: Flag whether to use trainable or fixed parameters (default: True)
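To make the contrast between the two Gaussian bases above concrete: `DiagGaussian` holds its own (optionally trainable) mean and log-scale, while `ConditionalDiagGaussian` reads both from a context encoder whose output is split in half along the last dimension. A small sketch (the encoder architecture is an arbitrary choice):

```python
import torch
import normflows as nf

latent_size, context_size = 2, 3

# Unconditional base with fixed (non-trainable) parameters
q0 = nf.distributions.DiagGaussian(latent_size, trainable=False)
z, log_p = q0(num_samples=4)

# Conditional base: first half of the encoder output is the mean,
# second half the log standard deviation
encoder = torch.nn.Sequential(
    torch.nn.Linear(context_size, 32),
    torch.nn.ReLU(),
    torch.nn.Linear(32, 2 * latent_size),
)
q0_cond = nf.distributions.ConditionalDiagGaussian(latent_size, encoder)

context = torch.randn(5, context_size)
z, log_p = q0_cond(num_samples=5, context=context)  # one draw per context row
log_p_again = q0_cond.log_prob(z, context=context)  # consistent with the draw
```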
### normflows.distributions.base.GaussianMixture

Bases: `BaseDistribution`

Mixture of Gaussians with diagonal covariance matrix. Source code in `normflows/distributions/base.py`:

```python
class GaussianMixture(BaseDistribution):
    """
    Mixture of Gaussians with diagonal covariance matrix
    """

    def __init__(self, n_modes, dim, loc=None, scale=None, weights=None, trainable=True):
        """Constructor

        Args:
            n_modes: Number of modes of the mixture model
            dim: Number of dimensions of each Gaussian
            loc: List of mean values
            scale: List of diagonals of the covariance matrices
            weights: List of mode probabilities
            trainable: Flag, if true parameters will be optimized during training
        """
        super().__init__()
        self.n_modes = n_modes
        self.dim = dim
        if loc is None:
            loc = np.random.randn(self.n_modes, self.dim)
        loc = np.array(loc)[None, ...]
        if scale is None:
            scale = np.ones((self.n_modes, self.dim))
        scale = np.array(scale)[None, ...]
        if weights is None:
            weights = np.ones(self.n_modes)
        weights = np.array(weights)[None, ...]
        weights /= weights.sum(1)
        if trainable:
            self.loc = nn.Parameter(torch.tensor(1.0 * loc))
            self.log_scale = nn.Parameter(torch.tensor(np.log(1.0 * scale)))
            self.weight_scores = nn.Parameter(torch.tensor(np.log(1.0 * weights)))
        else:
            self.register_buffer("loc", torch.tensor(1.0 * loc))
            self.register_buffer("log_scale", torch.tensor(np.log(1.0 * scale)))
            self.register_buffer("weight_scores", torch.tensor(np.log(1.0 * weights)))

    def forward(self, num_samples=1):
        # Get weights
        weights = torch.softmax(self.weight_scores, 1)

        # Sample mode indices
        mode = torch.multinomial(weights[0, :], num_samples, replacement=True)
        mode_1h = nn.functional.one_hot(mode, self.n_modes)
        mode_1h = mode_1h[..., None]

        # Get samples
        eps_ = torch.randn(
            num_samples, self.dim, dtype=self.loc.dtype, device=self.loc.device
        )
        scale_sample = torch.sum(torch.exp(self.log_scale) * mode_1h, 1)
        loc_sample = torch.sum(self.loc * mode_1h, 1)
        z = eps_ * scale_sample + loc_sample

        # Compute log probability
        eps = (z[:, None, :] - self.loc) / torch.exp(self.log_scale)
        log_p = (
            -0.5 * self.dim * np.log(2 * np.pi)
            + torch.log(weights)
            - 0.5 * torch.sum(torch.pow(eps, 2), 2)
            - torch.sum(self.log_scale, 2)
        )
        log_p = torch.logsumexp(log_p, 1)

        return z, log_p

    def log_prob(self, z):
        # Get weights
        weights = torch.softmax(self.weight_scores, 1)

        # Compute log probability
        eps = (z[:, None, :] - self.loc) / torch.exp(self.log_scale)
        log_p = (
            -0.5 * self.dim * np.log(2 * np.pi)
            + torch.log(weights)
            - 0.5 * torch.sum(torch.pow(eps, 2), 2)
            - torch.sum(self.log_scale, 2)
        )
        log_p = torch.logsumexp(log_p, 1)

        return log_p
```

#### normflows.distributions.base.GaussianMixture.__init__

Constructor.

Parameters:

- n_modes: Number of modes of the mixture model
- dim: Number of dimensions of each Gaussian
- loc: List of mean values (default: None)
- scale: List of diagonals of the covariance matrices (default: None)
- weights: List of mode probabilities (default: None)
- trainable: Flag; if true, the parameters will be optimized during training (default: True)
log ( 1.0 * weights )))","title":"__init__"},{"location":"references/#normflows.distributions.base.GaussianPCA","text":"Bases: BaseDistribution Gaussian distribution resulting from linearly mapping a normal distributed latent variable describing the \"content of the target\" Source code in normflows/distributions/base.py 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 class GaussianPCA ( BaseDistribution ): \"\"\" Gaussian distribution resulting from linearly mapping a normal distributed latent variable describing the \"content of the target\" \"\"\" def __init__ ( self , dim , latent_dim = None , sigma = 0.1 ): \"\"\"Constructor Args: dim: Number of dimensions of the flow variables latent_dim: Number of dimensions of the latent \"content\" variable; if None it is set equal to dim sigma: Noise level \"\"\" super () . __init__ () self . dim = dim if latent_dim is None : self . latent_dim = dim else : self . latent_dim = latent_dim self . loc = nn . Parameter ( torch . zeros ( 1 , dim )) self . W = nn . Parameter ( torch . randn ( latent_dim , dim )) self . log_sigma = nn . Parameter ( torch . tensor ( np . log ( sigma ))) def forward ( self , num_samples = 1 ): eps = torch . randn ( num_samples , self . latent_dim , dtype = self . loc . dtype , device = self . loc . device ) z_ = torch . matmul ( eps , self . W ) z = z_ + self . loc Sig = torch . matmul ( self . W . T , self . W ) + torch . exp ( self . log_sigma * 2 ) * torch . eye ( self . dim , dtype = self . loc . dtype , device = self . loc . device ) log_p = ( self . dim / 2 * np . log ( 2 * np . pi ) - 0.5 * torch . det ( Sig ) - 0.5 * torch . sum ( z_ * torch . matmul ( z_ , torch . inverse ( Sig )), 1 ) ) return z , log_p def log_prob ( self , z ): z_ = z - self . loc Sig = torch . matmul ( self . W . T , self . W ) + torch . exp ( self . log_sigma * 2 ) * torch . eye ( self . dim , dtype = self . loc . dtype , device = self . loc . device ) log_p = ( self . dim / 2 * np . log ( 2 * np . pi ) - 0.5 * torch . det ( Sig ) - 0.5 * torch . sum ( z_ * torch . matmul ( z_ , torch . inverse ( Sig )), 1 ) ) return log_p","title":"GaussianPCA"},{"location":"references/#normflows.distributions.base.GaussianPCA.__init__","text":"Constructor Parameters: Name Type Description Default dim Number of dimensions of the flow variables required latent_dim Number of dimensions of the latent \"content\" variable; if None it is set equal to dim None sigma Noise level 0.1 Source code in normflows/distributions/base.py 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 def __init__ ( self , dim , latent_dim = None , sigma = 0.1 ): \"\"\"Constructor Args: dim: Number of dimensions of the flow variables latent_dim: Number of dimensions of the latent \"content\" variable; if None it is set equal to dim sigma: Noise level \"\"\" super () . __init__ () self . dim = dim if latent_dim is None : self . latent_dim = dim else : self . latent_dim = latent_dim self . loc = nn . Parameter ( torch . zeros ( 1 , dim )) self . W = nn . Parameter ( torch . randn ( latent_dim , dim )) self . log_sigma = nn . Parameter ( torch . tensor ( np . log ( sigma )))","title":"__init__"},{"location":"references/#normflows.distributions.base.GlowBase","text":"Bases: BaseDistribution Base distribution of the Glow model, i.e. 
Diagonal Gaussian with one mean and log scale for each channel Source code in normflows/distributions/base.py 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 class GlowBase ( BaseDistribution ): \"\"\" Base distribution of the Glow model, i.e. Diagonal Gaussian with one mean and log scale for each channel \"\"\" def __init__ ( self , shape , num_classes = None , logscale_factor = 3.0 ): \"\"\"Constructor Args: shape: Shape of the variables num_classes: Number of classes if the base is class conditional, None otherwise logscale_factor: Scaling factor for mean and log variance \"\"\" super () . __init__ () # Save shape and related statistics if isinstance ( shape , int ): shape = ( shape ,) if isinstance ( shape , list ): shape = tuple ( shape ) self . shape = shape self . n_dim = len ( shape ) self . num_pix = np . prod ( shape [ 1 :]) self . d = np . prod ( shape ) self . sum_dim = list ( range ( 1 , self . n_dim + 1 )) self . num_classes = num_classes self . class_cond = num_classes is not None self . logscale_factor = logscale_factor # Set up parameters self . loc = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) self . loc_logs = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) self . log_scale = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) self . log_scale_logs = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) # Class conditional parameter if needed if self . class_cond : self . loc_cc = nn . Parameter ( torch . zeros ( self . num_classes , self . shape [ 0 ])) self . log_scale_cc = nn . Parameter ( torch . zeros ( self . num_classes , self . shape [ 0 ]) ) # Temperature parameter for annealed sampling self . temperature = None def forward ( self , num_samples = 1 , y = None ): # Prepare parameter loc = self . loc * torch . exp ( self . loc_logs * self . logscale_factor ) log_scale = self . log_scale * torch . exp ( self . log_scale_logs * self . logscale_factor ) if self . class_cond : if y is not None : num_samples = len ( y ) else : y = torch . randint ( self . num_classes , ( num_samples ,), device = self . loc . device ) if y . dim () == 1 : y_onehot = torch . zeros ( ( len ( y ), self . num_classes ), dtype = self . loc . dtype , device = self . loc . device , ) y_onehot . scatter_ ( 1 , y [:, None ], 1 ) y = y_onehot loc = loc + ( y @ self . loc_cc ) . view ( y . size ( 0 ), self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ]) ) log_scale = log_scale + ( y @ self . log_scale_cc ) . view ( y . size ( 0 ), self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ]) ) if self . temperature is not None : log_scale = log_scale + np . log ( self . temperature ) # Sample eps = torch . randn ( ( num_samples ,) + self . shape , dtype = self . loc . dtype , device = self . loc . device ) z = loc + torch . exp ( log_scale ) * eps # Get log prob log_p = ( - 0.5 * self . d * np . log ( 2 * np . pi ) - self . num_pix * torch . sum ( log_scale , dim = self . 
sum_dim ) - 0.5 * torch . sum ( torch . pow ( eps , 2 ), dim = self . sum_dim ) ) return z , log_p def log_prob ( self , z , y = None ): # Prepare parameter loc = self . loc * torch . exp ( self . loc_logs * self . logscale_factor ) log_scale = self . log_scale * torch . exp ( self . log_scale_logs * self . logscale_factor ) if self . class_cond : if y . dim () == 1 : y_onehot = torch . zeros ( ( len ( y ), self . num_classes ), dtype = self . loc . dtype , device = self . loc . device , ) y_onehot . scatter_ ( 1 , y [:, None ], 1 ) y = y_onehot loc = loc + ( y @ self . loc_cc ) . view ( y . size ( 0 ), self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ]) ) log_scale = log_scale + ( y @ self . log_scale_cc ) . view ( y . size ( 0 ), self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ]) ) if self . temperature is not None : log_scale = log_scale + np . log ( self . temperature ) # Get log prob log_p = ( - 0.5 * self . d * np . log ( 2 * np . pi ) - self . num_pix * torch . sum ( log_scale , dim = self . sum_dim ) - 0.5 * torch . sum ( torch . pow (( z - loc ) / torch . exp ( log_scale ), 2 ), dim = self . sum_dim ) ) return log_p","title":"GlowBase"},{"location":"references/#normflows.distributions.base.GlowBase.__init__","text":"Constructor Parameters: Name Type Description Default shape Shape of the variables required num_classes Number of classes if the base is class conditional, None otherwise None logscale_factor Scaling factor for mean and log variance 3.0 Source code in normflows/distributions/base.py 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 def __init__ ( self , shape , num_classes = None , logscale_factor = 3.0 ): \"\"\"Constructor Args: shape: Shape of the variables num_classes: Number of classes if the base is class conditional, None otherwise logscale_factor: Scaling factor for mean and log variance \"\"\" super () . __init__ () # Save shape and related statistics if isinstance ( shape , int ): shape = ( shape ,) if isinstance ( shape , list ): shape = tuple ( shape ) self . shape = shape self . n_dim = len ( shape ) self . num_pix = np . prod ( shape [ 1 :]) self . d = np . prod ( shape ) self . sum_dim = list ( range ( 1 , self . n_dim + 1 )) self . num_classes = num_classes self . class_cond = num_classes is not None self . logscale_factor = logscale_factor # Set up parameters self . loc = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) self . loc_logs = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) self . log_scale = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) self . log_scale_logs = nn . Parameter ( torch . zeros ( 1 , self . shape [ 0 ], * (( self . n_dim - 1 ) * [ 1 ])) ) # Class conditional parameter if needed if self . class_cond : self . loc_cc = nn . Parameter ( torch . zeros ( self . num_classes , self . shape [ 0 ])) self . log_scale_cc = nn . Parameter ( torch . zeros ( self . num_classes , self . shape [ 0 ]) ) # Temperature parameter for annealed sampling self .
temperature = None","title":"__init__"},{"location":"references/#normflows.distributions.base.Uniform","text":"Bases: BaseDistribution Multivariate uniform distribution Source code in normflows/distributions/base.py 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 class Uniform ( BaseDistribution ): \"\"\" Multivariate uniform distribution \"\"\" def __init__ ( self , shape , low =- 1.0 , high = 1.0 ): \"\"\"Constructor Args: shape: Tuple with shape of data, if int shape has one dimension low: Lower bound of uniform distribution high: Upper bound of uniform distribution \"\"\" super () . __init__ () if isinstance ( shape , int ): shape = ( shape ,) if isinstance ( shape , list ): shape = tuple ( shape ) self . shape = shape self . d = np . prod ( shape ) self . low = torch . tensor ( low ) self . high = torch . tensor ( high ) self . log_prob_val = - self . d * np . log ( self . high - self . low ) def forward ( self , num_samples = 1 , context = None ): eps = torch . rand ( ( num_samples ,) + self . shape , dtype = self . low . dtype , device = self . low . device ) z = self . low + ( self . high - self . low ) * eps log_p = self . log_prob_val * torch . ones ( num_samples , device = self . low . device ) return z , log_p def log_prob ( self , z , context = None ): log_p = self . log_prob_val * torch . ones ( z . shape [ 0 ], device = z . device ) out_range = torch . logical_or ( z < self . low , z > self . high ) ind_inf = torch . any ( torch . reshape ( out_range , ( z . shape [ 0 ], - 1 )), dim =- 1 ) log_p [ ind_inf ] = - np . inf return log_p","title":"Uniform"},{"location":"references/#normflows.distributions.base.Uniform.__init__","text":"Constructor Parameters: Name Type Description Default shape Tuple with shape of data, if int shape has one dimension required low Lower bound of uniform distribution -1.0 high Upper bound of uniform distribution 1.0 Source code in normflows/distributions/base.py 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 def __init__ ( self , shape , low =- 1.0 , high = 1.0 ): \"\"\"Constructor Args: shape: Tuple with shape of data, if int shape has one dimension low: Lower bound of uniform distribution high: Upper bound of uniform distribution \"\"\" super () . __init__ () if isinstance ( shape , int ): shape = ( shape ,) if isinstance ( shape , list ): shape = tuple ( shape ) self . shape = shape self . d = np . prod ( shape ) self . low = torch . tensor ( low ) self . high = torch . tensor ( high ) self . log_prob_val = - self . d * np . log ( self . high - self . 
low )","title":"__init__"},{"location":"references/#normflows.distributions.base.UniformGaussian","text":"Bases: BaseDistribution Distribution of a 1D random variable with some entries having a uniform and others a Gaussian distribution Source code in normflows/distributions/base.py 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 class UniformGaussian ( BaseDistribution ): \"\"\" Distribution of a 1D random variable with some entries having a uniform and others a Gaussian distribution \"\"\" def __init__ ( self , ndim , ind , scale = None ): \"\"\"Constructor Args: ndim: Int, number of dimensions ind: Iterable, indices of uniformly distributed entries scale: Iterable, standard deviation of Gaussian or width of uniform distribution \"\"\" super () . __init__ () self . ndim = ndim if isinstance ( ind , int ): ind = [ ind ] # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) perm_ = torch . cat (( self . ind , self . ind_ )) inv_perm_ = torch . zeros_like ( perm_ ) for i in range ( self . ndim ): inv_perm_ [ perm_ [ i ]] = i self . register_buffer ( \"inv_perm\" , inv_perm_ ) if scale is None : self . register_buffer ( \"scale\" , torch . ones ( self . ndim )) else : self . register_buffer ( \"scale\" , scale ) def forward ( self , num_samples = 1 , context = None ): z = self . sample ( num_samples ) return z , self . log_prob ( z ) def sample ( self , num_samples = 1 , context = None ): eps_u = ( torch . rand ( ( num_samples , len ( self . ind )), dtype = self . scale . dtype , device = self . scale . device , ) - 0.5 ) eps_g = torch . randn ( ( num_samples , len ( self . ind_ )), dtype = self . scale . dtype , device = self . scale . device , ) z = torch . cat (( eps_u , eps_g ), - 1 ) z = z [ ... , self . inv_perm ] return self . scale * z def log_prob ( self , z , context = None ): log_p_u = torch . broadcast_to ( - torch . log ( self . scale [ self . ind ]), ( len ( z ), - 1 )) log_p_g = ( - 0.5 * np . log ( 2 * np . pi ) - torch . log ( self . scale [ self . ind_ ]) - 0.5 * torch . pow ( z [ ... , self . ind_ ] / self . scale [ self . ind_ ], 2 ) ) return torch . sum ( log_p_u , - 1 ) + torch . 
sum ( log_p_g , - 1 )","title":"UniformGaussian"},{"location":"references/#normflows.distributions.base.UniformGaussian.__init__","text":"Constructor Parameters: Name Type Description Default ndim Int, number of dimensions required ind Iterable, indices of uniformly distributed entries required scale Iterable, standard deviation of Gaussian or width of uniform distribution None Source code in normflows/distributions/base.py 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 def __init__ ( self , ndim , ind , scale = None ): \"\"\"Constructor Args: ndim: Int, number of dimensions ind: Iterable, indices of uniformly distributed entries scale: Iterable, standard deviation of Gaussian or width of uniform distribution \"\"\" super () . __init__ () self . ndim = ndim if isinstance ( ind , int ): ind = [ ind ] # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) perm_ = torch . cat (( self . ind , self . ind_ )) inv_perm_ = torch . zeros_like ( perm_ ) for i in range ( self . ndim ): inv_perm_ [ perm_ [ i ]] = i self . register_buffer ( \"inv_perm\" , inv_perm_ ) if scale is None : self . register_buffer ( \"scale\" , torch . ones ( self . ndim )) else : self . register_buffer ( \"scale\" , scale )","title":"__init__"},{"location":"references/#normflows.distributions.base_test","text":"","title":"base_test"},{"location":"references/#normflows.distributions.decoder","text":"","title":"decoder"},{"location":"references/#normflows.distributions.decoder.BaseDecoder","text":"Bases: Module Source code in normflows/distributions/decoder.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 class BaseDecoder ( nn . Module ): def __init__ ( self ): super () . 
__init__ () def forward ( self , z ): \"\"\"Decodes z to x Args: z: latent variable Returns: x, std of x \"\"\" raise NotImplementedError def log_prob ( self , x , z ): \"\"\"Log probability Args: x: observable z: latent variable Returns: log(p) of x given z \"\"\" raise NotImplementedError","title":"BaseDecoder"},{"location":"references/#normflows.distributions.decoder.BaseDecoder.forward","text":"Decodes z to x Parameters: Name Type Description Default z latent variable required Returns: Type Description x, std of x Source code in normflows/distributions/decoder.py 10 11 12 13 14 15 16 17 18 19 def forward ( self , z ): \"\"\"Decodes z to x Args: z: latent variable Returns: x, std of x \"\"\" raise NotImplementedError","title":"forward"},{"location":"references/#normflows.distributions.decoder.BaseDecoder.log_prob","text":"Log probability Parameters: Name Type Description Default x observable required z latent variable required Returns: Type Description log(p) of x given z Source code in normflows/distributions/decoder.py 21 22 23 24 25 26 27 28 29 30 31 def log_prob ( self , x , z ): \"\"\"Log probability Args: x: observable z: latent variable Returns: log(p) of x given z \"\"\" raise NotImplementedError","title":"log_prob"},{"location":"references/#normflows.distributions.decoder.NNBernoulliDecoder","text":"Bases: BaseDecoder BaseDecoder representing a Bernoulli distribution with mean parametrized by a NN Source code in normflows/distributions/decoder.py 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 class NNBernoulliDecoder ( BaseDecoder ): \"\"\" BaseDecoder representing a Bernoulli distribution with mean parametrized by a NN \"\"\" def __init__ ( self , net ): \"\"\"Constructor Args: net: neural network parametrizing mean of Bernoulli (mean = sigmoid(nn_out)) \"\"\" super () . __init__ () self . net = net def forward ( self , z ): mean = torch . sigmoid ( self . net ( z )) return mean def log_prob ( self , x , z ): score = self . net ( z ) if len ( z ) > len ( x ): x = x . unsqueeze ( 1 ) x = x . repeat ( 1 , z . size ()[ 0 ] // x . size ()[ 0 ], * (( x . dim () - 2 ) * [ 1 ])) . view ( - 1 , * x . size ()[ 2 :] ) log_sig = lambda a : - torch . relu ( - a ) - torch . log ( 1 + torch . exp ( - torch . abs ( a ))) log_p = torch . sum ( x * log_sig ( score ) + ( 1 - x ) * log_sig ( - score ), list ( range ( 1 , x . dim ())) ) return log_p","title":"NNBernoulliDecoder"},{"location":"references/#normflows.distributions.decoder.NNBernoulliDecoder.__init__","text":"Constructor Parameters: Name Type Description Default net neural network parametrizing mean of Bernoulli (mean = sigmoid(nn_out)) required Source code in normflows/distributions/decoder.py 78 79 80 81 82 83 84 85 def __init__ ( self , net ): \"\"\"Constructor Args: net: neural network parametrizing mean of Bernoulli (mean = sigmoid(nn_out)) \"\"\" super () . __init__ () self .
net = net","title":"__init__"},{"location":"references/#normflows.distributions.decoder.NNDiagGaussianDecoder","text":"Bases: BaseDecoder BaseDecoder representing a diagonal Gaussian distribution with mean and std parametrized by a NN Source code in normflows/distributions/decoder.py 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 class NNDiagGaussianDecoder ( BaseDecoder ): \"\"\" BaseDecoder representing a diagonal Gaussian distribution with mean and std parametrized by a NN \"\"\" def __init__ ( self , net ): \"\"\"Constructor Args: net: neural network parametrizing mean and standard deviation of diagonal Gaussian \"\"\" super () . __init__ () self . net = net def forward ( self , z ): mean_std = self . net ( z ) n_hidden = mean_std . size ()[ 1 ] // 2 mean = mean_std [:, : n_hidden , ... ] std = torch . exp ( 0.5 * mean_std [:, n_hidden :, ... ]) return mean , std def log_prob ( self , x , z ): mean_std = self . net ( z ) n_hidden = mean_std . size ()[ 1 ] // 2 mean = mean_std [:, : n_hidden , ... ] var = torch . exp ( mean_std [:, n_hidden :, ... ]) if len ( z ) > len ( x ): x = x . unsqueeze ( 1 ) x = x . repeat ( 1 , z . size ()[ 0 ] // x . size ()[ 0 ], * (( x . dim () - 2 ) * [ 1 ])) . view ( - 1 , * x . size ()[ 2 :] ) log_p = - 0.5 * torch . prod ( torch . tensor ( z . size ()[ 1 :])) * np . log ( 2 * np . pi ) - 0.5 * torch . sum ( torch . log ( var ) + ( x - mean ) ** 2 / var , list ( range ( 1 , z . dim ())) ) return log_p","title":"NNDiagGaussianDecoder"},{"location":"references/#normflows.distributions.decoder.NNDiagGaussianDecoder.__init__","text":"Constructor Parameters: Name Type Description Default net neural network parametrizing mean and standard deviation of diagonal Gaussian required Source code in normflows/distributions/decoder.py 39 40 41 42 43 44 45 46 def __init__ ( self , net ): \"\"\"Constructor Args: net: neural network parametrizing mean and standard deviation of diagonal Gaussian \"\"\" super () . __init__ () self . net = net","title":"__init__"},{"location":"references/#normflows.distributions.decoder_test","text":"","title":"decoder_test"},{"location":"references/#normflows.distributions.distribution_test","text":"","title":"distribution_test"},{"location":"references/#normflows.distributions.distribution_test.DistributionTest","text":"Bases: TestCase Generic test case for distribution modules Source code in normflows/distributions/distribution_test.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 class DistributionTest ( unittest . TestCase ): \"\"\" Generic test case for distribution modules \"\"\" def assertClose ( self , actual , expected , atol = None , rtol = None ): assert_close ( actual , expected , atol = atol , rtol = rtol ) def checkForward ( self , distribution , num_samples = 1 , ** kwargs ): # Do forward outputs , log_p = distribution ( num_samples , ** kwargs ) # Check type assert outputs . dtype == log_p . dtype # Check shape assert log_p . shape [ 0 ] == num_samples assert outputs . shape [ 0 ] == num_samples # Check dim assert outputs . dim () > log_p . dim () # Return results return outputs , log_p def checkLogProb ( self , distribution , inputs , ** kwargs ): # Compute log prob log_p = distribution . log_prob ( inputs , ** kwargs ) # Check type assert log_p . dtype == inputs . dtype # Check shape assert log_p . shape [ 0 ] == inputs . 
shape [ 0 ] # Return results return log_p def checkSample ( self , distribution , num_samples = 1 , ** kwargs ): # Do forward outputs = distribution . sample ( num_samples , ** kwargs ) # Check shape assert outputs . shape [ 0 ] == num_samples # Check dim assert outputs . dim () > 1 # Return results return outputs def checkForwardLogProb ( self , distribution , num_samples = 1 , atol = None , rtol = None , ** kwargs ): # Check forward outputs , log_p = self . checkForward ( distribution , num_samples , ** kwargs ) # Check log prob log_p_ = self . checkLogProb ( distribution , outputs , ** kwargs ) # Check consistency self . assertClose ( log_p_ , log_p , atol , rtol )","title":"DistributionTest"},{"location":"references/#normflows.distributions.encoder","text":"","title":"encoder"},{"location":"references/#normflows.distributions.encoder.BaseEncoder","text":"Bases: Module Base distribution of a flow-based variational autoencoder Parameters of the distribution depend on the target variable x Source code in normflows/distributions/encoder.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 class BaseEncoder ( nn . Module ): \"\"\" Base distribution of a flow-based variational autoencoder Parameters of the distribution depend on the target variable x \"\"\" def __init__ ( self ): super () . __init__ () def forward ( self , x , num_samples = 1 ): \"\"\" Args: x: Variable to condition on, first dimension is batch size num_samples: number of samples to draw per element of mini-batch Returns: sample of z for x, log probability for sample \"\"\" raise NotImplementedError def log_prob ( self , z , x ): \"\"\" Args: z: Primary random variable, first dimension is batch size x: Variable to condition on, first dimension is batch size Returns: log probability of z given x \"\"\" raise NotImplementedError","title":"BaseEncoder"},{"location":"references/#normflows.distributions.encoder.BaseEncoder.forward","text":"Parameters: Name Type Description Default x Variable to condition on, first dimension is batch size required num_samples number of samples to draw per element of mini-batch 1 Returns: sample of z for x, log probability for sample Source code in normflows/distributions/encoder.py 15 16 17 18 19 20 21 22 23 24 def forward ( self , x , num_samples = 1 ): \"\"\" Args: x: Variable to condition on, first dimension is batch size num_samples: number of samples to draw per element of mini-batch Returns: sample of z for x, log probability for sample \"\"\" raise NotImplementedError","title":"forward"},{"location":"references/#normflows.distributions.encoder.BaseEncoder.log_prob","text":"Parameters: Name Type Description Default z Primary random variable, first dimension is batch size required x Variable to condition on, first dimension is batch size required Returns: Type Description log probability of z given x Source code in normflows/distributions/encoder.py 26 27 28 29 30 31 32 33 34 35 36 def log_prob ( self , z , x ): \"\"\" Args: z: Primary random variable, first dimension is batch size x: Variable to condition on, first dimension is batch size Returns: log probability of z given x \"\"\" raise NotImplementedError","title":"log_prob"},{"location":"references/#normflows.distributions.encoder.ConstDiagGaussian","text":"Bases: BaseEncoder Source code in normflows/distributions/encoder.py 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124
125 126 127 class ConstDiagGaussian ( BaseEncoder ): def __init__ ( self , loc , scale ): \"\"\"Multivariate Gaussian distribution with diagonal covariance and parameters being constant wrt x Args: loc: mean vector of the distribution scale: vector of the standard deviations on the diagonal of the covariance matrix \"\"\" super () . __init__ () self . d = len ( loc ) if not torch . is_tensor ( loc ): loc = torch . tensor ( loc ) if not torch . is_tensor ( scale ): scale = torch . tensor ( scale ) self . loc = nn . Parameter ( loc . reshape (( 1 , 1 , self . d ))) self . scale = nn . Parameter ( scale ) def forward ( self , x = None , num_samples = 1 ): \"\"\" Args: x: Variable to condition on, will only be used to determine the batch size num_samples: number of samples to draw per element of mini-batch Returns: sample of z for x, log probability for sample \"\"\" if x is not None : batch_size = len ( x ) else : batch_size = 1 eps = torch . randn (( batch_size , num_samples , self . d ), device = x . device ) z = self . loc + self . scale * eps log_q = - 0.5 * self . d * np . log ( 2 * np . pi ) - torch . sum ( torch . log ( self . scale ) + 0.5 * torch . pow ( eps , 2 ), 2 ) return z , log_q def log_prob ( self , z , x ): \"\"\" Args: z: Primary random variable, first dimension is batch dimension x: Variable to condition on, first dimension is batch dimension Returns: log probability of z given x \"\"\" if z . dim () == 1 : z = z . unsqueeze ( 0 ) if z . dim () == 2 : z = z . unsqueeze ( 0 ) log_q = - 0.5 * self . d * np . log ( 2 * np . pi ) - torch . sum ( torch . log ( self . scale ) + 0.5 * (( z - self . loc ) / self . scale ) ** 2 , 2 ) return log_q","title":"ConstDiagGaussian"},{"location":"references/#normflows.distributions.encoder.ConstDiagGaussian.__init__","text":"Multivariate Gaussian distribution with diagonal covariance and parameters being constant wrt x Parameters: Name Type Description Default loc mean vector of the distribution required scale vector of the standard deviations on the diagonal of the covariance matrix required Source code in normflows/distributions/encoder.py 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 def __init__ ( self , loc , scale ): \"\"\"Multivariate Gaussian distribution with diagonal covariance and parameters being constant wrt x Args: loc: mean vector of the distribution scale: vector of the standard deviations on the diagonal of the covariance matrix \"\"\" super () . __init__ () self . d = len ( loc ) if not torch . is_tensor ( loc ): loc = torch . tensor ( loc ) if not torch . is_tensor ( scale ): scale = torch . tensor ( scale ) self . loc = nn . Parameter ( loc . reshape (( 1 , 1 , self . d ))) self . scale = nn . 
Parameter ( scale )","title":"__init__"},{"location":"references/#normflows.distributions.encoder.ConstDiagGaussian.forward","text":"Parameters: Name Type Description Default x Variable to condition on, will only be used to determine the batch size None num_samples number of samples to draw per element of mini-batch 1 Returns: Type Description sample of z for x, log probability for sample Source code in normflows/distributions/encoder.py 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 def forward ( self , x = None , num_samples = 1 ): \"\"\" Args: x: Variable to condition on, will only be used to determine the batch size num_samples: number of samples to draw per element of mini-batch Returns: sample of z for x, log probability for sample \"\"\" if x is not None : batch_size = len ( x ) else : batch_size = 1 eps = torch . randn (( batch_size , num_samples , self . d ), device = x . device ) z = self . loc + self . scale * eps log_q = - 0.5 * self . d * np . log ( 2 * np . pi ) - torch . sum ( torch . log ( self . scale ) + 0.5 * torch . pow ( eps , 2 ), 2 ) return z , log_q","title":"forward"},{"location":"references/#normflows.distributions.encoder.ConstDiagGaussian.log_prob","text":"Parameters: Name Type Description Default z Primary random variable, first dimension is batch dimension required x Variable to condition on, first dimension is batch dimension required Returns: Type Description log probability of z given x Source code in normflows/distributions/encoder.py 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 def log_prob ( self , z , x ): \"\"\" Args: z: Primary random variable, first dimension is batch dimension x: Variable to condition on, first dimension is batch dimension Returns: log probability of z given x \"\"\" if z . dim () == 1 : z = z . unsqueeze ( 0 ) if z . dim () == 2 : z = z . unsqueeze ( 0 ) log_q = - 0.5 * self . d * np . log ( 2 * np . pi ) - torch . sum ( torch . log ( self . scale ) + 0.5 * (( z - self . loc ) / self . scale ) ** 2 , 2 ) return log_q","title":"log_prob"},{"location":"references/#normflows.distributions.encoder.NNDiagGaussian","text":"Bases: BaseEncoder Diagonal Gaussian distribution with mean and variance determined by a neural network Source code in normflows/distributions/encoder.py 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 class NNDiagGaussian ( BaseEncoder ): \"\"\" Diagonal Gaussian distribution with mean and variance determined by a neural network \"\"\" def __init__ ( self , net ): \"\"\"Constructor Args: net: net computing mean (first n / 2 outputs), standard deviation (second n / 2 outputs) \"\"\" super () . __init__ () self . net = net def forward ( self , x , num_samples = 1 ): \"\"\" Args: x: Variable to condition on num_samples: number of samples to draw per element of mini-batch Returns: sample of z for x, log probability for sample \"\"\" batch_size = len ( x ) mean_std = self . net ( x ) n_hidden = mean_std . size ()[ 1 ] // 2 mean = mean_std [:, : n_hidden , ... ] . unsqueeze ( 1 ) std = torch . exp ( 0.5 * mean_std [:, n_hidden : ( 2 * n_hidden ), ... ] . unsqueeze ( 1 )) eps = torch . randn ( ( batch_size , num_samples ) + tuple ( mean . size ()[ 2 :]), device = x . device ) z = mean + std * eps log_q = - 0.5 * torch . prod ( torch . tensor ( z . size ()[ 2 :])) * np . log ( 2 * np . pi ) - torch .
sum ( torch . log ( std ) + 0.5 * torch . pow ( eps , 2 ), list ( range ( 2 , z . dim ()))) return z , log_q def log_prob ( self , z , x ): \"\"\" Args: z: Primary random variable, first dimension is batch dimension x: Variable to condition on, first dimension is batch dimension Returns: log probability of z given x \"\"\" if z . dim () == 1 : z = z . unsqueeze ( 0 ) if z . dim () == 2 : z = z . unsqueeze ( 0 ) mean_std = self . net ( x ) n_hidden = mean_std . size ()[ 1 ] // 2 mean = mean_std [:, : n_hidden , ... ] . unsqueeze ( 1 ) var = torch . exp ( mean_std [:, n_hidden : ( 2 * n_hidden ), ... ] . unsqueeze ( 1 )) log_q = - 0.5 * torch . prod ( torch . tensor ( z . size ()[ 2 :])) * np . log ( 2 * np . pi ) - 0.5 * torch . sum ( torch . log ( var ) + ( z - mean ) ** 2 / var , 2 ) return log_q","title":"NNDiagGaussian"},{"location":"references/#normflows.distributions.encoder.NNDiagGaussian.__init__","text":"Constructor Parameters: Name Type Description Default net net computing mean (first n / 2 outputs), standard deviation (second n / 2 outputs) required Source code in normflows/distributions/encoder.py 135 136 137 138 139 140 141 142 def __init__ ( self , net ): \"\"\"Constructor Args: net: net computing mean (first n / 2 outputs), standard deviation (second n / 2 outputs) \"\"\" super () . __init__ () self . net = net","title":"__init__"},{"location":"references/#normflows.distributions.encoder.NNDiagGaussian.forward","text":"Parameters: Name Type Description Default x Variable to condition on required num_samples number of samples to draw per element of mini-batch 1 Returns: Type Description sample of z for x, log probability for sample Source code in normflows/distributions/encoder.py 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 def forward ( self , x , num_samples = 1 ): \"\"\" Args: x: Variable to condition on num_samples: number of samples to draw per element of mini-batch Returns: sample of z for x, log probability for sample \"\"\" batch_size = len ( x ) mean_std = self . net ( x ) n_hidden = mean_std . size ()[ 1 ] // 2 mean = mean_std [:, : n_hidden , ... ] . unsqueeze ( 1 ) std = torch . exp ( 0.5 * mean_std [:, n_hidden : ( 2 * n_hidden ), ... ] . unsqueeze ( 1 )) eps = torch . randn ( ( batch_size , num_samples ) + tuple ( mean . size ()[ 2 :]), device = x . device ) z = mean + std * eps log_q = - 0.5 * torch . prod ( torch . tensor ( z . size ()[ 2 :])) * np . log ( 2 * np . pi ) - torch . sum ( torch . log ( std ) + 0.5 * torch . pow ( eps , 2 ), list ( range ( 2 , z . dim ()))) return z , log_q","title":"forward"},{"location":"references/#normflows.distributions.encoder.NNDiagGaussian.log_prob","text":"Parameters: Name Type Description Default z Primary random variable, first dimension is batch dimension required x Variable to condition on, first dimension is batch dimension required Returns: Type Description log probability of z given x Source code in normflows/distributions/encoder.py 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 def log_prob ( self , z , x ): \"\"\" Args: z: Primary random variable, first dimension is batch dimension x: Variable to condition on, first dimension is batch dimension Returns: log probability of z given x \"\"\" if z . dim () == 1 : z = z . unsqueeze ( 0 ) if z . dim () == 2 : z = z . unsqueeze ( 0 ) mean_std = self . net ( x ) n_hidden = mean_std . size ()[ 1 ] // 2 mean = mean_std [:, : n_hidden , ... ] . unsqueeze ( 1 ) var = torch .
exp ( mean_std [:, n_hidden : ( 2 * n_hidden ), ... ] . unsqueeze ( 1 )) log_q = - 0.5 * torch . prod ( torch . tensor ( z . size ()[ 2 :])) * np . log ( 2 * np . pi ) - 0.5 * torch . sum ( torch . log ( var ) + ( z - mean ) ** 2 / var , 2 ) return log_q","title":"log_prob"},{"location":"references/#normflows.distributions.encoder_test","text":"","title":"encoder_test"},{"location":"references/#normflows.distributions.linear_interpolation","text":"","title":"linear_interpolation"},{"location":"references/#normflows.distributions.linear_interpolation.LinearInterpolation","text":"Linear interpolation of two distributions in the log space Source code in normflows/distributions/linear_interpolation.py 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 class LinearInterpolation : \"\"\" Linear interpolation of two distributions in the log space \"\"\" def __init__ ( self , dist1 , dist2 , alpha ): \"\"\"Constructor Interpolation parameter alpha: ``` log_p = alpha * log_p_1 + (1 - alpha) * log_p_2 ``` Args: dist1: First distribution dist2: Second distribution alpha: Interpolation parameter \"\"\" self . alpha = alpha self . dist1 = dist1 self . dist2 = dist2 def log_prob ( self , z ): return self . alpha * self . dist1 . log_prob ( z ) + ( 1 - self . alpha ) * self . dist2 . log_prob ( z )","title":"LinearInterpolation"},{"location":"references/#normflows.distributions.linear_interpolation.LinearInterpolation.__init__","text":"Constructor Interpolation parameter alpha: log_p = alpha * log_p_1 + (1 - alpha) * log_p_2 Parameters: Name Type Description Default dist1 First distribution required dist2 Second distribution required alpha Interpolation parameter required Source code in normflows/distributions/linear_interpolation.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 def __init__ ( self , dist1 , dist2 , alpha ): \"\"\"Constructor Interpolation parameter alpha: ``` log_p = alpha * log_p_1 + (1 - alpha) * log_p_2 ``` Args: dist1: First distribution dist2: Second distribution alpha: Interpolation parameter \"\"\" self . alpha = alpha self . dist1 = dist1 self . dist2 = dist2","title":"__init__"},{"location":"references/#normflows.distributions.mh_proposal","text":"","title":"mh_proposal"},{"location":"references/#normflows.distributions.mh_proposal.DiagGaussianProposal","text":"Bases: MHProposal Diagonal Gaussian distribution with previous value as mean as a proposal for Metropolis Hastings algorithm Source code in normflows/distributions/mh_proposal.py 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 class DiagGaussianProposal ( MHProposal ): \"\"\" Diagonal Gaussian distribution with previous value as mean as a proposal for Metropolis Hastings algorithm \"\"\" def __init__ ( self , shape , scale ): \"\"\"Constructor Args: shape: Shape of variables to sample scale: Standard deviation of distribution \"\"\" super () . __init__ () self . shape = shape self . scale_cpu = torch . tensor ( scale ) self . register_buffer ( \"scale\" , self . scale_cpu . unsqueeze ( 0 )) def sample ( self , z ): num_samples = len ( z ) eps = torch . randn (( num_samples ,) + self . shape , dtype = z . dtype , device = z . device ) z_ = eps * self . scale + z return z_ def log_prob ( self , z_ , z ): log_p = - 0.5 * np . prod ( self . shape ) * np . log ( 2 * np . pi ) - torch . sum ( torch . log ( self . scale ) + 0.5 * torch . pow (( z_ - z ) / self . scale , 2 ), list ( range ( 1 , z . 
dim ())), ) return log_p def forward ( self , z ): num_samples = len ( z ) eps = torch . randn (( num_samples ,) + self . shape , dtype = z . dtype , device = z . device ) z_ = eps * self . scale + z log_p_diff = torch . zeros ( num_samples , dtype = z . dtype , device = z . device ) return z_ , log_p_diff","title":"DiagGaussianProposal"},{"location":"references/#normflows.distributions.mh_proposal.DiagGaussianProposal.__init__","text":"Constructor Parameters: Name Type Description Default shape Shape of variables to sample required scale Standard deviation of distribution required Source code in normflows/distributions/mh_proposal.py 53 54 55 56 57 58 59 60 61 62 63 def __init__ ( self , shape , scale ): \"\"\"Constructor Args: shape: Shape of variables to sample scale: Standard deviation of distribution \"\"\" super () . __init__ () self . shape = shape self . scale_cpu = torch . tensor ( scale ) self . register_buffer ( \"scale\" , self . scale_cpu . unsqueeze ( 0 ))","title":"__init__"},{"location":"references/#normflows.distributions.mh_proposal.MHProposal","text":"Bases: Module Proposal distribution for the Metropolis Hastings algorithm Source code in normflows/distributions/mh_proposal.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 class MHProposal ( nn . Module ): \"\"\" Proposal distribution for the Metropolis Hastings algorithm \"\"\" def __init__ ( self ): super () . __init__ () def sample ( self , z ): \"\"\" Sample new value based on previous z \"\"\" raise NotImplementedError def log_prob ( self , z_ , z ): \"\"\" Args: z_: Potential new sample z: Previous sample Returns: Log probability of proposal distribution \"\"\" raise NotImplementedError def forward ( self , z ): \"\"\"Draw samples given z and compute log probability difference ``` log(p(z | z_new)) - log(p(z_new | z)) ``` Args: z: Previous samples Returns: Proposal, difference of log probability ratio \"\"\" raise NotImplementedError","title":"MHProposal"},{"location":"references/#normflows.distributions.mh_proposal.MHProposal.forward","text":"Draw samples given z and compute log probability difference log(p(z | z_new)) - log(p(z_new | z)) Parameters: Name Type Description Default z Previous samples required Returns: Type Description Proposal, difference of log probability ratio Source code in normflows/distributions/mh_proposal.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 def forward ( self , z ): \"\"\"Draw samples given z and compute log probability difference ``` log(p(z | z_new)) - log(p(z_new | z)) ``` Args: z: Previous samples Returns: Proposal, difference of log probability ratio \"\"\" raise NotImplementedError","title":"forward"},{"location":"references/#normflows.distributions.mh_proposal.MHProposal.log_prob","text":"Parameters: Name Type Description Default z_ Potential new sample required z Previous sample required Returns: Type Description Log probability of proposal distribution Source code in normflows/distributions/mh_proposal.py 20 21 22 23 24 25 26 27 28 29 def log_prob ( self , z_ , z ): \"\"\" Args: z_: Potential new sample z: Previous sample Returns: Log probability of proposal distribution \"\"\" raise NotImplementedError","title":"log_prob"},{"location":"references/#normflows.distributions.mh_proposal.MHProposal.sample","text":"Sample new value based on previous z Source code in normflows/distributions/mh_proposal.py 14 15 16 17 18 def sample ( self , z ): \"\"\" Sample new value based on previous z \"\"\" raise 
NotImplementedError","title":"sample"},{"location":"references/#normflows.distributions.prior","text":"","title":"prior"},{"location":"references/#normflows.distributions.prior.ImagePrior","text":"Bases: Module Intensities of an image determine probability density of prior Source code in normflows/distributions/prior.py 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 class ImagePrior ( nn . Module ): \"\"\" Intensities of an image determine probability density of prior \"\"\" def __init__ ( self , image , x_range = [ - 3 , 3 ], y_range = [ - 3 , 3 ], eps = 1.0e-10 ): \"\"\"Constructor Args: image: image as np matrix x_range: x range to position image at y_range: y range to position image at eps: small value to add to image to avoid log(0) problems \"\"\" super () . __init__ () image_ = np . flip ( image , 0 ) . transpose () + eps self . image_cpu = torch . tensor ( image_ / np . max ( image_ )) self . image_size_cpu = self . image_cpu . size () self . x_range = torch . tensor ( x_range ) self . y_range = torch . tensor ( y_range ) self . register_buffer ( \"image\" , self . image_cpu ) self . register_buffer ( \"image_size\" , torch . tensor ( self . image_size_cpu ) . unsqueeze ( 0 ) ) self . register_buffer ( \"density\" , torch . log ( self . image_cpu / torch . sum ( self . image_cpu )) ) self . register_buffer ( \"scale\" , torch . tensor ( [[ self . x_range [ 1 ] - self . x_range [ 0 ], self . y_range [ 1 ] - self . y_range [ 0 ]]] ), ) self . register_buffer ( \"shift\" , torch . tensor ([[ self . x_range [ 0 ], self . y_range [ 0 ]]]) ) def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" z_ = torch . clamp (( z - self . shift ) / self . scale , max = 1 , min = 0 ) ind = ( z_ * ( self . image_size - 1 )) . long () return self . density [ ind [:, 0 ], ind [:, 1 ]] def rejection_sampling ( self , num_steps = 1 ): \"\"\"Perform rejection sampling on image distribution Args: num_steps: Number of rejection sampling steps to perform Returns: Accepted samples \"\"\" z_ = torch . rand ( ( num_steps , 2 ), dtype = self . image . dtype , device = self . image . device ) prob = torch . rand ( num_steps , dtype = self . image . dtype , device = self . image . device ) ind = ( z_ * ( self . image_size - 1 )) . long () intensity = self . image [ ind [:, 0 ], ind [:, 1 ]] accept = intensity > prob z = z_ [ accept , :] * self . scale + self . shift return z def sample ( self , num_samples = 1 ): \"\"\"Sample from image distribution through rejection sampling Args: num_samples: Number of samples to draw Returns: Samples \"\"\" z = torch . ones (( 0 , 2 ), dtype = self . image . dtype , device = self . image . device ) while len ( z ) < num_samples : z_ = self . rejection_sampling ( num_samples ) ind = np . min ([ len ( z_ ), num_samples - len ( z )]) z = torch . 
cat ([ z , z_ [: ind , :]], 0 ) return z","title":"ImagePrior"},{"location":"references/#normflows.distributions.prior.ImagePrior.__init__","text":"Constructor Parameters: Name Type Description Default image image as np matrix required x_range x range to position image at [-3, 3] y_range y range to position image at [-3, 3] eps small value to add to image to avoid log(0) problems 1e-10 Source code in normflows/distributions/prior.py 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 def __init__ ( self , image , x_range = [ - 3 , 3 ], y_range = [ - 3 , 3 ], eps = 1.0e-10 ): \"\"\"Constructor Args: image: image as np matrix x_range: x range to position image at y_range: y range to position image at eps: small value to add to image to avoid log(0) problems \"\"\" super () . __init__ () image_ = np . flip ( image , 0 ) . transpose () + eps self . image_cpu = torch . tensor ( image_ / np . max ( image_ )) self . image_size_cpu = self . image_cpu . size () self . x_range = torch . tensor ( x_range ) self . y_range = torch . tensor ( y_range ) self . register_buffer ( \"image\" , self . image_cpu ) self . register_buffer ( \"image_size\" , torch . tensor ( self . image_size_cpu ) . unsqueeze ( 0 ) ) self . register_buffer ( \"density\" , torch . log ( self . image_cpu / torch . sum ( self . image_cpu )) ) self . register_buffer ( \"scale\" , torch . tensor ( [[ self . x_range [ 1 ] - self . x_range [ 0 ], self . y_range [ 1 ] - self . y_range [ 0 ]]] ), ) self . register_buffer ( \"shift\" , torch . tensor ([[ self . x_range [ 0 ], self . y_range [ 0 ]]]) )","title":"__init__"},{"location":"references/#normflows.distributions.prior.ImagePrior.log_prob","text":"Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/prior.py 59 60 61 62 63 64 65 66 67 68 69 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" z_ = torch . clamp (( z - self . shift ) / self . scale , max = 1 , min = 0 ) ind = ( z_ * ( self . image_size - 1 )) . long () return self . density [ ind [:, 0 ], ind [:, 1 ]]","title":"log_prob"},{"location":"references/#normflows.distributions.prior.ImagePrior.rejection_sampling","text":"Perform rejection sampling on image distribution Parameters: Name Type Description Default num_steps Number of rejection sampling steps to perform 1 Returns: Type Description Accepted samples Source code in normflows/distributions/prior.py 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 def rejection_sampling ( self , num_steps = 1 ): \"\"\"Perform rejection sampling on image distribution Args: num_steps: Number of rejection sampling steps to perform Returns: Accepted samples \"\"\" z_ = torch . rand ( ( num_steps , 2 ), dtype = self . image . dtype , device = self . image . device ) prob = torch . rand ( num_steps , dtype = self . image . dtype , device = self . image . device ) ind = ( z_ * ( self . image_size - 1 )) . long () intensity = self . image [ ind [:, 0 ], ind [:, 1 ]] accept = intensity > prob z = z_ [ accept , :] * self . scale + self . 
shift return z","title":"rejection_sampling"},{"location":"references/#normflows.distributions.prior.ImagePrior.sample","text":"Sample from image distribution through rejection sampling Parameters: Name Type Description Default num_samples Number of samples to draw 1 Returns: Type Description Samples Source code in normflows/distributions/prior.py 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 def sample ( self , num_samples = 1 ): \"\"\"Sample from image distribution through rejection sampling Args: num_samples: Number of samples to draw Returns: Samples \"\"\" z = torch . ones (( 0 , 2 ), dtype = self . image . dtype , device = self . image . device ) while len ( z ) < num_samples : z_ = self . rejection_sampling ( num_samples ) ind = np . min ([ len ( z_ ), num_samples - len ( z )]) z = torch . cat ([ z , z_ [: ind , :]], 0 ) return z","title":"sample"},{"location":"references/#normflows.distributions.prior.PriorDistribution","text":"Source code in normflows/distributions/prior.py 6 7 8 9 10 11 12 13 14 15 16 17 18 class PriorDistribution : def __init__ ( self ): raise NotImplementedError def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" raise NotImplementedError","title":"PriorDistribution"},{"location":"references/#normflows.distributions.prior.PriorDistribution.log_prob","text":"Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/prior.py 10 11 12 13 14 15 16 17 18 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" raise NotImplementedError","title":"log_prob"},{"location":"references/#normflows.distributions.prior.Sinusoidal","text":"Bases: PriorDistribution Source code in normflows/distributions/prior.py 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 class Sinusoidal ( PriorDistribution ): def __init__ ( self , scale , period ): \"\"\"Distribution 2d with sinusoidal density given by ``` w_1(z) = sin(2*pi / period * z[0]) log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2 ``` Args: scale: scale of the distribution, see formula period: period of the sinusoidal \"\"\" self . scale = scale self . period = period def log_prob ( self , z ): \"\"\" ``` log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2 w_1(z) = sin(2*pi / period * z[0]) ``` Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z w_1 = lambda x : torch . sin ( 2 * np . pi / self . period * z_ [ 0 ]) log_prob = ( - 0.5 * (( z_ [ 1 ] - w_1 ( z_ )) / ( self . scale )) ** 2 - 0.5 * ( torch . norm ( z_ , dim = 0 , p = 4 ) / ( 20 * self .
scale )) ** 4 ) # add Gaussian envelope for valid p(z) return log_prob","title":"Sinusoidal"},{"location":"references/#normflows.distributions.prior.Sinusoidal.__init__","text":"Distribution 2d with sinusoidal density given by w_1(z) = sin(2*pi / period * z[0]) log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2 Parameters: Name Type Description Default scale scale of the distribution, see formula required period period of the sinosoidal required Source code in normflows/distributions/prior.py 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 def __init__ ( self , scale , period ): \"\"\"Distribution 2d with sinusoidal density given by ``` w_1(z) = sin(2*pi / period * z[0]) log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2 ``` Args: scale: scale of the distribution, see formula period: period of the sinosoidal \"\"\" self . scale = scale self . period = period","title":"__init__"},{"location":"references/#normflows.distributions.prior.Sinusoidal.log_prob","text":"log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2 w_1(z) = sin(2*pi / period * z[0]) Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/prior.py 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 def log_prob ( self , z ): \"\"\" ``` log(p) = - 1/2 * ((z[1] - w_1(z)) / (2 * scale)) ** 2 w_1(z) = sin(2*pi / period * z[0]) ``` Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z w_1 = lambda x : torch . sin ( 2 * np . pi / self . period * z_ [ 0 ]) log_prob = ( - 0.5 * (( z_ [ 1 ] - w_1 ( z_ )) / ( self . scale )) ** 2 - 0.5 * ( torch . norm ( z_ , dim = 0 , p = 4 ) / ( 20 * self . scale )) ** 4 ) # add Gaussian envelope for valid p(z) return log_prob","title":"log_prob"},{"location":"references/#normflows.distributions.prior.Sinusoidal_gap","text":"Bases: PriorDistribution Source code in normflows/distributions/prior.py 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 class Sinusoidal_gap ( PriorDistribution ): def __init__ ( self , scale , period ): \"\"\"Distribution 2d with sinusoidal density with gap given by ``` w_1(z) = sin(2*pi / period * z[0]) w_2(z) = 3 * exp(-0.5 * ((z[0] - 1) / 0.6) ** 2) log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.35) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_2(z)) / 0.35) ** 2)) ``` Args: loc: distance of modes from the origin scale: scale of modes \"\"\" self . scale = scale self . period = period self . w2_scale = 0.6 self . w2_amp = 3.0 self . w2_mu = 1.0 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z w_1 = lambda x : torch . sin ( 2 * np . pi / self . period * z_ [ 0 ]) w_2 = lambda x : self . w2_amp * torch . exp ( - 0.5 * (( z_ [ 0 ] - self . w2_mu ) / self . w2_scale ) ** 2 ) eps = torch . abs ( w_2 ( z_ ) / 2 ) a = torch . abs ( z_ [ 1 ] - w_1 ( z_ ) + w_2 ( z_ ) / 2 ) log_prob = ( - 0.5 * (( a - eps ) / self . scale ) ** 2 + torch . log ( 1 + torch . exp ( - 2 * ( eps * a ) / self . 
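As a quick usage sketch for `Sinusoidal`: `log_prob` accepts a batch of 2d points with the batch dimension first and internally permutes the coordinate dimension to the front, so a flattened meshgrid can be scored in one call. The scale and period values below are arbitrary.

```python
import torch
from normflows.distributions.prior import Sinusoidal

p = Sinusoidal(scale=0.35, period=4.0)

grid = torch.linspace(-4, 4, 100)
xx, yy = torch.meshgrid(grid, grid, indexing="ij")
z = torch.stack([xx.flatten(), yy.flatten()], dim=1)  # shape (10000, 2)
log_p = p.log_prob(z).reshape(100, 100)               # log density on the grid
```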
scale ** 2 )) - 0.5 * ( torch . norm ( z_ , dim = 0 , p = 4 ) / ( 20 * self . scale )) ** 4 ) return log_prob","title":"Sinusoidal_gap"},{"location":"references/#normflows.distributions.prior.Sinusoidal_gap.__init__","text":"Distribution 2d with sinusoidal density with gap given by w_1(z) = sin(2*pi / period * z[0]) w_2(z) = 3 * exp(-0.5 * ((z[0] - 1) / 0.6) ** 2) log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.35) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_2(z)) / 0.35) ** 2)) Parameters: Name Type Description Default loc distance of modes from the origin required scale scale of modes required Source code in normflows/distributions/prior.py 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 def __init__ ( self , scale , period ): \"\"\"Distribution 2d with sinusoidal density with gap given by ``` w_1(z) = sin(2*pi / period * z[0]) w_2(z) = 3 * exp(-0.5 * ((z[0] - 1) / 0.6) ** 2) log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.35) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_2(z)) / 0.35) ** 2)) ``` Args: loc: distance of modes from the origin scale: scale of modes \"\"\" self . scale = scale self . period = period self . w2_scale = 0.6 self . w2_amp = 3.0 self . w2_mu = 1.0","title":"__init__"},{"location":"references/#normflows.distributions.prior.Sinusoidal_gap.log_prob","text":"Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/prior.py 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z w_1 = lambda x : torch . sin ( 2 * np . pi / self . period * z_ [ 0 ]) w_2 = lambda x : self . w2_amp * torch . exp ( - 0.5 * (( z_ [ 0 ] - self . w2_mu ) / self . w2_scale ) ** 2 ) eps = torch . abs ( w_2 ( z_ ) / 2 ) a = torch . abs ( z_ [ 1 ] - w_1 ( z_ ) + w_2 ( z_ ) / 2 ) log_prob = ( - 0.5 * (( a - eps ) / self . scale ) ** 2 + torch . log ( 1 + torch . exp ( - 2 * ( eps * a ) / self . scale ** 2 )) - 0.5 * ( torch . norm ( z_ , dim = 0 , p = 4 ) / ( 20 * self . scale )) ** 4 ) return log_prob","title":"log_prob"},{"location":"references/#normflows.distributions.prior.Sinusoidal_split","text":"Bases: PriorDistribution Source code in normflows/distributions/prior.py 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 class Sinusoidal_split ( PriorDistribution ): def __init__ ( self , scale , period ): \"\"\"Distribution 2d with sinusoidal density with split given by ``` w_1(z) = sin(2*pi / period * z[0]) w_3(z) = 3 * sigmoid((z[0] - 1) / 0.3) log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.4) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_3(z)) / 0.35) ** 2)) ``` Args: loc: distance of modes from the origin scale: scale of modes \"\"\" self . scale = scale self . period = period self . w3_scale = 0.3 self . w3_amp = 3.0 self . w3_mu = 1.0 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z w_1 = lambda x : torch . 
sin ( 2 * np . pi / self . period * z_ [ 0 ]) w_3 = lambda x : self . w3_amp * torch . sigmoid ( ( z_ [ 0 ] - self . w3_mu ) / self . w3_scale ) eps = torch . abs ( w_3 ( z_ ) / 2 ) a = torch . abs ( z_ [ 1 ] - w_1 ( z_ ) + w_3 ( z_ ) / 2 ) log_prob = ( - 0.5 * (( a - eps ) / ( self . scale )) ** 2 + torch . log ( 1 + torch . exp ( - 2 * ( eps * a ) / self . scale ** 2 )) - 0.5 * ( torch . norm ( z_ , dim = 0 , p = 4 ) / ( 20 * self . scale )) ** 4 ) return log_prob","title":"Sinusoidal_split"},{"location":"references/#normflows.distributions.prior.Sinusoidal_split.__init__","text":"Distribution 2d with sinusoidal density with split given by w_1(z) = sin(2*pi / period * z[0]) w_3(z) = 3 * sigmoid((z[0] - 1) / 0.3) log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.4) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_3(z)) / 0.35) ** 2)) Parameters: Name Type Description Default loc distance of modes from the origin required scale scale of modes required Source code in normflows/distributions/prior.py 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 def __init__ ( self , scale , period ): \"\"\"Distribution 2d with sinusoidal density with split given by ``` w_1(z) = sin(2*pi / period * z[0]) w_3(z) = 3 * sigmoid((z[0] - 1) / 0.3) log(p) = -log(exp(-0.5 * ((z[1] - w_1(z)) / 0.4) ** 2) + exp(-0.5 * ((z[1] - w_1(z) + w_3(z)) / 0.35) ** 2)) ``` Args: loc: distance of modes from the origin scale: scale of modes \"\"\" self . scale = scale self . period = period self . w3_scale = 0.3 self . w3_amp = 3.0 self . w3_mu = 1.0","title":"__init__"},{"location":"references/#normflows.distributions.prior.Sinusoidal_split.log_prob","text":"Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/prior.py 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z w_1 = lambda x : torch . sin ( 2 * np . pi / self . period * z_ [ 0 ]) w_3 = lambda x : self . w3_amp * torch . sigmoid ( ( z_ [ 0 ] - self . w3_mu ) / self . w3_scale ) eps = torch . abs ( w_3 ( z_ ) / 2 ) a = torch . abs ( z_ [ 1 ] - w_1 ( z_ ) + w_3 ( z_ ) / 2 ) log_prob = ( - 0.5 * (( a - eps ) / ( self . scale )) ** 2 + torch . log ( 1 + torch . exp ( - 2 * ( eps * a ) / self . scale ** 2 )) - 0.5 * ( torch . norm ( z_ , dim = 0 , p = 4 ) / ( 20 * self . scale )) ** 4 ) return log_prob","title":"log_prob"},{"location":"references/#normflows.distributions.prior.Smiley","text":"Bases: PriorDistribution Source code in normflows/distributions/prior.py 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 class Smiley ( PriorDistribution ): def __init__ ( self , scale ): \"\"\"Distribution 2d of a smiley :) Args: scale: scale of the smiley \"\"\" self . scale = scale self . loc = 2.0 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z log_prob = ( - 0.5 * (( torch . norm ( z_ , dim = 0 ) - self . loc ) / ( 2 * self . scale )) ** 2 - 0.5 * (( torch . 
abs ( z_ [ 1 ] + 0.8 ) - 1.2 ) / ( 2 * self . scale )) ** 2 ) return log_prob","title":"Smiley"},{"location":"references/#normflows.distributions.prior.Smiley.__init__","text":"Distribution 2d of a smiley :) Parameters: Name Type Description Default scale scale of the smiley required Source code in normflows/distributions/prior.py 300 301 302 303 304 305 306 307 def __init__ ( self , scale ): \"\"\"Distribution 2d of a smiley :) Args: scale: scale of the smiley \"\"\" self . scale = scale self . loc = 2.0","title":"__init__"},{"location":"references/#normflows.distributions.prior.Smiley.log_prob","text":"Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/prior.py 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" if z . dim () > 1 : z_ = z . permute (( z . dim () - 1 ,) + tuple ( range ( 0 , z . dim () - 1 ))) else : z_ = z log_prob = ( - 0.5 * (( torch . norm ( z_ , dim = 0 ) - self . loc ) / ( 2 * self . scale )) ** 2 - 0.5 * (( torch . abs ( z_ [ 1 ] + 0.8 ) - 1.2 ) / ( 2 * self . scale )) ** 2 ) return log_prob","title":"log_prob"},{"location":"references/#normflows.distributions.prior.TwoModes","text":"Bases: PriorDistribution Source code in normflows/distributions/prior.py 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 class TwoModes ( PriorDistribution ): def __init__ ( self , loc , scale ): \"\"\"Distribution 2d with two modes Distribution 2d with two modes at ```z[0] = -loc``` and ```z[0] = loc``` following the density ``` log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2 - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2)) ``` Args: loc: distance of modes from the origin scale: scale of modes \"\"\" self . loc = loc self . scale = scale def log_prob ( self , z ): \"\"\" ``` log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2 - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2)) ``` Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" a = torch . abs ( z [:, 0 ]) eps = torch . abs ( torch . tensor ( self . loc )) log_prob = ( - 0.5 * (( torch . norm ( z , dim = 1 ) - self . loc ) / ( 2 * self . scale )) ** 2 - 0.5 * (( a - eps ) / ( 3 * self . scale )) ** 2 + torch . log ( 1 + torch . exp ( - 2 * ( a * eps ) / ( 3 * self . 
scale ) ** 2 )) ) return log_prob","title":"TwoModes"},{"location":"references/#normflows.distributions.prior.TwoModes.__init__","text":"Distribution 2d with two modes Distribution 2d with two modes at z[0] = -loc and z[0] = loc following the density log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2 - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2)) Args: loc: distance of modes from the origin scale: scale of modes Source code in normflows/distributions/prior.py 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 def __init__ ( self , loc , scale ): \"\"\"Distribution 2d with two modes Distribution 2d with two modes at ```z[0] = -loc``` and ```z[0] = loc``` following the density ``` log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2 - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2)) ``` Args: loc: distance of modes from the origin scale: scale of modes \"\"\" self . loc = loc self . scale = scale","title":"__init__"},{"location":"references/#normflows.distributions.prior.TwoModes.log_prob","text":"log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2 - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2)) Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/prior.py 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 def log_prob ( self , z ): \"\"\" ``` log(p) = 1/2 * ((norm(z) - loc) / (2 * scale)) ** 2 - log(exp(-1/2 * ((z[0] - loc) / (3 * scale)) ** 2) + exp(-1/2 * ((z[0] + loc) / (3 * scale)) ** 2)) ``` Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" a = torch . abs ( z [:, 0 ]) eps = torch . abs ( torch . tensor ( self . loc )) log_prob = ( - 0.5 * (( torch . norm ( z , dim = 1 ) - self . loc ) / ( 2 * self . scale )) ** 2 - 0.5 * (( a - eps ) / ( 3 * self . scale )) ** 2 + torch . log ( 1 + torch . exp ( - 2 * ( a * eps ) / ( 3 * self . scale ) ** 2 )) ) return log_prob","title":"log_prob"},{"location":"references/#normflows.distributions.prior_test","text":"","title":"prior_test"},{"location":"references/#normflows.distributions.target","text":"","title":"target"},{"location":"references/#normflows.distributions.target.CircularGaussianMixture","text":"Bases: Module Two-dimensional Gaussian mixture arranged in a circle Source code in normflows/distributions/target.py 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 class CircularGaussianMixture ( nn . Module ): \"\"\" Two-dimensional Gaussian mixture arranged in a circle \"\"\" def __init__ ( self , n_modes = 8 ): \"\"\"Constructor Args: n_modes: Number of modes \"\"\" super ( CircularGaussianMixture , self ) . __init__ () self . n_modes = n_modes self . register_buffer ( \"scale\" , torch . tensor ( 2 / 3 * np . sin ( np . pi / self . n_modes )) . float () ) def log_prob ( self , z ): d = torch . zeros (( len ( z ), 0 ), dtype = z . dtype , device = z . device ) for i in range ( self . n_modes ): d_ = ( ( z [:, 0 ] - 2 * np . sin ( 2 * np . pi / self . n_modes * i )) ** 2 + ( z [:, 1 ] - 2 * np . cos ( 2 * np . pi / self . n_modes * i )) ** 2 ) / ( 2 * self . scale ** 2 ) d = torch . 
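The remaining 2d test densities (`Sinusoidal_gap`, `Sinusoidal_split`, `Smiley`, `TwoModes`) share the same `log_prob` interface, so a single grid evaluation covers all of them. Note that `Sinusoidal_gap` and `Sinusoidal_split` take `scale` and `period` arguments, even though their docstrings above describe `loc` and `scale`. A minimal sketch with arbitrary parameter values:

```python
import torch
from normflows.distributions.prior import (
    Sinusoidal_gap, Sinusoidal_split, Smiley, TwoModes)

grid = torch.linspace(-4, 4, 200)
xx, yy = torch.meshgrid(grid, grid, indexing="ij")
z = torch.stack([xx.flatten(), yy.flatten()], dim=1)

densities = {
    "gap": Sinusoidal_gap(scale=0.35, period=4.0),
    "split": Sinusoidal_split(scale=0.35, period=4.0),
    "smiley": Smiley(scale=0.5),
    "two_modes": TwoModes(loc=2.0, scale=0.2),
}
for name, p in densities.items():
    log_p = p.log_prob(z).reshape(200, 200)  # log density on the grid
    print(name, log_p.shape)
```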
cat (( d , d_ [:, None ]), 1 ) log_p = - torch . log ( 2 * np . pi * self . scale ** 2 * self . n_modes ) + torch . logsumexp ( - d , 1 ) return log_p def sample ( self , num_samples = 1 ): eps = torch . randn ( ( num_samples , 2 ), dtype = self . scale . dtype , device = self . scale . device ) phi = ( 2 * np . pi / self . n_modes * torch . randint ( 0 , self . n_modes , ( num_samples ,), device = self . scale . device ) ) loc = torch . stack (( 2 * torch . sin ( phi ), 2 * torch . cos ( phi )), 1 ) . type ( eps . dtype ) return eps * self . scale + loc","title":"CircularGaussianMixture"},{"location":"references/#normflows.distributions.target.CircularGaussianMixture.__init__","text":"Constructor Parameters: Name Type Description Default n_modes Number of modes 8 Source code in normflows/distributions/target.py 137 138 139 140 141 142 143 144 145 146 147 def __init__ ( self , n_modes = 8 ): \"\"\"Constructor Args: n_modes: Number of modes \"\"\" super ( CircularGaussianMixture , self ) . __init__ () self . n_modes = n_modes self . register_buffer ( \"scale\" , torch . tensor ( 2 / 3 * np . sin ( np . pi / self . n_modes )) . float () )","title":"__init__"},{"location":"references/#normflows.distributions.target.ConditionalDiagGaussian","text":"Bases: Target Gaussian distribution conditioned on its mean and standard deviation The first half of the entries of the condition, also called context, are the mean, while the second half are the standard deviation. Source code in normflows/distributions/target.py 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 class ConditionalDiagGaussian ( Target ): \"\"\" Gaussian distribution conditioned on its mean and standard deviation The first half of the entries of the condition, also called context, are the mean, while the second half are the standard deviation. \"\"\" def log_prob ( self , z , context = None ): d = z . shape [ - 1 ] loc = context [:, : d ] scale = context [:, d :] log_p = - 0.5 * d * np . log ( 2 * np . pi ) - torch . sum ( torch . log ( scale ) + 0.5 * torch . pow (( z - loc ) / scale , 2 ), dim =- 1 ) return log_p def sample ( self , num_samples = 1 , context = None ): d = context . shape [ - 1 ] // 2 loc = context [:, : d ] scale = context [:, d :] eps = torch . randn ( ( num_samples , d ), dtype = context . dtype , device = context . device ) z = loc + scale * eps return z","title":"ConditionalDiagGaussian"},{"location":"references/#normflows.distributions.target.RingMixture","text":"Bases: Target Mixture of ring distributions in two dimensions Source code in normflows/distributions/target.py 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 class RingMixture ( Target ): \"\"\" Mixture of ring distributions in two dimensions \"\"\" def __init__ ( self , n_rings = 2 ): super () . __init__ () self . n_dims = 2 self . max_log_prob = 0.0 self . n_rings = n_rings self . scale = 1 / 4 / self . n_rings def log_prob ( self , z ): d = torch . zeros (( len ( z ), 0 ), dtype = z . dtype , device = z . device ) for i in range ( self . n_rings ): d_ = (( torch . norm ( z , dim = 1 ) - 2 / self . n_rings * ( i + 1 )) ** 2 ) / ( 2 * self . scale ** 2 ) d = torch . cat (( d , d_ [:, None ]), 1 ) return torch . 
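A short sketch of the two target distributions documented above: `CircularGaussianMixture` supports direct sampling, while `ConditionalDiagGaussian` packs the mean and standard deviation into one context tensor along the last axis. The sizes below are arbitrary, and `num_samples` is assumed to match the context batch size so the broadcast in `sample` works out.

```python
import torch
from normflows.distributions.target import (
    CircularGaussianMixture, ConditionalDiagGaussian)

ring = CircularGaussianMixture(n_modes=8)
z = ring.sample(512)
print(ring.log_prob(z).shape)  # torch.Size([512])

cond = ConditionalDiagGaussian()
# Context layout: first half = mean, second half = standard deviation
context = torch.cat([torch.zeros(4, 2), torch.ones(4, 2)], dim=1)  # (4, 4)
z = cond.sample(num_samples=4, context=context)
log_p = cond.log_prob(z, context)
```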
logsumexp ( - d , 1 )","title":"RingMixture"},{"location":"references/#normflows.distributions.target.Target","text":"Bases: Module Sample target distributions to test models Source code in normflows/distributions/target.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 class Target ( nn . Module ): \"\"\" Sample target distributions to test models \"\"\" def __init__ ( self , prop_scale = torch . tensor ( 6.0 ), prop_shift = torch . tensor ( - 3.0 )): \"\"\"Constructor Args: prop_scale: Scale for the uniform proposal prop_shift: Shift for the uniform proposal \"\"\" super () . __init__ () self . register_buffer ( \"prop_scale\" , prop_scale ) self . register_buffer ( \"prop_shift\" , prop_shift ) def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" raise NotImplementedError ( \"The log probability is not implemented yet.\" ) def rejection_sampling ( self , num_steps = 1 ): \"\"\"Perform rejection sampling on image distribution Args: num_steps: Number of rejection sampling steps to perform Returns: Accepted samples \"\"\" eps = torch . rand ( ( num_steps , self . n_dims ), dtype = self . prop_scale . dtype , device = self . prop_scale . device , ) z_ = self . prop_scale * eps + self . prop_shift prob = torch . rand ( num_steps , dtype = self . prop_scale . dtype , device = self . prop_scale . device ) prob_ = torch . exp ( self . log_prob ( z_ ) - self . max_log_prob ) accept = prob_ > prob z = z_ [ accept , :] return z def sample ( self , num_samples = 1 ): \"\"\"Sample from image distribution through rejection sampling Args: num_samples: Number of samples to draw Returns: Samples \"\"\" z = torch . zeros ( ( 0 , self . n_dims ), dtype = self . prop_scale . dtype , device = self . prop_scale . device ) while len ( z ) < num_samples : z_ = self . rejection_sampling ( num_samples ) ind = np . min ([ len ( z_ ), num_samples - len ( z )]) z = torch . cat ([ z , z_ [: ind , :]], 0 ) return z","title":"Target"},{"location":"references/#normflows.distributions.target.Target.__init__","text":"Constructor Parameters: Name Type Description Default prop_scale Scale for the uniform proposal tensor (6.0) prop_shift Shift for the uniform proposal tensor (-3.0) Source code in normflows/distributions/target.py 13 14 15 16 17 18 19 20 21 22 def __init__ ( self , prop_scale = torch . tensor ( 6.0 ), prop_shift = torch . tensor ( - 3.0 )): \"\"\"Constructor Args: prop_scale: Scale for the uniform proposal prop_shift: Shift for the uniform proposal \"\"\" super () . __init__ () self . register_buffer ( \"prop_scale\" , prop_scale ) self . 
register_buffer ( \"prop_shift\" , prop_shift )","title":"__init__"},{"location":"references/#normflows.distributions.target.Target.log_prob","text":"Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/target.py 24 25 26 27 28 29 30 31 32 def log_prob ( self , z ): \"\"\" Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" raise NotImplementedError ( \"The log probability is not implemented yet.\" )","title":"log_prob"},{"location":"references/#normflows.distributions.target.Target.rejection_sampling","text":"Perform rejection sampling on image distribution Parameters: Name Type Description Default num_steps Number of rejection sampling steps to perform 1 Returns: Type Description Accepted samples Source code in normflows/distributions/target.py 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 def rejection_sampling ( self , num_steps = 1 ): \"\"\"Perform rejection sampling on image distribution Args: num_steps: Number of rejection sampling steps to perform Returns: Accepted samples \"\"\" eps = torch . rand ( ( num_steps , self . n_dims ), dtype = self . prop_scale . dtype , device = self . prop_scale . device , ) z_ = self . prop_scale * eps + self . prop_shift prob = torch . rand ( num_steps , dtype = self . prop_scale . dtype , device = self . prop_scale . device ) prob_ = torch . exp ( self . log_prob ( z_ ) - self . max_log_prob ) accept = prob_ > prob z = z_ [ accept , :] return z","title":"rejection_sampling"},{"location":"references/#normflows.distributions.target.Target.sample","text":"Sample from image distribution through rejection sampling Parameters: Name Type Description Default num_samples Number of samples to draw 1 Returns: Type Description Samples Source code in normflows/distributions/target.py 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 def sample ( self , num_samples = 1 ): \"\"\"Sample from image distribution through rejection sampling Args: num_samples: Number of samples to draw Returns: Samples \"\"\" z = torch . zeros ( ( 0 , self . n_dims ), dtype = self . prop_scale . dtype , device = self . prop_scale . device ) while len ( z ) < num_samples : z_ = self . rejection_sampling ( num_samples ) ind = np . min ([ len ( z_ ), num_samples - len ( z )]) z = torch . cat ([ z , z_ [: ind , :]], 0 ) return z","title":"sample"},{"location":"references/#normflows.distributions.target.TwoIndependent","text":"Bases: Target Target distribution that combines two independent distributions of equal size into one distribution. This is needed for Augmented Normalizing Flows, see https://arxiv.org/abs/2002.07101 Source code in normflows/distributions/target.py 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 class TwoIndependent ( Target ): \"\"\" Target distribution that combines two independent distributions of equal size into one distribution. This is needed for Augmented Normalizing Flows, see https://arxiv.org/abs/2002.07101 \"\"\" def __init__ ( self , target1 , target2 ): super () . __init__ () self . target1 = target1 self . target2 = target2 self . split = Split ( mode = 'channel' ) def log_prob ( self , z ): z1 , z2 = self . split ( z )[ 0 ] return self . target1 . log_prob ( z1 ) + self . target2 . log_prob ( z2 ) def sample ( self , num_samples = 1 ): z1 = self . target1 . sample ( num_samples ) z2 = self . target2 . 
sample ( num_samples ) return self . split . inverse ([ z1 , z2 ])[ 0 ]","title":"TwoIndependent"},{"location":"references/#normflows.distributions.target.TwoMoons","text":"Bases: Target Bimodal two-dimensional distribution Source code in normflows/distributions/target.py 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 class TwoMoons ( Target ): \"\"\" Bimodal two-dimensional distribution \"\"\" def __init__ ( self ): super () . __init__ () self . n_dims = 2 self . max_log_prob = 0.0 def log_prob ( self , z ): \"\"\" ``` log(p) = - 1/2 * ((norm(z) - 2) / 0.2) ** 2 + log( exp(-1/2 * ((z[0] - 2) / 0.3) ** 2) + exp(-1/2 * ((z[0] + 2) / 0.3) ** 2)) ``` Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" a = torch . abs ( z [:, 0 ]) log_prob = ( - 0.5 * (( torch . norm ( z , dim = 1 ) - 2 ) / 0.2 ) ** 2 - 0.5 * (( a - 2 ) / 0.3 ) ** 2 + torch . log ( 1 + torch . exp ( - 4 * a / 0.09 )) ) return log_prob","title":"TwoMoons"},{"location":"references/#normflows.distributions.target.TwoMoons.log_prob","text":"log(p) = - 1/2 * ((norm(z) - 2) / 0.2) ** 2 + log( exp(-1/2 * ((z[0] - 2) / 0.3) ** 2) + exp(-1/2 * ((z[0] + 2) / 0.3) ** 2)) Parameters: Name Type Description Default z value or batch of latent variable required Returns: Type Description log probability of the distribution for z Source code in normflows/distributions/target.py 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def log_prob ( self , z ): \"\"\" ``` log(p) = - 1/2 * ((norm(z) - 2) / 0.2) ** 2 + log( exp(-1/2 * ((z[0] - 2) / 0.3) ** 2) + exp(-1/2 * ((z[0] + 2) / 0.3) ** 2)) ``` Args: z: value or batch of latent variable Returns: log probability of the distribution for z \"\"\" a = torch . abs ( z [:, 0 ]) log_prob = ( - 0.5 * (( torch . norm ( z , dim = 1 ) - 2 ) / 0.2 ) ** 2 - 0.5 * (( a - 2 ) / 0.3 ) ** 2 + torch . log ( 1 + torch . exp ( - 4 * a / 0.09 )) ) return log_prob","title":"log_prob"},{"location":"references/#normflows.distributions.target_test","text":"","title":"target_test"},{"location":"references/#normflows.flows","text":"","title":"flows"},{"location":"references/#normflows.flows.affine","text":"","title":"affine"},{"location":"references/#normflows.flows.affine.autoregressive","text":"","title":"autoregressive"},{"location":"references/#normflows.flows.affine.autoregressive.Autoregressive","text":"Bases: Flow Transforms each input variable with an invertible elementwise transformation. The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables. NOTE Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform. Source code in normflows/flows/affine/autoregressive.py 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 class Autoregressive ( Flow ): \"\"\"Transforms each input variable with an invertible elementwise transformation. The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables. **NOTE** Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform. 
\"\"\" def __init__ ( self , autoregressive_net ): super ( Autoregressive , self ) . __init__ () self . autoregressive_net = autoregressive_net def forward ( self , inputs , context = None ): autoregressive_params = self . autoregressive_net ( inputs , context ) outputs , logabsdet = self . _elementwise_forward ( inputs , autoregressive_params ) return outputs , logabsdet def inverse ( self , inputs , context = None ): num_inputs = np . prod ( inputs . shape [ 1 :]) outputs = torch . zeros_like ( inputs ) logabsdet = None for _ in range ( num_inputs ): autoregressive_params = self . autoregressive_net ( outputs , context ) outputs , logabsdet = self . _elementwise_inverse ( inputs , autoregressive_params ) return outputs , logabsdet def _output_dim_multiplier ( self ): raise NotImplementedError () def _elementwise_forward ( self , inputs , autoregressive_params ): raise NotImplementedError () def _elementwise_inverse ( self , inputs , autoregressive_params ): raise NotImplementedError ()","title":"Autoregressive"},{"location":"references/#normflows.flows.affine.autoregressive.MaskedAffineAutoregressive","text":"Bases: Autoregressive Masked affine autoregressive flow, mostly referred to as Masked Autoregressive Flow (MAF), see arXiv 1705.07057 . Source code in normflows/flows/affine/autoregressive.py 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 class MaskedAffineAutoregressive ( Autoregressive ): \"\"\" Masked affine autoregressive flow, mostly referred to as Masked Autoregressive Flow (MAF), see [arXiv 1705.07057](https://arxiv.org/abs/1705.07057). \"\"\" def __init__ ( self , features , hidden_features , context_features = None , num_blocks = 2 , use_residual_blocks = True , random_mask = False , activation = F . relu , dropout_probability = 0.0 , use_batch_norm = False , ): \"\"\"Constructor Args: features: Number of features/input dimensions hidden_features: Number of hidden units in the MADE network context_features: Number of context/conditional features num_blocks: Number of blocks in the MADE network use_residual_blocks: Flag whether residual blocks should be used random_mask: Flag whether to use random masks activation: Activation function to be used in the MADE network dropout_probability: Dropout probability in the MADE network use_batch_norm: Flag whether batch normalization should be used \"\"\" self . features = features made = made_module . MADE ( features = features , hidden_features = hidden_features , context_features = context_features , num_blocks = num_blocks , output_multiplier = self . _output_dim_multiplier (), use_residual_blocks = use_residual_blocks , random_mask = random_mask , activation = activation , dropout_probability = dropout_probability , use_batch_norm = use_batch_norm , ) super ( MaskedAffineAutoregressive , self ) . __init__ ( made ) def _output_dim_multiplier ( self ): return 2 def _elementwise_forward ( self , inputs , autoregressive_params ): unconstrained_scale , shift = self . _unconstrained_scale_and_shift ( autoregressive_params ) scale = torch . sigmoid ( unconstrained_scale + 2.0 ) + 1e-3 log_scale = torch . log ( scale ) outputs = scale * inputs + shift logabsdet = utils . 
sum_except_batch ( log_scale , num_batch_dims = 1 ) return outputs , logabsdet def _elementwise_inverse ( self , inputs , autoregressive_params ): unconstrained_scale , shift = self . _unconstrained_scale_and_shift ( autoregressive_params ) scale = torch . sigmoid ( unconstrained_scale + 2.0 ) + 1e-3 log_scale = torch . log ( scale ) outputs = ( inputs - shift ) / scale logabsdet = - utils . sum_except_batch ( log_scale , num_batch_dims = 1 ) return outputs , logabsdet def _unconstrained_scale_and_shift ( self , autoregressive_params ): # split_idx = autoregressive_params.size(1) // 2 # unconstrained_scale = autoregressive_params[..., :split_idx] # shift = autoregressive_params[..., split_idx:] # return unconstrained_scale, shift autoregressive_params = autoregressive_params . view ( - 1 , self . features , self . _output_dim_multiplier () ) unconstrained_scale = autoregressive_params [ ... , 0 ] shift = autoregressive_params [ ... , 1 ] return unconstrained_scale , shift","title":"MaskedAffineAutoregressive"},{"location":"references/#normflows.flows.affine.autoregressive.MaskedAffineAutoregressive.__init__","text":"Constructor Parameters: Name Type Description Default features Number of features/input dimensions required hidden_features Number of hidden units in the MADE network required context_features Number of context/conditional features None num_blocks Number of blocks in the MADE network 2 use_residual_blocks Flag whether residual blocks should be used True random_mask Flag whether to use random masks False activation Activation function to be used in the MADE network relu dropout_probability Dropout probability in the MADE network 0.0 use_batch_norm Flag whether batch normalization should be used False Source code in normflows/flows/affine/autoregressive.py 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 def __init__ ( self , features , hidden_features , context_features = None , num_blocks = 2 , use_residual_blocks = True , random_mask = False , activation = F . relu , dropout_probability = 0.0 , use_batch_norm = False , ): \"\"\"Constructor Args: features: Number of features/input dimensions hidden_features: Number of hidden units in the MADE network context_features: Number of context/conditional features num_blocks: Number of blocks in the MADE network use_residual_blocks: Flag whether residual blocks should be used random_mask: Flag whether to use random masks activation: Activation function to be used in the MADE network dropout_probability: Dropout probability in the MADE network use_batch_norm: Flag whether batch normalization should be used \"\"\" self . features = features made = made_module . MADE ( features = features , hidden_features = hidden_features , context_features = context_features , num_blocks = num_blocks , output_multiplier = self . _output_dim_multiplier (), use_residual_blocks = use_residual_blocks , random_mask = random_mask , activation = activation , dropout_probability = dropout_probability , use_batch_norm = use_batch_norm , ) super ( MaskedAffineAutoregressive , self ) . __init__ ( made )","title":"__init__"},{"location":"references/#normflows.flows.affine.autoregressive_test","text":"","title":"autoregressive_test"},{"location":"references/#normflows.flows.affine.coupling","text":"","title":"coupling"},{"location":"references/#normflows.flows.affine.coupling.AffineConstFlow","text":"Bases: Flow scales and shifts with learned constants per dimension. 
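A minimal sketch of the autoregressive flow in use, assuming the class is re-exported as `nf.flows.MaskedAffineAutoregressive` (as in the package's top-level API): the forward pass is a single MADE evaluation, while the inverse needs D sequential passes, matching the note above.

```python
import torch
import normflows as nf

maf = nf.flows.MaskedAffineAutoregressive(features=4, hidden_features=32)

x = torch.randn(8, 4)
y, log_det = maf(x)        # one network pass
x_rec, _ = maf.inverse(y)  # D = 4 sequential passes
print(torch.allclose(x, x_rec, atol=1e-5))  # True
```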
In the NICE paper there is a scaling layer which is a special case of this where t is None Source code in normflows/flows/affine/coupling.py 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 class AffineConstFlow ( Flow ): \"\"\" scales and shifts with learned constants per dimension. In the NICE paper there is a scaling layer which is a special case of this where t is None \"\"\" def __init__ ( self , shape , scale = True , shift = True ): \"\"\"Constructor Args: shape: Shape of the coupling layer scale: Flag whether to apply scaling shift: Flag whether to apply shift logscale_factor: Optional factor which can be used to control the scale of the log scale factor \"\"\" super () . __init__ () if scale : self . s = nn . Parameter ( torch . zeros ( shape )[ None ]) else : self . register_buffer ( \"s\" , torch . zeros ( shape )[ None ]) if shift : self . t = nn . Parameter ( torch . zeros ( shape )[ None ]) else : self . register_buffer ( \"t\" , torch . zeros ( shape )[ None ]) self . n_dim = self . s . dim () self . batch_dims = torch . nonzero ( torch . tensor ( self . s . shape ) == 1 , as_tuple = False )[:, 0 ] . tolist () def forward ( self , z ): z_ = z * torch . exp ( self . s ) + self . t if len ( self . batch_dims ) > 1 : prod_batch_dims = np . prod ([ z . size ( i ) for i in self . batch_dims [ 1 :]]) else : prod_batch_dims = 1 log_det = prod_batch_dims * torch . sum ( self . s ) return z_ , log_det def inverse ( self , z ): z_ = ( z - self . t ) * torch . exp ( - self . s ) if len ( self . batch_dims ) > 1 : prod_batch_dims = np . prod ([ z . size ( i ) for i in self . batch_dims [ 1 :]]) else : prod_batch_dims = 1 log_det = - prod_batch_dims * torch . sum ( self . s ) return z_ , log_det","title":"AffineConstFlow"},{"location":"references/#normflows.flows.affine.coupling.AffineConstFlow.__init__","text":"Constructor Parameters: Name Type Description Default shape Shape of the coupling layer required scale Flag whether to apply scaling True shift Flag whether to apply shift True logscale_factor Optional factor which can be used to control the scale of the log scale factor required Source code in normflows/flows/affine/coupling.py 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 def __init__ ( self , shape , scale = True , shift = True ): \"\"\"Constructor Args: shape: Shape of the coupling layer scale: Flag whether to apply scaling shift: Flag whether to apply shift logscale_factor: Optional factor which can be used to control the scale of the log scale factor \"\"\" super () . __init__ () if scale : self . s = nn . Parameter ( torch . zeros ( shape )[ None ]) else : self . register_buffer ( \"s\" , torch . zeros ( shape )[ None ]) if shift : self . t = nn . Parameter ( torch . zeros ( shape )[ None ]) else : self . register_buffer ( \"t\" , torch . zeros ( shape )[ None ]) self . n_dim = self . s . dim () self . batch_dims = torch . nonzero ( torch . tensor ( self . s . shape ) == 1 , as_tuple = False )[:, 0 ] . 
tolist ()","title":"__init__"},{"location":"references/#normflows.flows.affine.coupling.AffineCoupling","text":"Bases: Flow Affine Coupling layer as introduced RealNVP paper, see arXiv: 1605.08803 Source code in normflows/flows/affine/coupling.py 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 class AffineCoupling ( Flow ): \"\"\" Affine Coupling layer as introduced RealNVP paper, see arXiv: 1605.08803 \"\"\" def __init__ ( self , param_map , scale = True , scale_map = \"exp\" ): \"\"\"Constructor Args: param_map: Maps features to shift and scale parameter (if applicable) scale: Flag whether scale shall be applied scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow, 'sigmoid_inv' uses multiplicative sigmoid scale when sampling from the model \"\"\" super () . __init__ () self . add_module ( \"param_map\" , param_map ) self . scale = scale self . scale_map = scale_map def forward ( self , z ): \"\"\" z is a list of z1 and z2; ```z = [z1, z2]``` z1 is left constant and affine map is applied to z2 with parameters depending on z1 Args: z \"\"\" z1 , z2 = z param = self . param_map ( z1 ) if self . scale : shift = param [:, 0 :: 2 , ... ] scale_ = param [:, 1 :: 2 , ... ] if self . scale_map == \"exp\" : z2 = z2 * torch . exp ( scale_ ) + shift log_det = torch . sum ( scale_ , dim = list ( range ( 1 , shift . dim ()))) elif self . scale_map == \"sigmoid\" : scale = torch . sigmoid ( scale_ + 2 ) z2 = z2 / scale + shift log_det = - torch . sum ( torch . log ( scale ), dim = list ( range ( 1 , shift . dim ()))) elif self . scale_map == \"sigmoid_inv\" : scale = torch . sigmoid ( scale_ + 2 ) z2 = z2 * scale + shift log_det = torch . sum ( torch . log ( scale ), dim = list ( range ( 1 , shift . dim ()))) else : raise NotImplementedError ( \"This scale map is not implemented.\" ) else : z2 = z2 + param log_det = zero_log_det_like_z ( z2 ) return [ z1 , z2 ], log_det def inverse ( self , z ): z1 , z2 = z param = self . param_map ( z1 ) if self . scale : shift = param [:, 0 :: 2 , ... ] scale_ = param [:, 1 :: 2 , ... ] if self . scale_map == \"exp\" : z2 = ( z2 - shift ) * torch . exp ( - scale_ ) log_det = - torch . sum ( scale_ , dim = list ( range ( 1 , shift . dim ()))) elif self . scale_map == \"sigmoid\" : scale = torch . sigmoid ( scale_ + 2 ) z2 = ( z2 - shift ) * scale log_det = torch . sum ( torch . log ( scale ), dim = list ( range ( 1 , shift . dim ()))) elif self . scale_map == \"sigmoid_inv\" : scale = torch . sigmoid ( scale_ + 2 ) z2 = ( z2 - shift ) / scale log_det = - torch . sum ( torch . log ( scale ), dim = list ( range ( 1 , shift . 
dim ()))) else : raise NotImplementedError ( \"This scale map is not implemented.\" ) else : z2 = z2 - param log_det = zero_log_det_like_z ( z2 ) return [ z1 , z2 ], log_det","title":"AffineCoupling"},{"location":"references/#normflows.flows.affine.coupling.AffineCoupling.__init__","text":"Constructor Parameters: Name Type Description Default param_map Maps features to shift and scale parameter (if applicable) required scale Flag whether scale shall be applied True scale_map Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow, 'sigmoid_inv' uses multiplicative sigmoid scale when sampling from the model 'exp' Source code in normflows/flows/affine/coupling.py 104 105 106 107 108 109 110 111 112 113 114 115 def __init__ ( self , param_map , scale = True , scale_map = \"exp\" ): \"\"\"Constructor Args: param_map: Maps features to shift and scale parameter (if applicable) scale: Flag whether scale shall be applied scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow, 'sigmoid_inv' uses multiplicative sigmoid scale when sampling from the model \"\"\" super () . __init__ () self . add_module ( \"param_map\" , param_map ) self . scale = scale self . scale_map = scale_map","title":"__init__"},{"location":"references/#normflows.flows.affine.coupling.AffineCoupling.forward","text":"z is a list of z1 and z2; z = [z1, z2] z1 is left constant and affine map is applied to z2 with parameters depending on z1 Source code in normflows/flows/affine/coupling.py 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 def forward ( self , z ): \"\"\" z is a list of z1 and z2; ```z = [z1, z2]``` z1 is left constant and affine map is applied to z2 with parameters depending on z1 Args: z \"\"\" z1 , z2 = z param = self . param_map ( z1 ) if self . scale : shift = param [:, 0 :: 2 , ... ] scale_ = param [:, 1 :: 2 , ... ] if self . scale_map == \"exp\" : z2 = z2 * torch . exp ( scale_ ) + shift log_det = torch . sum ( scale_ , dim = list ( range ( 1 , shift . dim ()))) elif self . scale_map == \"sigmoid\" : scale = torch . sigmoid ( scale_ + 2 ) z2 = z2 / scale + shift log_det = - torch . sum ( torch . log ( scale ), dim = list ( range ( 1 , shift . dim ()))) elif self . scale_map == \"sigmoid_inv\" : scale = torch . sigmoid ( scale_ + 2 ) z2 = z2 * scale + shift log_det = torch . sum ( torch . log ( scale ), dim = list ( range ( 1 , shift . 
dim ()))) else : raise NotImplementedError ( \"This scale map is not implemented.\" ) else : z2 = z2 + param log_det = zero_log_det_like_z ( z2 ) return [ z1 , z2 ], log_det","title":"forward"},{"location":"references/#normflows.flows.affine.coupling.AffineCouplingBlock","text":"Bases: Flow Affine Coupling layer including split and merge operation Source code in normflows/flows/affine/coupling.py 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 class AffineCouplingBlock ( Flow ): \"\"\" Affine Coupling layer including split and merge operation \"\"\" def __init__ ( self , param_map , scale = True , scale_map = \"exp\" , split_mode = \"channel\" ): \"\"\"Constructor Args: param_map: Maps features to shift and scale parameter (if applicable) scale: Flag whether scale shall be applied scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow split_mode: Splitting mode, for possible values see Split class \"\"\" super () . __init__ () self . flows = nn . ModuleList ([]) # Split layer self . flows += [ Split ( split_mode )] # Affine coupling layer self . flows += [ AffineCoupling ( param_map , scale , scale_map )] # Merge layer self . flows += [ Merge ( split_mode )] def forward ( self , z ): log_det_tot = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) for flow in self . flows : z , log_det = flow ( z ) log_det_tot += log_det return z , log_det_tot def inverse ( self , z ): log_det_tot = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . inverse ( z ) log_det_tot += log_det return z , log_det_tot","title":"AffineCouplingBlock"},{"location":"references/#normflows.flows.affine.coupling.AffineCouplingBlock.__init__","text":"Constructor Parameters: Name Type Description Default param_map Maps features to shift and scale parameter (if applicable) required scale Flag whether scale shall be applied True scale_map Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow 'exp' split_mode Splitting mode, for possible values see Split class 'channel' Source code in normflows/flows/affine/coupling.py 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 def __init__ ( self , param_map , scale = True , scale_map = \"exp\" , split_mode = \"channel\" ): \"\"\"Constructor Args: param_map: Maps features to shift and scale parameter (if applicable) scale: Flag whether scale shall be applied scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow split_mode: Splitting mode, for possible values see Split class \"\"\" super () . __init__ () self . flows = nn . ModuleList ([]) # Split layer self . flows += [ Split ( split_mode )] # Affine coupling layer self . flows += [ AffineCoupling ( param_map , scale , scale_map )] # Merge layer self . 
flows += [ Merge ( split_mode )]","title":"__init__"},{"location":"references/#normflows.flows.affine.coupling.CCAffineConst","text":"Bases: Flow Affine constant flow layer with class-conditional parameters Source code in normflows/flows/affine/coupling.py 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 class CCAffineConst ( Flow ): \"\"\" Affine constant flow layer with class-conditional parameters \"\"\" def __init__ ( self , shape , num_classes ): super () . __init__ () if isinstance ( shape , int ): shape = ( shape ,) self . shape = shape self . s = nn . Parameter ( torch . zeros ( shape )[ None ]) self . t = nn . Parameter ( torch . zeros ( shape )[ None ]) self . s_cc = nn . Parameter ( torch . zeros ( num_classes , np . prod ( shape ))) self . t_cc = nn . Parameter ( torch . zeros ( num_classes , np . prod ( shape ))) self . n_dim = self . s . dim () self . batch_dims = torch . nonzero ( torch . tensor ( self . s . shape ) == 1 , as_tuple = False )[:, 0 ] . tolist () def forward ( self , z , y ): s = self . s + ( y @ self . s_cc ) . view ( - 1 , * self . shape ) t = self . t + ( y @ self . t_cc ) . view ( - 1 , * self . shape ) z_ = z * torch . exp ( s ) + t if len ( self . batch_dims ) > 1 : prod_batch_dims = np . prod ([ z . size ( i ) for i in self . batch_dims [ 1 :]]) else : prod_batch_dims = 1 log_det = prod_batch_dims * torch . sum ( s , dim = list ( range ( 1 , self . n_dim ))) return z_ , log_det def inverse ( self , z , y ): s = self . s + ( y @ self . s_cc ) . view ( - 1 , * self . shape ) t = self . t + ( y @ self . t_cc ) . view ( - 1 , * self . shape ) z_ = ( z - t ) * torch . exp ( - s ) if len ( self . batch_dims ) > 1 : prod_batch_dims = np . prod ([ z . size ( i ) for i in self . batch_dims [ 1 :]]) else : prod_batch_dims = 1 log_det = - prod_batch_dims * torch . sum ( s , dim = list ( range ( 1 , self . n_dim ))) return z_ , log_det","title":"CCAffineConst"},{"location":"references/#normflows.flows.affine.coupling.MaskedAffineFlow","text":"Bases: Flow RealNVP as introduced in arXiv: 1605.08803 Masked affine flow: f(z) = b * z + (1 - b) * (z * exp(s(b * z)) + t) class AffineHalfFlow(Flow): is MaskedAffineFlow with alternating bit mask NICE is AffineFlow with only shifts (volume preserving) Source code in normflows/flows/affine/coupling.py 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 class MaskedAffineFlow ( Flow ): \"\"\"RealNVP as introduced in [arXiv: 1605.08803](https://arxiv.org/abs/1605.08803) Masked affine flow: ``` f(z) = b * z + (1 - b) * (z * exp(s(b * z)) + t) ``` - class AffineHalfFlow(Flow): is MaskedAffineFlow with alternating bit mask - NICE is AffineFlow with only shifts (volume preserving) \"\"\" def __init__ ( self , b , t = None , s = None ): \"\"\"Constructor Args: b: mask for features, i.e. tensor of same size as latent data point filled with 0s and 1s t: translation mapping, i.e. neural network, where first input dimension is batch dim, if None no translation is applied s: scale mapping, i.e. neural network, where first input dimension is batch dim, if None no scale is applied \"\"\" super () . __init__ () self . b_cpu = b . view ( 1 , * b . size ()) self . register_buffer ( \"b\" , self . b_cpu ) if s is None : self . s = torch . zeros_like else : self . 
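Putting the coupling machinery together: `AffineCouplingBlock` wraps `Split`, `AffineCoupling`, and `Merge` so it can be applied to a plain tensor. For 4-dim vectors with a channel split, the param map receives dim(z1) = 2 inputs and must emit 2 * dim(z2) = 4 outputs (shift and scale interleaved). A sketch using the `MLP` helper from `normflows.nets`:

```python
import torch
import normflows as nf

# init_zeros makes the block start as the identity map
param_map = nf.nets.MLP([2, 32, 4], init_zeros=True)
block = nf.flows.AffineCouplingBlock(param_map, scale=True, scale_map="exp")

z = torch.randn(8, 4)
z_out, log_det = block(z)
z_rec, _ = block.inverse(z_out)
print(torch.allclose(z, z_rec, atol=1e-6))  # True
```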
add_module ( \"s\" , s ) if t is None : self . t = torch . zeros_like else : self . add_module ( \"t\" , t ) def forward ( self , z ): z_masked = self . b * z scale = self . s ( z_masked ) nan = torch . tensor ( np . nan , dtype = z . dtype , device = z . device ) scale = torch . where ( torch . isfinite ( scale ), scale , nan ) trans = self . t ( z_masked ) trans = torch . where ( torch . isfinite ( trans ), trans , nan ) z_ = z_masked + ( 1 - self . b ) * ( z * torch . exp ( scale ) + trans ) log_det = torch . sum (( 1 - self . b ) * scale , dim = list ( range ( 1 , self . b . dim ()))) return z_ , log_det def inverse ( self , z ): z_masked = self . b * z scale = self . s ( z_masked ) nan = torch . tensor ( np . nan , dtype = z . dtype , device = z . device ) scale = torch . where ( torch . isfinite ( scale ), scale , nan ) trans = self . t ( z_masked ) trans = torch . where ( torch . isfinite ( trans ), trans , nan ) z_ = z_masked + ( 1 - self . b ) * ( z - trans ) * torch . exp ( - scale ) log_det = - torch . sum (( 1 - self . b ) * scale , dim = list ( range ( 1 , self . b . dim ()))) return z_ , log_det","title":"MaskedAffineFlow"},{"location":"references/#normflows.flows.affine.coupling.MaskedAffineFlow.__init__","text":"Constructor Parameters: Name Type Description Default b mask for features, i.e. tensor of same size as latent data point filled with 0s and 1s required t translation mapping, i.e. neural network, where first input dimension is batch dim, if None no translation is applied None s scale mapping, i.e. neural network, where first input dimension is batch dim, if None no scale is applied None Source code in normflows/flows/affine/coupling.py 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 def __init__ ( self , b , t = None , s = None ): \"\"\"Constructor Args: b: mask for features, i.e. tensor of same size as latent data point filled with 0s and 1s t: translation mapping, i.e. neural network, where first input dimension is batch dim, if None no translation is applied s: scale mapping, i.e. neural network, where first input dimension is batch dim, if None no scale is applied \"\"\" super () . __init__ () self . b_cpu = b . view ( 1 , * b . size ()) self . register_buffer ( \"b\" , self . b_cpu ) if s is None : self . s = torch . zeros_like else : self . add_module ( \"s\" , s ) if t is None : self . t = torch . zeros_like else : self . 
add_module ( \"t\" , t )","title":"__init__"},{"location":"references/#normflows.flows.affine.coupling_test","text":"","title":"coupling_test"},{"location":"references/#normflows.flows.affine.glow","text":"","title":"glow"},{"location":"references/#normflows.flows.affine.glow.GlowBlock","text":"Bases: Flow Glow: Generative Flow with Invertible 1\u00d71 Convolutions, arXiv: 1807.03039 One Block of the Glow model, comprised of MaskedAffineFlow (affine coupling layer) Invertible1x1Conv (dropped if there is only one channel) ActNorm (first batch used for initialization) Source code in normflows/flows/affine/glow.py 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 class GlowBlock ( Flow ): \"\"\"Glow: Generative Flow with Invertible 1\u00d71 Convolutions, [arXiv: 1807.03039](https://arxiv.org/abs/1807.03039) One Block of the Glow model, comprised of - MaskedAffineFlow (affine coupling layer) - Invertible1x1Conv (dropped if there is only one channel) - ActNorm (first batch used for initialization) \"\"\" def __init__ ( self , channels , hidden_channels , scale = True , scale_map = \"sigmoid\" , split_mode = \"channel\" , leaky = 0.0 , init_zeros = True , use_lu = True , net_actnorm = False , ): \"\"\"Constructor Args: channels: Number of channels of the data hidden_channels: number of channels in the hidden layer of the ConvNet scale: Flag, whether to include scale in affine coupling layer scale_map: Map to be applied to the scale parameter, can be 'exp' as in RealNVP or 'sigmoid' as in Glow split_mode: Splitting mode, for possible values see Split class leaky: Leaky parameter of LeakyReLUs of ConvNet2d init_zeros: Flag whether to initialize last conv layer with zeros use_lu: Flag whether to parametrize weights through the LU decomposition in invertible 1x1 convolution layers logscale_factor: Factor which can be used to control the scale of the log scale factor, see [source](https://github.com/openai/glow) \"\"\" super () . __init__ () self . flows = nn . ModuleList ([]) # Coupling layer kernel_size = ( 3 , 1 , 3 ) num_param = 2 if scale else 1 if \"channel\" == split_mode : channels_ = (( channels + 1 ) // 2 ,) + 2 * ( hidden_channels ,) channels_ += ( num_param * ( channels // 2 ),) elif \"channel_inv\" == split_mode : channels_ = ( channels // 2 ,) + 2 * ( hidden_channels ,) channels_ += ( num_param * (( channels + 1 ) // 2 ),) elif \"checkerboard\" in split_mode : channels_ = ( channels ,) + 2 * ( hidden_channels ,) channels_ += ( num_param * channels ,) else : raise NotImplementedError ( \"Mode \" + split_mode + \" is not implemented.\" ) param_map = nets . ConvNet2d ( channels_ , kernel_size , leaky , init_zeros , actnorm = net_actnorm ) self . flows += [ AffineCouplingBlock ( param_map , scale , scale_map , split_mode )] # Invertible 1x1 convolution if channels > 1 : self . flows += [ Invertible1x1Conv ( channels , use_lu )] # Activation normalization self . flows += [ ActNorm (( channels ,) + ( 1 , 1 ))] def forward ( self , z ): log_det_tot = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) for flow in self . flows : z , log_det = flow ( z ) log_det_tot += log_det return z , log_det_tot def inverse ( self , z ): log_det_tot = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) for i in range ( len ( self . flows ) - 1 , - 1 , - 1 ): z , log_det = self . flows [ i ] . 
### normflows.flows.affine.coupling_test

### normflows.flows.affine.glow

### normflows.flows.affine.glow.GlowBlock

Bases: `Flow`

Glow: Generative Flow with Invertible 1×1 Convolutions, [arXiv: 1807.03039](https://arxiv.org/abs/1807.03039)

One block of the Glow model, comprised of

- `MaskedAffineFlow` (affine coupling layer)
- `Invertible1x1Conv` (dropped if there is only one channel)
- `ActNorm` (first batch used for initialization)

Source code in `normflows/flows/affine/glow.py`:

```python
class GlowBlock(Flow):
    def __init__(self, channels, hidden_channels, scale=True, scale_map="sigmoid",
                 split_mode="channel", leaky=0.0, init_zeros=True, use_lu=True,
                 net_actnorm=False):
        super().__init__()
        self.flows = nn.ModuleList([])
        # Coupling layer
        kernel_size = (3, 1, 3)
        num_param = 2 if scale else 1
        if "channel" == split_mode:
            channels_ = ((channels + 1) // 2,) + 2 * (hidden_channels,)
            channels_ += (num_param * (channels // 2),)
        elif "channel_inv" == split_mode:
            channels_ = (channels // 2,) + 2 * (hidden_channels,)
            channels_ += (num_param * ((channels + 1) // 2),)
        elif "checkerboard" in split_mode:
            channels_ = (channels,) + 2 * (hidden_channels,)
            channels_ += (num_param * channels,)
        else:
            raise NotImplementedError("Mode " + split_mode + " is not implemented.")
        param_map = nets.ConvNet2d(channels_, kernel_size, leaky, init_zeros,
                                   actnorm=net_actnorm)
        self.flows += [AffineCouplingBlock(param_map, scale, scale_map, split_mode)]
        # Invertible 1x1 convolution
        if channels > 1:
            self.flows += [Invertible1x1Conv(channels, use_lu)]
        # Activation normalization
        self.flows += [ActNorm((channels,) + (1, 1))]

    def forward(self, z):
        log_det_tot = torch.zeros(z.shape[0], dtype=z.dtype, device=z.device)
        for flow in self.flows:
            z, log_det = flow(z)
            log_det_tot += log_det
        return z, log_det_tot

    def inverse(self, z):
        log_det_tot = torch.zeros(z.shape[0], dtype=z.dtype, device=z.device)
        for i in range(len(self.flows) - 1, -1, -1):
            z, log_det = self.flows[i].inverse(z)
            log_det_tot += log_det
        return z, log_det_tot
```

### normflows.flows.affine.glow.GlowBlock.__init__

Constructor

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `channels` | Number of channels of the data | required |
| `hidden_channels` | Number of channels in the hidden layer of the ConvNet | required |
| `scale` | Flag, whether to include a scale in the affine coupling layer | `True` |
| `scale_map` | Map applied to the scale parameter, can be `'exp'` as in RealNVP or `'sigmoid'` as in Glow | `'sigmoid'` |
| `split_mode` | Splitting mode, for possible values see the `Split` class | `'channel'` |
| `leaky` | Leaky parameter of the LeakyReLUs of `ConvNet2d` | `0.0` |
| `init_zeros` | Flag, whether to initialize the last conv layer with zeros | `True` |
| `use_lu` | Flag, whether to parametrize the weights of the invertible 1x1 convolution through the LU decomposition | `True` |
| `net_actnorm` | Flag, whether to use activation normalization in `ConvNet2d` | `False` |

### normflows.flows.affine.glow_test

### normflows.flows.base

### normflows.flows.base.Composite

Bases: `Flow`

Composes several flows into one, in the order they are given.

Source code in `normflows/flows/base.py`:

```python
class Composite(Flow):
    """Composes several flows into one, in the order they are given."""

    def __init__(self, flows):
        """Constructor

        Args:
            flows: Iterable of flows to composite
        """
        super().__init__()
        self._flows = nn.ModuleList(flows)

    @staticmethod
    def _cascade(inputs, funcs):
        batch_size = inputs.shape[0]
        outputs = inputs
        total_logabsdet = torch.zeros(batch_size)
        for func in funcs:
            outputs, logabsdet = func(outputs)
            total_logabsdet += logabsdet
        return outputs, total_logabsdet

    def forward(self, inputs):
        funcs = self._flows
        return self._cascade(inputs, funcs)

    def inverse(self, inputs):
        funcs = (flow.inverse for flow in self._flows[::-1])
        return self._cascade(inputs, funcs)
```

### normflows.flows.base.Composite.__init__

Constructor

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `flows` | Iterable of flows to composite | required |

### normflows.flows.base.Flow

Bases: `Module`

Generic class for flow functions

Source code in `normflows/flows/base.py`:

```python
class Flow(nn.Module):
    """Generic class for flow functions"""

    def __init__(self):
        super().__init__()

    def forward(self, z):
        """
        Args:
            z: input variable, first dimension is batch dim

        Returns:
            transformed z and log of absolute determinant
        """
        raise NotImplementedError("Forward pass has not been implemented.")

    def inverse(self, z):
        raise NotImplementedError("This flow has no algebraic inverse.")
```

### normflows.flows.base.Flow.forward

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `z` | input variable, first dimension is batch dim | required |

Returns: transformed `z` and log of the absolute determinant

### normflows.flows.base.Reverse

Bases: `Flow`

Switches the forward transform of a flow layer with its inverse and vice versa

Source code in `normflows/flows/base.py`:

```python
class Reverse(Flow):
    """Switches the forward transform of a flow layer with its inverse and vice versa"""

    def __init__(self, flow):
        """Constructor

        Args:
            flow: Flow layer to be reversed
        """
        super().__init__()
        self.flow = flow

    def forward(self, z):
        return self.flow.inverse(z)

    def inverse(self, z):
        return self.flow.forward(z)
```

### normflows.flows.base.Reverse.__init__

Constructor

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `flow` | Flow layer to be reversed | required |
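To make the base interface concrete, here is a minimal sketch of a custom flow composed with `Composite` and `Reverse` (the `Shift` class below is a hypothetical example, not part of the library; it assumes `Flow`, `Composite`, and `Reverse` are importable from `normflows.flows` as documented above):

```python
import torch
from normflows.flows import Flow, Composite, Reverse

class Shift(Flow):
    """Hypothetical volume-preserving shift flow: log|det J| = 0."""
    def __init__(self, offset):
        super().__init__()
        self.register_buffer("offset", torch.as_tensor(offset))

    def forward(self, z):
        return z + self.offset, torch.zeros(len(z), device=z.device)

    def inverse(self, z):
        return z - self.offset, torch.zeros(len(z), device=z.device)

flow = Composite([Shift(1.0), Reverse(Shift(0.5))])
z = torch.randn(8, 2)
z_fwd, log_det = flow(z)        # applies Shift(1.0), then Shift(0.5).inverse
z_rec, _ = flow.inverse(z_fwd)  # recovers z exactly
```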
flow = flow","title":"__init__"},{"location":"references/#normflows.flows.base_test","text":"","title":"base_test"},{"location":"references/#normflows.flows.flow_test","text":"","title":"flow_test"},{"location":"references/#normflows.flows.flow_test.FlowTest","text":"Bases: TestCase Generic test case for flow modules Source code in normflows/flows/flow_test.py 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 class FlowTest ( unittest . TestCase ): \"\"\" Generic test case for flow modules \"\"\" def assertClose ( self , actual , expected , atol = None , rtol = None ): assert_close ( actual , expected , atol = atol , rtol = rtol ) def checkForward ( self , flow , inputs , context = None ): # Do forward transform if context is None : outputs , log_det = flow ( inputs ) else : outputs , log_det = flow ( inputs , context ) # Check type assert outputs . dtype == inputs . dtype # Check shape assert outputs . shape == inputs . shape # Return results return outputs , log_det def checkInverse ( self , flow , inputs , context = None ): # Do inverse transform if context is None : outputs , log_det = flow . inverse ( inputs ) else : outputs , log_det = flow . inverse ( inputs , context ) # Check type assert outputs . dtype == inputs . dtype # Check shape assert outputs . shape == inputs . shape # Return results return outputs , log_det def checkForwardInverse ( self , flow , inputs , context = None , atol = None , rtol = None ): # Check forward outputs , log_det = self . checkForward ( flow , inputs , context ) # Check inverse input_ , log_det_ = self . checkInverse ( flow , outputs , context ) # Check identity self . assertClose ( input_ , inputs , atol , rtol ) ld_id = log_det + log_det_ self . assertClose ( ld_id , torch . zeros_like ( ld_id ), atol , rtol )","title":"FlowTest"},{"location":"references/#normflows.flows.mixing","text":"","title":"mixing"},{"location":"references/#normflows.flows.mixing.Invertible1x1Conv","text":"Bases: Flow Invertible 1x1 convolution introduced in the Glow paper Assumes 4d input/output tensors of the form NCHW Source code in normflows/flows/mixing.py 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 class Invertible1x1Conv ( Flow ): \"\"\" Invertible 1x1 convolution introduced in the Glow paper Assumes 4d input/output tensors of the form NCHW \"\"\" def __init__ ( self , num_channels , use_lu = False ): \"\"\"Constructor Args: num_channels: Number of channels of the data use_lu: Flag whether to parametrize weights through the LU decomposition \"\"\" super () . __init__ () self . num_channels = num_channels self . use_lu = use_lu Q , _ = torch . linalg . qr ( torch . randn ( self . num_channels , self . num_channels )) if use_lu : P , L , U = torch . lu_unpack ( * Q . lu ()) self . register_buffer ( \"P\" , P ) # remains fixed during optimization self . L = nn . Parameter ( L ) # lower triangular portion S = U . diag () # \"crop out\" the diagonal to its own parameter self . register_buffer ( \"sign_S\" , torch . sign ( S )) self . log_S = nn . Parameter ( torch . log ( torch . abs ( S ))) self . U = nn . Parameter ( torch . triu ( U , diagonal = 1 ) ) # \"crop out\" diagonal, stored in S self . register_buffer ( \"eye\" , torch . diag ( torch . ones ( self . 
num_channels ))) else : self . W = nn . Parameter ( Q ) def _assemble_W ( self , inverse = False ): # assemble W from its components (P, L, U, S) L = torch . tril ( self . L , diagonal =- 1 ) + self . eye U = torch . triu ( self . U , diagonal = 1 ) + torch . diag ( self . sign_S * torch . exp ( self . log_S ) ) if inverse : if self . log_S . dtype == torch . float64 : L_inv = torch . inverse ( L ) U_inv = torch . inverse ( U ) else : L_inv = torch . inverse ( L . double ()) . type ( self . log_S . dtype ) U_inv = torch . inverse ( U . double ()) . type ( self . log_S . dtype ) W = U_inv @ L_inv @ self . P . t () else : W = self . P @ L @ U return W def forward ( self , z ): if self . use_lu : W = self . _assemble_W ( inverse = True ) log_det = - torch . sum ( self . log_S ) else : W_dtype = self . W . dtype if W_dtype == torch . float64 : W = torch . inverse ( self . W ) else : W = torch . inverse ( self . W . double ()) . type ( W_dtype ) W = W . view ( * W . size (), 1 , 1 ) log_det = - torch . slogdet ( self . W )[ 1 ] W = W . view ( self . num_channels , self . num_channels , 1 , 1 ) z_ = torch . nn . functional . conv2d ( z , W ) log_det = log_det * z . size ( 2 ) * z . size ( 3 ) return z_ , log_det def inverse ( self , z ): if self . use_lu : W = self . _assemble_W () log_det = torch . sum ( self . log_S ) else : W = self . W log_det = torch . slogdet ( self . W )[ 1 ] W = W . view ( self . num_channels , self . num_channels , 1 , 1 ) z_ = torch . nn . functional . conv2d ( z , W ) log_det = log_det * z . size ( 2 ) * z . size ( 3 ) return z_ , log_det","title":"Invertible1x1Conv"},{"location":"references/#normflows.flows.mixing.Invertible1x1Conv.__init__","text":"Constructor Parameters: Name Type Description Default num_channels Number of channels of the data required use_lu Flag whether to parametrize weights through the LU decomposition False Source code in normflows/flows/mixing.py 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 def __init__ ( self , num_channels , use_lu = False ): \"\"\"Constructor Args: num_channels: Number of channels of the data use_lu: Flag whether to parametrize weights through the LU decomposition \"\"\" super () . __init__ () self . num_channels = num_channels self . use_lu = use_lu Q , _ = torch . linalg . qr ( torch . randn ( self . num_channels , self . num_channels )) if use_lu : P , L , U = torch . lu_unpack ( * Q . lu ()) self . register_buffer ( \"P\" , P ) # remains fixed during optimization self . L = nn . Parameter ( L ) # lower triangular portion S = U . diag () # \"crop out\" the diagonal to its own parameter self . register_buffer ( \"sign_S\" , torch . sign ( S )) self . log_S = nn . Parameter ( torch . log ( torch . abs ( S ))) self . U = nn . Parameter ( torch . triu ( U , diagonal = 1 ) ) # \"crop out\" diagonal, stored in S self . register_buffer ( \"eye\" , torch . diag ( torch . ones ( self . num_channels ))) else : self . W = nn . Parameter ( Q )","title":"__init__"},{"location":"references/#normflows.flows.mixing.InvertibleAffine","text":"Bases: Flow Invertible affine transformation without shift, i.e. 
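A short round-trip check makes the invertibility and the log-determinant scaling concrete (a minimal sketch; the channel count and image size are arbitrary assumptions):

```python
import torch
import normflows as nf

conv = nf.flows.Invertible1x1Conv(num_channels=3, use_lu=True)
z = torch.randn(2, 3, 8, 8)                 # NCHW input
z_fwd, log_det = conv(z)                    # log_det is scaled by H * W
z_rec, log_det_inv = conv.inverse(z_fwd)
print(torch.allclose(z, z_rec, atol=1e-4))  # round trip recovers the input
print(float(log_det + log_det_inv))         # ~ 0, as forward and inverse cancel
```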
### normflows.flows.mixing.InvertibleAffine

Bases: `Flow`

Invertible affine transformation without shift, i.e. the one-dimensional version of the invertible 1x1 convolutions

Source code in `normflows/flows/mixing.py`:

```python
class InvertibleAffine(Flow):
    """
    Invertible affine transformation without shift, i.e. one-dimensional
    version of the invertible 1x1 convolutions
    """

    def __init__(self, num_channels, use_lu=True):
        super().__init__()
        self.num_channels = num_channels
        self.use_lu = use_lu
        Q, _ = torch.linalg.qr(torch.randn(self.num_channels, self.num_channels))
        if use_lu:
            P, L, U = torch.lu_unpack(*Q.lu())
            self.register_buffer("P", P)  # remains fixed during optimization
            self.L = nn.Parameter(L)  # lower triangular portion
            S = U.diag()  # "crop out" the diagonal to its own parameter
            self.register_buffer("sign_S", torch.sign(S))
            self.log_S = nn.Parameter(torch.log(torch.abs(S)))
            self.U = nn.Parameter(torch.triu(U, diagonal=1))  # "crop out" diagonal, stored in S
            self.register_buffer("eye", torch.diag(torch.ones(self.num_channels)))
        else:
            self.W = nn.Parameter(Q)

    def _assemble_W(self, inverse=False):
        # assemble W from its components (P, L, U, S)
        L = torch.tril(self.L, diagonal=-1) + self.eye
        U = torch.triu(self.U, diagonal=1) + torch.diag(self.sign_S * torch.exp(self.log_S))
        if inverse:
            if self.log_S.dtype == torch.float64:
                L_inv = torch.inverse(L)
                U_inv = torch.inverse(U)
            else:
                L_inv = torch.inverse(L.double()).type(self.log_S.dtype)
                U_inv = torch.inverse(U.double()).type(self.log_S.dtype)
            W = U_inv @ L_inv @ self.P.t()
        else:
            W = self.P @ L @ U
        return W

    def forward(self, z, context=None):
        if self.use_lu:
            W = self._assemble_W(inverse=True)
            log_det = -torch.sum(self.log_S)
        else:
            W_dtype = self.W.dtype
            if W_dtype == torch.float64:
                W = torch.inverse(self.W)
            else:
                W = torch.inverse(self.W.double()).type(W_dtype)
            log_det = -torch.slogdet(self.W)[1]
        z_ = z @ W
        return z_, log_det

    def inverse(self, z, context=None):
        if self.use_lu:
            W = self._assemble_W()
            log_det = torch.sum(self.log_S)
        else:
            W = self.W
            log_det = torch.slogdet(self.W)[1]
        z_ = z @ W
        return z_, log_det
```

### normflows.flows.mixing.InvertibleAffine.__init__

Constructor

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `num_channels` | Number of channels of the data | required |
| `use_lu` | Flag, whether to parametrize the weights through the LU decomposition | `True` |

### normflows.flows.mixing.LULinearPermute

Bases: `Flow`

Fixed permutation combined with a linear transformation parametrized using the LU decomposition, used in [arXiv: 1906.04032](https://arxiv.org/abs/1906.04032)

Source code in `normflows/flows/mixing.py`:

```python
class LULinearPermute(Flow):
    """
    Fixed permutation combined with a linear transformation parametrized
    using the LU decomposition, used in https://arxiv.org/abs/1906.04032
    """

    def __init__(self, num_channels, identity_init=True):
        # Initialize
        super().__init__()
        # Define modules
        self.permutation = _RandomPermutation(num_channels)
        self.linear = _LULinear(num_channels, identity_init=identity_init)

    def forward(self, z, context=None):
        z, log_det = self.linear.inverse(z, context=context)
        z, _ = self.permutation.inverse(z, context=context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, _ = self.permutation(z, context=context)
        z, log_det = self.linear(z, context=context)
        return z, log_det.view(-1)
```

### normflows.flows.mixing.LULinearPermute.__init__

Constructor

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `num_channels` | Number of dimensions of the data | required |
| `identity_init` | Flag, whether to initialize the linear transform as the identity matrix | `True` |

### normflows.flows.mixing.Permute

Bases: `Flow`

Permutes features along the channel dimension

Source code in `normflows/flows/mixing.py`:

```python
class Permute(Flow):
    """Permutes features along the channel dimension"""

    def __init__(self, num_channels, mode="shuffle"):
        """Constructor

        Args:
            num_channels: Number of channels
            mode: Mode of permuting features, can be 'shuffle' for a random
                permutation or 'swap' for interchanging the upper and lower part
        """
        super().__init__()
        self.mode = mode
        self.num_channels = num_channels
        if self.mode == "shuffle":
            perm = torch.randperm(self.num_channels)
            inv_perm = torch.empty_like(perm).scatter_(
                dim=0, index=perm, src=torch.arange(self.num_channels)
            )
            self.register_buffer("perm", perm)
            self.register_buffer("inv_perm", inv_perm)

    def forward(self, z, context=None):
        if self.mode == "shuffle":
            z = z[:, self.perm, ...]
        elif self.mode == "swap":
            z1 = z[:, : self.num_channels // 2, ...]
            z2 = z[:, self.num_channels // 2 :, ...]
            z = torch.cat([z2, z1], dim=1)
        else:
            raise NotImplementedError("The mode " + self.mode + " is not implemented.")
        log_det = torch.zeros(len(z), device=z.device)
        return z, log_det

    def inverse(self, z, context=None):
        if self.mode == "shuffle":
            z = z[:, self.inv_perm, ...]
        elif self.mode == "swap":
            z1 = z[:, : (self.num_channels + 1) // 2, ...]
            z2 = z[:, (self.num_channels + 1) // 2 :, ...]
            z = torch.cat([z2, z1], dim=1)
        else:
            raise NotImplementedError("The mode " + self.mode + " is not implemented.")
        log_det = torch.zeros(len(z), device=z.device)
        return z, log_det
```

### normflows.flows.mixing.Permute.__init__

Constructor

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `num_channels` | Number of channels | required |
| `mode` | Mode of permuting features, can be `'shuffle'` for a random permutation or `'swap'` for interchanging the upper and lower part | `'shuffle'` |
### normflows.flows.mixing_test

### normflows.flows.neural_spline

### normflows.flows.neural_spline.autoregressive

Implementations of autoregressive transforms. Code taken from https://github.com/bayesiains/nsf

### normflows.flows.neural_spline.autoregressive_test

Tests for the autoregressive transforms. Code partially taken from https://github.com/bayesiains/nsf

### normflows.flows.neural_spline.coupling

Implementations of various coupling layers. Code taken from https://github.com/bayesiains/nsf

### normflows.flows.neural_spline.coupling.Coupling

Bases: `Flow`

A base class for coupling layers. Supports 2D inputs (NxD), as well as 4D inputs for images (NxCxHxW). For images the splitting is done on the channel dimension, using the provided 1D mask.

Source code in `normflows/flows/neural_spline/coupling.py`:

```python
class Coupling(Flow):
    """A base class for coupling layers. Supports 2D inputs (NxD), as well as
    4D inputs for images (NxCxHxW). For images the splitting is done on the
    channel dimension, using the provided 1D mask."""

    def __init__(self, mask, transform_net_create_fn, unconditional_transform=None):
        mask = torch.as_tensor(mask)
        if mask.dim() != 1:
            raise ValueError("Mask must be a 1-dim tensor.")
        if mask.numel() <= 0:
            raise ValueError("Mask can't be empty.")
        super().__init__()
        self.features = len(mask)
        features_vector = torch.arange(self.features)
        self.register_buffer(
            "identity_features", features_vector.masked_select(mask <= 0)
        )
        self.register_buffer(
            "transform_features", features_vector.masked_select(mask > 0)
        )
        assert self.num_identity_features + self.num_transform_features == self.features
        self.transform_net = transform_net_create_fn(
            self.num_identity_features,
            self.num_transform_features * self._transform_dim_multiplier(),
        )
        if unconditional_transform is None:
            self.unconditional_transform = None
        else:
            self.unconditional_transform = unconditional_transform(
                features=self.num_identity_features
            )

    @property
    def num_identity_features(self):
        return len(self.identity_features)

    @property
    def num_transform_features(self):
        return len(self.transform_features)

    def forward(self, inputs, context=None):
        if inputs.dim() not in [2, 4]:
            raise ValueError("Inputs must be a 2D or a 4D tensor.")
        if inputs.shape[1] != self.features:
            raise ValueError(
                "Expected features = {}, got {}.".format(self.features, inputs.shape[1])
            )
        identity_split = inputs[:, self.identity_features, ...]
        transform_split = inputs[:, self.transform_features, ...]
        transform_params = self.transform_net(identity_split, context)
        transform_split, logabsdet = self._coupling_transform_forward(
            inputs=transform_split, transform_params=transform_params
        )
        if self.unconditional_transform is not None:
            identity_split, logabsdet_identity = self.unconditional_transform(
                identity_split, context
            )
            logabsdet += logabsdet_identity
        outputs = torch.empty_like(inputs)
        outputs[:, self.identity_features, ...] = identity_split
        outputs[:, self.transform_features, ...] = transform_split
        return outputs, logabsdet

    def inverse(self, inputs, context=None):
        if inputs.dim() not in [2, 4]:
            raise ValueError("Inputs must be a 2D or a 4D tensor.")
        if inputs.shape[1] != self.features:
            raise ValueError(
                "Expected features = {}, got {}.".format(self.features, inputs.shape[1])
            )
        identity_split = inputs[:, self.identity_features, ...]
        transform_split = inputs[:, self.transform_features, ...]
        logabsdet = 0.0
        if self.unconditional_transform is not None:
            identity_split, logabsdet = self.unconditional_transform.inverse(
                identity_split, context
            )
        transform_params = self.transform_net(identity_split, context)
        transform_split, logabsdet_split = self._coupling_transform_inverse(
            inputs=transform_split, transform_params=transform_params
        )
        logabsdet += logabsdet_split
        outputs = torch.empty_like(inputs)
        outputs[:, self.identity_features] = identity_split
        outputs[:, self.transform_features] = transform_split
        return outputs, logabsdet

    def _transform_dim_multiplier(self):
        """Number of features to output for each transform dimension."""
        raise NotImplementedError()

    def _coupling_transform_forward(self, inputs, transform_params):
        """Forward pass of the coupling transform."""
        raise NotImplementedError()

    def _coupling_transform_inverse(self, inputs, transform_params):
        """Inverse of the coupling transform."""
        raise NotImplementedError()
```

### normflows.flows.neural_spline.coupling.Coupling.__init__

Constructor. `mask` is a 1-dim tensor, tuple or list; it indexes the inputs as follows:

- if `mask[i] > 0`, `input[i]` will be transformed;
- if `mask[i] <= 0`, `input[i]` will be passed unchanged.

Parameters:

| Name | Description | Default |
| --- | --- | --- |
| `mask` | 1-dim tensor, tuple or list selecting the transformed features | required |
| `transform_net_create_fn` | factory building the network that maps the identity features (and optional context) to the parameters of the coupling transform | required |
| `unconditional_transform` | optional transform applied to the identity features themselves, constructed with `features=num_identity_features` | `None` |

### normflows.flows.neural_spline.coupling_test

Tests for the coupling transforms. Code partially taken from https://github.com/bayesiains/nsf

### normflows.flows.neural_spline.wrapper

### normflows.flows.neural_spline.wrapper.AutoregressiveRationalQuadraticSpline

Bases: `Flow`

Neural spline flow autoregressive layer, wrapper for the implementation of Durkan et al., see [sources](https://github.com/bayesiains/nsf)

Source code in `normflows/flows/neural_spline/wrapper.py`:

```python
class AutoregressiveRationalQuadraticSpline(Flow):
    def __init__(self, num_input_channels, num_blocks, num_hidden_channels,
                 num_context_channels=None, num_bins=8, tail_bound=3,
                 activation=nn.ReLU, dropout_probability=0.0,
                 permute_mask=False, init_identity=True):
        super().__init__()
        self.mprqat = MaskedPiecewiseRationalQuadraticAutoregressive(
            features=num_input_channels,
            hidden_features=num_hidden_channels,
            context_features=num_context_channels,
            num_bins=num_bins,
            tails="linear",
            tail_bound=tail_bound,
            num_blocks=num_blocks,
            use_residual_blocks=True,
            random_mask=False,
            permute_mask=permute_mask,
            activation=activation(),
            dropout_probability=dropout_probability,
            use_batch_norm=False,
            init_identity=init_identity,
        )

    def forward(self, z, context=None):
        z, log_det = self.mprqat.inverse(z, context=context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, log_det = self.mprqat(z, context=context)
        return z, log_det.view(-1)
```

### normflows.flows.neural_spline.wrapper.AutoregressiveRationalQuadraticSpline.__init__

Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `num_input_channels` | int | Flow dimension | required |
| `num_blocks` | int | Number of residual blocks of the parameter NN | required |
| `num_hidden_channels` | int | Number of hidden units of the NN | required |
| `num_context_channels` | int | Number of context/conditional channels | `None` |
| `num_bins` | int | Number of bins | `8` |
| `tail_bound` | int | Bound of the spline tails | `3` |
| `activation` | torch.nn.Module | Activation function | `ReLU` |
| `dropout_probability` | float | Dropout probability of the NN | `0.0` |
| `permute_mask` | bool | Flag, permutes the mask of the NN | `False` |
| `init_identity` | bool | Flag, initialize transform as identity | `True` |
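As a usage illustration (a minimal sketch; the sizes and batch shape are arbitrary assumptions), the wrapper is instantiated directly and behaves like any other `Flow`:

```python
import torch
import normflows as nf

# Autoregressive rational-quadratic spline over a 3-dimensional latent.
flow = nf.flows.AutoregressiveRationalQuadraticSpline(
    num_input_channels=3, num_blocks=2, num_hidden_channels=32
)
z = torch.randn(16, 3)
z_out, log_det = flow(z)        # log_det has shape (16,)
z_rec, _ = flow.inverse(z_out)  # numerically inverts the spline
print(torch.allclose(z, z_rec, atol=1e-5))
```

Note the direction convention: the wrapper's `forward` calls the underlying autoregressive transform's `inverse` (the parallel direction), which is why sampling is fast while density evaluation is sequential.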
view ( - 1 )","title":"AutoregressiveRationalQuadraticSpline"},{"location":"references/#normflows.flows.neural_spline.wrapper.AutoregressiveRationalQuadraticSpline.__init__","text":"Constructor Parameters: Name Type Description Default num_input_channels int Flow dimension required num_blocks int Number of residual blocks of the parameter NN required num_hidden_channels int Number of hidden units of the NN required num_context_channels int Number of context/conditional channels None num_bins int Number of bins 8 tail_bound int Bound of the spline tails 3 activation Module Activation function ReLU dropout_probability float Dropout probability of the NN 0.0 permute_mask bool Flag, permutes the mask of the NN False init_identity bool Flag, initialize transform as identity True Source code in normflows/flows/neural_spline/wrapper.py 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 def __init__ ( self , num_input_channels , num_blocks , num_hidden_channels , num_context_channels = None , num_bins = 8 , tail_bound = 3 , activation = nn . ReLU , dropout_probability = 0.0 , permute_mask = False , init_identity = True , ): \"\"\"Constructor Args: num_input_channels (int): Flow dimension num_blocks (int): Number of residual blocks of the parameter NN num_hidden_channels (int): Number of hidden units of the NN num_context_channels (int): Number of context/conditional channels num_bins (int): Number of bins tail_bound (int): Bound of the spline tails activation (torch.nn.Module): Activation function dropout_probability (float): Dropout probability of the NN permute_mask (bool): Flag, permutes the mask of the NN init_identity (bool): Flag, initialize transform as identity \"\"\" super () . __init__ () self . mprqat = MaskedPiecewiseRationalQuadraticAutoregressive ( features = num_input_channels , hidden_features = num_hidden_channels , context_features = num_context_channels , num_bins = num_bins , tails = \"linear\" , tail_bound = tail_bound , num_blocks = num_blocks , use_residual_blocks = True , random_mask = False , permute_mask = permute_mask , activation = activation (), dropout_probability = dropout_probability , use_batch_norm = False , init_identity = init_identity , )","title":"__init__"},{"location":"references/#normflows.flows.neural_spline.wrapper.CircularAutoregressiveRationalQuadraticSpline","text":"Bases: Flow Neural spline flow coupling layer, wrapper for the implementation of Durkan et al., see sources Source code in normflows/flows/neural_spline/wrapper.py 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 class CircularAutoregressiveRationalQuadraticSpline ( Flow ): \"\"\" Neural spline flow coupling layer, wrapper for the implementation of Durkan et al., see [sources](https://github.com/bayesiains/nsf) \"\"\" def __init__ ( self , num_input_channels , num_blocks , num_hidden_channels , ind_circ , num_context_channels = None , num_bins = 8 , tail_bound = 3 , activation = nn . 
ReLU , dropout_probability = 0.0 , permute_mask = True , init_identity = True , ): \"\"\"Constructor Args: num_input_channels (int): Flow dimension num_blocks (int): Number of residual blocks of the parameter NN num_hidden_channels (int): Number of hidden units of the NN ind_circ (Iterable): Indices of the circular coordinates num_context_channels (int): Number of context/conditional channels num_bins (int): Number of bins tail_bound (int): Bound of the spline tails activation (torch module): Activation function dropout_probability (float): Dropout probability of the NN permute_mask (bool): Flag, permutes the mask of the NN init_identity (bool): Flag, initialize transform as identity \"\"\" super () . __init__ () tails = [ \"circular\" if i in ind_circ else \"linear\" for i in range ( num_input_channels ) ] self . mprqat = MaskedPiecewiseRationalQuadraticAutoregressive ( features = num_input_channels , hidden_features = num_hidden_channels , context_features = num_context_channels , num_bins = num_bins , tails = tails , tail_bound = tail_bound , num_blocks = num_blocks , use_residual_blocks = True , random_mask = False , permute_mask = permute_mask , activation = activation (), dropout_probability = dropout_probability , use_batch_norm = False , init_identity = init_identity , ) def forward ( self , z , context = None ): z , log_det = self . mprqat . inverse ( z , context = context ) return z , log_det . view ( - 1 ) def inverse ( self , z , context = None ): z , log_det = self . mprqat ( z , context = context ) return z , log_det . view ( - 1 )","title":"CircularAutoregressiveRationalQuadraticSpline"},{"location":"references/#normflows.flows.neural_spline.wrapper.CircularAutoregressiveRationalQuadraticSpline.__init__","text":"Constructor Parameters: Name Type Description Default num_input_channels int Flow dimension required num_blocks int Number of residual blocks of the parameter NN required num_hidden_channels int Number of hidden units of the NN required ind_circ Iterable Indices of the circular coordinates required num_context_channels int Number of context/conditional channels None num_bins int Number of bins 8 tail_bound int Bound of the spline tails 3 activation torch module Activation function ReLU dropout_probability float Dropout probability of the NN 0.0 permute_mask bool Flag, permutes the mask of the NN True init_identity bool Flag, initialize transform as identity True Source code in normflows/flows/neural_spline/wrapper.py 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 def __init__ ( self , num_input_channels , num_blocks , num_hidden_channels , ind_circ , num_context_channels = None , num_bins = 8 , tail_bound = 3 , activation = nn . 
ReLU , dropout_probability = 0.0 , permute_mask = True , init_identity = True , ): \"\"\"Constructor Args: num_input_channels (int): Flow dimension num_blocks (int): Number of residual blocks of the parameter NN num_hidden_channels (int): Number of hidden units of the NN ind_circ (Iterable): Indices of the circular coordinates num_context_channels (int): Number of context/conditional channels num_bins (int): Number of bins tail_bound (int): Bound of the spline tails activation (torch module): Activation function dropout_probability (float): Dropout probability of the NN permute_mask (bool): Flag, permutes the mask of the NN init_identity (bool): Flag, initialize transform as identity \"\"\" super () . __init__ () tails = [ \"circular\" if i in ind_circ else \"linear\" for i in range ( num_input_channels ) ] self . mprqat = MaskedPiecewiseRationalQuadraticAutoregressive ( features = num_input_channels , hidden_features = num_hidden_channels , context_features = num_context_channels , num_bins = num_bins , tails = tails , tail_bound = tail_bound , num_blocks = num_blocks , use_residual_blocks = True , random_mask = False , permute_mask = permute_mask , activation = activation (), dropout_probability = dropout_probability , use_batch_norm = False , init_identity = init_identity , )","title":"__init__"},{"location":"references/#normflows.flows.neural_spline.wrapper.CircularCoupledRationalQuadraticSpline","text":"Bases: Flow Neural spline flow coupling layer with circular coordinates Source code in normflows/flows/neural_spline/wrapper.py 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 class CircularCoupledRationalQuadraticSpline ( Flow ): \"\"\" Neural spline flow coupling layer with circular coordinates \"\"\" def __init__ ( self , num_input_channels , num_blocks , num_hidden_channels , ind_circ , num_context_channels = None , num_bins = 8 , tail_bound = 3.0 , activation = nn . ReLU , dropout_probability = 0.0 , reverse_mask = False , mask = None , init_identity = True , ): \"\"\"Constructor Args: num_input_channels (int): Flow dimension num_blocks (int): Number of residual blocks of the parameter NN num_hidden_channels (int): Number of hidden units of the NN num_context_channels (int): Number of context/conditional channels ind_circ (Iterable): Indices of the circular coordinates num_bins (int): Number of bins tail_bound (float or Iterable): Bound of the spline tails activation (torch module): Activation function dropout_probability (float): Dropout probability of the NN reverse_mask (bool): Flag whether the reverse mask should be used mask (torch tensor): Mask to be used, alternating masked generated is None init_identity (bool): Flag, initialize transform as identity \"\"\" super () . __init__ () if mask is None : mask = create_alternating_binary_mask ( num_input_channels , even = reverse_mask ) features_vector = torch . arange ( num_input_channels ) identity_features = features_vector . masked_select ( mask <= 0 ) ind_circ = torch . tensor ( ind_circ ) ind_circ_id = [] for i , id in enumerate ( identity_features ): if id in ind_circ : ind_circ_id += [ i ] if torch . is_tensor ( tail_bound ): scale_pf = np . pi / tail_bound [ ind_circ_id ] else : scale_pf = np . 
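A minimal sketch of the circular variant, treating dimension 1 as an angle while dimension 0 keeps linear tails (the dimension choice and sizes are arbitrary assumptions):

```python
import torch
import normflows as nf

# Dimension 1 is circular; the spline wraps around at the tail bound.
flow = nf.flows.CircularCoupledRationalQuadraticSpline(
    num_input_channels=2, num_blocks=2, num_hidden_channels=32, ind_circ=[1]
)
z = torch.rand(16, 2) * 2 - 1   # points inside the default tail bound
z_out, log_det = flow(z)
z_rec, _ = flow.inverse(z_out)
print(torch.allclose(z, z_rec, atol=1e-5))
```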
### normflows.flows.neural_spline.wrapper.CoupledRationalQuadraticSpline

Bases: `Flow`

Neural spline flow coupling layer, wrapper for the implementation of Durkan et al., see [source](https://github.com/bayesiains/nsf)

Source code in `normflows/flows/neural_spline/wrapper.py`:

```python
class CoupledRationalQuadraticSpline(Flow):
    def __init__(self, num_input_channels, num_blocks, num_hidden_channels,
                 num_context_channels=None, num_bins=8, tails="linear",
                 tail_bound=3.0, activation=nn.ReLU, dropout_probability=0.0,
                 reverse_mask=False, init_identity=True):
        super().__init__()

        def transform_net_create_fn(in_features, out_features):
            net = ResidualNet(
                in_features=in_features,
                out_features=out_features,
                context_features=num_context_channels,
                hidden_features=num_hidden_channels,
                num_blocks=num_blocks,
                activation=activation(),
                dropout_probability=dropout_probability,
                use_batch_norm=False,
            )
            if init_identity:
                torch.nn.init.constant_(net.final_layer.weight, 0.0)
                torch.nn.init.constant_(
                    net.final_layer.bias, np.log(np.exp(1 - DEFAULT_MIN_DERIVATIVE) - 1)
                )
            return net

        self.prqct = PiecewiseRationalQuadraticCoupling(
            mask=create_alternating_binary_mask(num_input_channels, even=reverse_mask),
            transform_net_create_fn=transform_net_create_fn,
            num_bins=num_bins,
            tails=tails,
            tail_bound=tail_bound,
            # Setting True corresponds to equations (4), (5), (6) in the NSF paper:
            apply_unconditional_transform=True,
        )

    def forward(self, z, context=None):
        z, log_det = self.prqct.inverse(z, context)
        return z, log_det.view(-1)

    def inverse(self, z, context=None):
        z, log_det = self.prqct(z, context)
        return z, log_det.view(-1)
```

### normflows.flows.neural_spline.wrapper.CoupledRationalQuadraticSpline.__init__

Constructor

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `num_input_channels` | int | Flow dimension | required |
| `num_blocks` | int | Number of residual blocks of the parameter NN | required |
| `num_hidden_channels` | int | Number of hidden units of the NN | required |
| `num_context_channels` | int | Number of context/conditional channels | `None` |
| `num_bins` | int | Number of bins | `8` |
| `tails` | str | Behaviour of the tails of the distribution, can be `'linear'`, `'circular'` for a periodic distribution, or `None` for a distribution on a compact interval | `'linear'` |
| `tail_bound` | float | Bound of the spline tails | `3.0` |
| `activation` | torch module | Activation function | `ReLU` |
| `dropout_probability` | float | Dropout probability of the NN | `0.0` |
| `reverse_mask` | bool | Flag, whether the reverse mask should be used | `False` |
| `init_identity` | bool | Flag, initialize transform as identity | `True` |
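Putting the wrappers together, here is a minimal sketch of a complete neural spline flow trained by maximum likelihood (the depth, widths, and the stand-in data are arbitrary assumptions):

```python
import torch
import normflows as nf

# Neural spline flow: coupled RQ splines alternated with LU-permutations.
latent_size = 2
flows = []
for _ in range(8):
    flows += [nf.flows.CoupledRationalQuadraticSpline(latent_size, 2, 64)]
    flows += [nf.flows.LULinearPermute(latent_size)]

q0 = nf.distributions.DiagGaussian(latent_size, trainable=False)
model = nf.NormalizingFlow(q0=q0, flows=flows)

x = torch.randn(256, latent_size)   # stand-in for real training data
loss = model.forward_kld(x)         # maximum likelihood objective
loss.backward()
```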
### normflows.flows.neural_spline.wrapper_test

### normflows.flows.normalization

### normflows.flows.normalization.ActNorm

Bases: `AffineConstFlow`

An `AffineConstFlow` with a data-dependent initialization: on the very first batch the parameters `s`, `t` are initialized such that the output is unit Gaussian, as described in the Glow paper.

Source code in `normflows/flows/normalization.py`:

```python
class ActNorm(AffineConstFlow):
    """
    An AffineConstFlow but with a data-dependent initialization, where on the
    very first batch the parameters s, t are initialized such that the output
    is unit Gaussian. As described in the Glow paper.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.data_dep_init_done_cpu = torch.tensor(0.0)
        self.register_buffer("data_dep_init_done", self.data_dep_init_done_cpu)

    def forward(self, z):
        # first batch is used for initialization, c.f. batchnorm
        if not self.data_dep_init_done > 0.0:
            assert self.s is not None and self.t is not None
            s_init = -torch.log(z.std(dim=self.batch_dims, keepdim=True) + 1e-6)
            self.s.data = s_init.data
            self.t.data = (
                -z.mean(dim=self.batch_dims, keepdim=True) * torch.exp(self.s)
            ).data
            self.data_dep_init_done = torch.tensor(1.0)
        return super().forward(z)

    def inverse(self, z):
        # first batch is used for initialization, c.f. batchnorm
        if not self.data_dep_init_done:
            assert self.s is not None and self.t is not None
            s_init = torch.log(z.std(dim=self.batch_dims, keepdim=True) + 1e-6)
            self.s.data = s_init.data
            self.t.data = z.mean(dim=self.batch_dims, keepdim=True).data
            self.data_dep_init_done = torch.tensor(1.0)
        return super().inverse(z)
```

### normflows.flows.normalization.BatchNorm

Bases: `Flow`

Batch normalization without considering the derivatives of the batch statistics, see [arXiv: 1605.08803](https://arxiv.org/abs/1605.08803)

Source code in `normflows/flows/normalization.py`:

```python
class BatchNorm(Flow):
    """
    Batch normalization without considering the derivatives of the batch
    statistics, see arXiv: 1605.08803
    """

    def __init__(self, eps=1.0e-10):
        super().__init__()
        self.eps_cpu = torch.tensor(eps)
        self.register_buffer("eps", self.eps_cpu)

    def forward(self, z):
        """Do batch norm over batch and sample dimension"""
        mean = torch.mean(z, dim=0, keepdims=True)
        std = torch.std(z, dim=0, keepdims=True)
        z_ = (z - mean) / torch.sqrt(std**2 + self.eps)
        log_det = torch.log(1 / torch.prod(torch.sqrt(std**2 + self.eps))).repeat(
            z.size()[0]
        )
        return z_, log_det
```

### normflows.flows.normalization.BatchNorm.forward

Do batch norm over batch and sample dimension. Source code in `normflows/flows/normalization.py` (see the class listing above).
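The data-dependent initialization of `ActNorm` is easy to verify directly. A minimal sketch (the shape and the shifted/scaled input are arbitrary assumptions):

```python
import torch
import normflows as nf

# ActNorm: the first batch fixes s and t so that the output of that
# batch is approximately zero-mean and unit-variance per dimension.
actnorm = nf.flows.ActNorm((4,))
z = 3.0 + 2.0 * torch.randn(512, 4)
z_out, log_det = actnorm(z)   # triggers the data-dependent initialization
print(z_out.mean(0))          # ~ 0
print(z_out.std(0))           # ~ 1
```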
device )","title":"PeriodicShift"},{"location":"references/#normflows.flows.periodic.PeriodicShift.__init__","text":"Constructor Parameters: Name Type Description Default ind Iterable, indices of coordinates to be mapped required bound Float or iterable, bound of interval 1.0 shift Tensor, shift to be applied 0.0 Source code in normflows/flows/periodic.py 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 def __init__ ( self , ind , bound = 1.0 , shift = 0.0 ): \"\"\"Constructor Args: ind: Iterable, indices of coordinates to be mapped bound: Float or iterable, bound of interval shift: Tensor, shift to be applied \"\"\" super () . __init__ () self . ind = ind if torch . is_tensor ( bound ): self . register_buffer ( \"bound\" , bound ) else : self . bound = bound if torch . is_tensor ( shift ): self . register_buffer ( \"shift\" , shift ) else : self . shift = shift","title":"__init__"},{"location":"references/#normflows.flows.periodic.PeriodicWrap","text":"Bases: Flow Map periodic coordinates to fixed interval Source code in normflows/flows/periodic.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 class PeriodicWrap ( Flow ): \"\"\" Map periodic coordinates to fixed interval \"\"\" def __init__ ( self , ind , bound = 1.0 ): \"\"\"Constructor ind: Iterable, indices of coordinates to be mapped bound: Float or iterable, bound of interval \"\"\" super () . __init__ () self . ind = ind if torch . is_tensor ( bound ): self . register_buffer ( \"bound\" , bound ) else : self . bound = bound def forward ( self , z ): return z , torch . zeros ( len ( z ), dtype = z . dtype , device = z . device ) def inverse ( self , z ): z_ = z . clone () z_ [ ... , self . ind ] = ( torch . remainder ( z_ [ ... , self . ind ] + self . bound , 2 * self . bound ) - self . bound ) return z_ , torch . zeros ( len ( z ), dtype = z . dtype , device = z . device )","title":"PeriodicWrap"},{"location":"references/#normflows.flows.periodic.PeriodicWrap.__init__","text":"Constructor ind: Iterable, indices of coordinates to be mapped bound: Float or iterable, bound of interval Source code in normflows/flows/periodic.py 11 12 13 14 15 16 17 18 19 20 21 22 def __init__ ( self , ind , bound = 1.0 ): \"\"\"Constructor ind: Iterable, indices of coordinates to be mapped bound: Float or iterable, bound of interval \"\"\" super () . __init__ () self . ind = ind if torch . is_tensor ( bound ): self . register_buffer ( \"bound\" , bound ) else : self . bound = bound","title":"__init__"},{"location":"references/#normflows.flows.periodic_test","text":"","title":"periodic_test"},{"location":"references/#normflows.flows.planar","text":"","title":"planar"},{"location":"references/#normflows.flows.planar.Planar","text":"Bases: Flow Planar flow as introduced in arXiv: 1505.05770 f(z) = z + u * h(w * z + b) Source code in normflows/flows/planar.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 class Planar ( Flow ): \"\"\"Planar flow as introduced in [arXiv: 1505.05770](https://arxiv.org/abs/1505.05770) ``` f(z) = z + u * h(w * z + b) ``` \"\"\" def __init__ ( self , shape , act = \"tanh\" , u = None , w = None , b = None ): \"\"\"Constructor of the planar flow Args: shape: shape of the latent variable z h: nonlinear function h of the planar flow (see definition of f above) u,w,b: optional initialization for parameters \"\"\" super () . 
__init__ () lim_w = np . sqrt ( 2.0 / np . prod ( shape )) lim_u = np . sqrt ( 2 ) if u is not None : self . u = nn . Parameter ( u ) else : self . u = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . u , - lim_u , lim_u ) if w is not None : self . w = nn . Parameter ( w ) else : self . w = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . w , - lim_w , lim_w ) if b is not None : self . b = nn . Parameter ( b ) else : self . b = nn . Parameter ( torch . zeros ( 1 )) self . act = act if act == \"tanh\" : self . h = torch . tanh elif act == \"leaky_relu\" : self . h = torch . nn . LeakyReLU ( negative_slope = 0.2 ) else : raise NotImplementedError ( \"Nonlinearity is not implemented.\" ) def forward ( self , z ): lin = torch . sum ( self . w * z , list ( range ( 1 , self . w . dim ())), keepdim = True ) + self . b inner = torch . sum ( self . w * self . u ) u = self . u + ( torch . log ( 1 + torch . exp ( inner )) - 1 - inner ) \\ * self . w / torch . sum ( self . w ** 2 ) # constraint w.T * u > -1 if self . act == \"tanh\" : h_ = lambda x : 1 / torch . cosh ( x ) ** 2 elif self . act == \"leaky_relu\" : h_ = lambda x : ( x < 0 ) * ( self . h . negative_slope - 1.0 ) + 1.0 z_ = z + u * self . h ( lin ) log_det = torch . log ( torch . abs ( 1 + torch . sum ( self . w * u ) * h_ ( lin . reshape ( - 1 )))) return z_ , log_det def inverse ( self , z ): if self . act != \"leaky_relu\" : raise NotImplementedError ( \"This flow has no algebraic inverse.\" ) lin = torch . sum ( self . w * z , list ( range ( 1 , self . w . dim ()))) + self . b a = ( lin < 0 ) * ( self . h . negative_slope - 1.0 ) + 1.0 # absorb leakyReLU slope into u inner = torch . sum ( self . w * self . u ) u = self . u + ( torch . log ( 1 + torch . exp ( inner )) - 1 - inner ) \\ * self . w / torch . sum ( self . w ** 2 ) dims = [ - 1 ] + ( u . dim () - 1 ) * [ 1 ] u = a . reshape ( * dims ) * u inner_ = torch . sum ( self . w * u , list ( range ( 1 , self . w . dim ()))) z_ = z - u * ( lin / ( 1 + inner_ )) . reshape ( * dims ) log_det = - torch . log ( torch . abs ( 1 + inner_ )) return z_ , log_det","title":"Planar"},{"location":"references/#normflows.flows.planar.Planar.__init__","text":"Constructor of the planar flow Parameters: Name Type Description Default shape shape of the latent variable z required h nonlinear function h of the planar flow (see definition of f above) required u,w,b optional initialization for parameters required Source code in normflows/flows/planar.py 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 def __init__ ( self , shape , act = \"tanh\" , u = None , w = None , b = None ): \"\"\"Constructor of the planar flow Args: shape: shape of the latent variable z h: nonlinear function h of the planar flow (see definition of f above) u,w,b: optional initialization for parameters \"\"\" super () . __init__ () lim_w = np . sqrt ( 2.0 / np . prod ( shape )) lim_u = np . sqrt ( 2 ) if u is not None : self . u = nn . Parameter ( u ) else : self . u = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . u , - lim_u , lim_u ) if w is not None : self . w = nn . Parameter ( w ) else : self . w = nn . Parameter ( torch . empty ( shape )[ None ]) nn . init . uniform_ ( self . w , - lim_w , lim_w ) if b is not None : self . b = nn . Parameter ( b ) else : self . b = nn . Parameter ( torch . zeros ( 1 )) self . act = act if act == \"tanh\" : self . h = torch . 
tanh elif act == \"leaky_relu\" : self . h = torch . nn . LeakyReLU ( negative_slope = 0.2 ) else : raise NotImplementedError ( \"Nonlinearity is not implemented.\" )","title":"__init__"},{"location":"references/#normflows.flows.planar_test","text":"","title":"planar_test"},{"location":"references/#normflows.flows.radial","text":"","title":"radial"},{"location":"references/#normflows.flows.radial.Radial","text":"Bases: Flow Radial flow as introduced in arXiv: 1505.05770 f(z) = z + beta * h(alpha, r) * (z - z_0) Source code in normflows/flows/radial.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 class Radial ( Flow ): \"\"\"Radial flow as introduced in [arXiv: 1505.05770](https://arxiv.org/abs/1505.05770) ``` f(z) = z + beta * h(alpha, r) * (z - z_0) ``` \"\"\" def __init__ ( self , shape , z_0 = None ): \"\"\"Constructor of the radial flow Args: shape: shape of the latent variable z z_0: parameter of the radial flow \"\"\" super () . __init__ () self . d_cpu = torch . prod ( torch . tensor ( shape )) self . register_buffer ( \"d\" , self . d_cpu ) self . beta = nn . Parameter ( torch . empty ( 1 )) lim = 1.0 / np . prod ( shape ) nn . init . uniform_ ( self . beta , - lim - 1.0 , lim - 1.0 ) self . alpha = nn . Parameter ( torch . empty ( 1 )) nn . init . uniform_ ( self . alpha , - lim , lim ) if z_0 is not None : self . z_0 = nn . Parameter ( z_0 ) else : self . z_0 = nn . Parameter ( torch . randn ( shape )[ None ]) def forward ( self , z ): beta = torch . log ( 1 + torch . exp ( self . beta )) - torch . abs ( self . alpha ) dz = z - self . z_0 r = torch . linalg . vector_norm ( dz , dim = list ( range ( 1 , self . z_0 . dim ())), keepdim = True ) h_arr = beta / ( torch . abs ( self . alpha ) + r ) h_arr_ = - beta * r / ( torch . abs ( self . alpha ) + r ) ** 2 z_ = z + h_arr * dz log_det = ( self . d - 1 ) * torch . log ( 1 + h_arr ) + torch . log ( 1 + h_arr + h_arr_ ) log_det = log_det . reshape ( - 1 ) return z_ , log_det","title":"Radial"},{"location":"references/#normflows.flows.radial.Radial.__init__","text":"Constructor of the radial flow Parameters: Name Type Description Default shape shape of the latent variable z required z_0 parameter of the radial flow None Source code in normflows/flows/radial.py 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 def __init__ ( self , shape , z_0 = None ): \"\"\"Constructor of the radial flow Args: shape: shape of the latent variable z z_0: parameter of the radial flow \"\"\" super () . __init__ () self . d_cpu = torch . prod ( torch . tensor ( shape )) self . register_buffer ( \"d\" , self . d_cpu ) self . beta = nn . Parameter ( torch . empty ( 1 )) lim = 1.0 / np . prod ( shape ) nn . init . uniform_ ( self . beta , - lim - 1.0 , lim - 1.0 ) self . alpha = nn . Parameter ( torch . empty ( 1 )) nn . init . uniform_ ( self . alpha , - lim , lim ) if z_0 is not None : self . z_0 = nn . Parameter ( z_0 ) else : self . z_0 = nn . Parameter ( torch . 
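A sketch combining the planar and radial layers documented here: an invertibility check for a single Planar layer, followed by the classic planar/radial stack trained by reverse KL. The TwoMoons target and the reverse_kld objective are assumed to be available as in the package's examples; all hyperparameters are illustrative:

```python
import torch
import normflows as nf

torch.manual_seed(0)

# Invertibility check: leaky_relu admits an algebraic inverse, tanh does not.
flow = nf.flows.Planar((2,), act="leaky_relu")
z = torch.randn(5, 2)
z_fwd, log_det = flow(z)
z_rec, log_det_inv = flow.inverse(z_fwd)
print(torch.allclose(z, z_rec, atol=1e-5))   # reconstruction up to numerical error
print((log_det + log_det_inv).abs().max())   # close to zero

# A planar/radial stack for variational inference on a 2D target.
flows = []
for _ in range(8):
    flows += [nf.flows.Planar((2,), act="leaky_relu"), nf.flows.Radial((2,))]
model = nf.NormalizingFlow(q0=nf.distributions.DiagGaussian(2), flows=flows,
                           p=nf.distributions.TwoMoons())
loss = model.reverse_kld(num_samples=64)
loss.backward()
```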
randn ( shape )[ None ])","title":"__init__"},{"location":"references/#normflows.flows.radial_test","text":"","title":"radial_test"},{"location":"references/#normflows.flows.reshape","text":"","title":"reshape"},{"location":"references/#normflows.flows.reshape.Merge","text":"Bases: Split Same as Split but with forward and backward pass interchanged Source code in normflows/flows/reshape.py class Merge ( Split ): \"\"\" Same as Split but with forward and backward pass interchanged \"\"\" def __init__ ( self , mode = \"channel\" ): super () . __init__ ( mode ) def forward ( self , z ): return super () . inverse ( z ) def inverse ( self , z ): return super () . forward ( z )","title":"Merge"},{"location":"references/#normflows.flows.reshape.Split","text":"Bases: Flow Split features into two sets Source code in normflows/flows/reshape.py class Split ( Flow ): \"\"\" Split features into two sets \"\"\" def __init__ ( self , mode = \"channel\" ): \"\"\"Constructor The splitting mode can be: - channel: Splits first feature dimension, usually channels, into two halves - channel_inv: Same as channel, but with z1 and z2 flipped - checkerboard: Splits features using a checkerboard pattern (last feature dimension must be even) - checkerboard_inv: Same as checkerboard, but with inverted coloring Args: mode: splitting mode \"\"\" super () . __init__ () self . mode = mode def forward ( self , z ): if self . mode == \"channel\" : z1 , z2 = z . chunk ( 2 , dim = 1 ) elif self . mode == \"channel_inv\" : z2 , z1 = z . chunk ( 2 , dim = 1 ) elif \"checkerboard\" in self . mode : n_dims = z . dim () cb0 = 0 cb1 = 1 for i in range ( 1 , n_dims ): cb0_ = cb0 cb1_ = cb1 cb0 = [ cb0_ if j % 2 == 0 else cb1_ for j in range ( z . size ( n_dims - i ))] cb1 = [ cb1_ if j % 2 == 0 else cb0_ for j in range ( z . size ( n_dims - i ))] cb = cb1 if \"inv\" in self . mode else cb0 cb = torch . tensor ( cb )[ None ] . repeat ( len ( z ), * (( n_dims - 1 ) * [ 1 ])) cb = cb . to ( z . device ) z_size = z . size () z1 = z . reshape ( - 1 )[ torch . nonzero ( cb . view ( - 1 ), as_tuple = False )] . view ( * z_size [: - 1 ], - 1 ) z2 = z . reshape ( - 1 )[ torch . nonzero (( 1 - cb ) . view ( - 1 ), as_tuple = False )] . view ( * z_size [: - 1 ], - 1 ) else : raise NotImplementedError ( \"Mode \" + self . mode + \" is not implemented.\" ) log_det = 0 return [ z1 , z2 ], log_det def inverse ( self , z ): z1 , z2 = z if self . mode == \"channel\" : z = torch . cat ([ z1 , z2 ], 1 ) elif self . mode == \"channel_inv\" : z = torch . cat ([ z2 , z1 ], 1 ) elif \"checkerboard\" in self . mode : n_dims = z1 . dim () z_size = list ( z1 . size ()) z_size [ - 1 ] *= 2 cb0 = 0 cb1 = 1 for i in range ( 1 , n_dims ): cb0_ = cb0 cb1_ = cb1 cb0 = [ cb0_ if j % 2 == 0 else cb1_ for j in range ( z_size [ n_dims - i ])] cb1 = [ cb1_ if j % 2 == 0 else cb0_ for j in range ( z_size [ n_dims - i ])] cb = cb1 if \"inv\" in self . mode else cb0 cb = torch . tensor ( cb )[ None ] . repeat ( z_size [ 0 ], * (( n_dims - 1 ) * [ 1 ])) cb = cb . to ( z1 . device ) z1 = z1 [ ... , None ] . repeat ( * ( n_dims * [ 1 ]), 2 ) . view ( * z_size [: - 1 ], - 1 ) z2 = z2 [ ... , None ] . repeat ( * ( n_dims * [ 1 ]), 2 ) . view ( * z_size [: - 1 ], - 1 ) z = cb * z1 + ( 1 - cb ) * z2 else : raise NotImplementedError ( \"Mode \" + self . mode + \" is not implemented.\" ) log_det = 0 return z , log_det","title":"Split"},{"location":"references/#normflows.flows.reshape.Split.__init__","text":"Constructor The splitting mode can be: channel: Splits first feature dimension, usually channels, into two halves channel_inv: Same as channel, but with z1 and z2 flipped checkerboard: Splits features using a checkerboard pattern (last feature dimension must be even) checkerboard_inv: Same as checkerboard, but with inverted coloring Parameters: Name Type Description Default mode splitting mode 'channel' Source code in normflows/flows/reshape.py def __init__ ( self , mode = \"channel\" ): \"\"\"Constructor The splitting mode can be: - channel: Splits first feature dimension, usually channels, into two halves - channel_inv: Same as channel, but with z1 and z2 flipped - checkerboard: Splits features using a checkerboard pattern (last feature dimension must be even) - checkerboard_inv: Same as checkerboard, but with inverted coloring Args: mode: splitting mode \"\"\" super () . __init__ () self . mode = mode","title":"__init__"},{"location":"references/#normflows.flows.reshape.Squeeze","text":"Bases: Flow Squeeze operation of multi-scale architecture, RealNVP or Glow paper Source code in normflows/flows/reshape.py class Squeeze ( Flow ): \"\"\" Squeeze operation of multi-scale architecture, RealNVP or Glow paper \"\"\" def __init__ ( self ): \"\"\" Constructor \"\"\" super () . __init__ () def forward ( self , z ): log_det = 0 s = z . size () z = z . view ( s [ 0 ], s [ 1 ] // 4 , 2 , 2 , s [ 2 ], s [ 3 ]) z = z . permute ( 0 , 1 , 4 , 2 , 5 , 3 ) . contiguous () z = z . view ( s [ 0 ], s [ 1 ] // 4 , 2 * s [ 2 ], 2 * s [ 3 ]) return z , log_det def inverse ( self , z ): log_det = 0 s = z . size () z = z . view ( * s [: 2 ], s [ 2 ] // 2 , 2 , s [ 3 ] // 2 , 2 ) z = z . permute ( 0 , 1 , 3 , 5 , 2 , 4 ) . contiguous () z = z . view ( s [ 0 ], 4 * s [ 1 ], s [ 2 ] // 2 , s [ 3 ] // 2 ) return z , log_det","title":"Squeeze"},{"location":"references/#normflows.flows.reshape.Squeeze.__init__","text":"Constructor Source code in normflows/flows/reshape.py def __init__ ( self ): \"\"\" Constructor \"\"\" super () . 
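A quick round-trip sketch of Split, Merge, and Squeeze on a small image-shaped tensor; the shapes in the comments follow from the source listings above, and the tensor itself is illustrative:

```python
import torch
import normflows as nf

split = nf.flows.Split(mode="channel")
merge = nf.flows.Merge(mode="channel")
squeeze = nf.flows.Squeeze()

z = torch.randn(3, 4, 8, 8)       # (batch, channels, height, width)
(z1, z2), _ = split(z)            # two halves with 2 channels each
z_rec, _ = merge([z1, z2])        # Merge is Split with the passes swapped
print(torch.equal(z, z_rec))      # True

z_sq, _ = squeeze.inverse(z)      # (3, 16, 4, 4): space traded for channels
z_back, _ = squeeze(z_sq)         # forward undoes the inverse exactly
print(torch.equal(z, z_back))     # True
```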
__init__ ()","title":"__init__"},{"location":"references/#normflows.flows.residual","text":"","title":"residual"},{"location":"references/#normflows.flows.residual.Residual","text":"Bases: Flow Invertible residual net block, wrapper to the implementation of Chen et al., see sources Source code in normflows/flows/residual.py 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 class Residual ( Flow ): \"\"\" Invertible residual net block, wrapper to the implementation of Chen et al., see [sources](https://github.com/rtqichen/residual-flows) \"\"\" def __init__ ( self , net , reverse = True , reduce_memory = True , geom_p = 0.5 , lamb = 2.0 , n_power_series = None , exact_trace = False , brute_force = False , n_samples = 1 , n_exact_terms = 2 , n_dist = \"geometric\" ): \"\"\"Constructor Args: net: Neural network, must be Lipschitz continuous with L < 1 reverse: Flag, if true the map ```f(x) = x + net(x)``` is applied in the inverse pass, otherwise it is done in forward reduce_memory: Flag, if true Neumann series and precomputations, for backward pass in forward pass are done geom_p: Parameter of the geometric distribution used for the Neumann series lamb: Parameter of the geometric distribution used for the Neumann series n_power_series: Number of terms in the Neumann series exact_trace: Flag, if true the trace of the Jacobian is computed exactly brute_force: Flag, if true the Jacobian is computed exactly in 2D n_samples: Number of samples used to estimate power series n_exact_terms: Number of terms always included in the power series n_dist: Distribution used for the power series, either \"geometric\" or \"poisson\" \"\"\" super () . __init__ () self . reverse = reverse self . iresblock = iResBlock ( net , n_samples = n_samples , n_exact_terms = n_exact_terms , neumann_grad = reduce_memory , grad_in_forward = reduce_memory , exact_trace = exact_trace , geom_p = geom_p , lamb = lamb , n_power_series = n_power_series , brute_force = brute_force , n_dist = n_dist , ) def forward ( self , z ): if self . reverse : z , log_det = self . iresblock . inverse ( z , 0 ) else : z , log_det = self . iresblock . forward ( z , 0 ) return z , - log_det . view ( - 1 ) def inverse ( self , z ): if self . reverse : z , log_det = self . iresblock . forward ( z , 0 ) else : z , log_det = self . iresblock . inverse ( z , 0 ) return z , - log_det . 
view ( - 1 )","title":"Residual"},{"location":"references/#normflows.flows.residual.Residual.__init__","text":"Constructor Parameters: Name Type Description Default net Neural network, must be Lipschitz continuous with L < 1 required reverse Flag, if true the map f(x) = x + net(x) is applied in the inverse pass, otherwise it is done in forward True reduce_memory Flag, if true Neumann series and precomputations, for backward pass in forward pass are done True geom_p Parameter of the geometric distribution used for the Neumann series 0.5 lamb Parameter of the geometric distribution used for the Neumann series 2.0 n_power_series Number of terms in the Neumann series None exact_trace Flag, if true the trace of the Jacobian is computed exactly False brute_force Flag, if true the Jacobian is computed exactly in 2D False n_samples Number of samples used to estimate power series 1 n_exact_terms Number of terms always included in the power series 2 n_dist Distribution used for the power series, either \"geometric\" or \"poisson\" 'geometric' Source code in normflows/flows/residual.py 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 def __init__ ( self , net , reverse = True , reduce_memory = True , geom_p = 0.5 , lamb = 2.0 , n_power_series = None , exact_trace = False , brute_force = False , n_samples = 1 , n_exact_terms = 2 , n_dist = \"geometric\" ): \"\"\"Constructor Args: net: Neural network, must be Lipschitz continuous with L < 1 reverse: Flag, if true the map ```f(x) = x + net(x)``` is applied in the inverse pass, otherwise it is done in forward reduce_memory: Flag, if true Neumann series and precomputations, for backward pass in forward pass are done geom_p: Parameter of the geometric distribution used for the Neumann series lamb: Parameter of the geometric distribution used for the Neumann series n_power_series: Number of terms in the Neumann series exact_trace: Flag, if true the trace of the Jacobian is computed exactly brute_force: Flag, if true the Jacobian is computed exactly in 2D n_samples: Number of samples used to estimate power series n_exact_terms: Number of terms always included in the power series n_dist: Distribution used for the power series, either \"geometric\" or \"poisson\" \"\"\" super () . __init__ () self . reverse = reverse self . iresblock = iResBlock ( net , n_samples = n_samples , n_exact_terms = n_exact_terms , neumann_grad = reduce_memory , grad_in_forward = reduce_memory , exact_trace = exact_trace , geom_p = geom_p , lamb = lamb , n_power_series = n_power_series , brute_force = brute_force , n_dist = n_dist , )","title":"__init__"},{"location":"references/#normflows.flows.residual.iResBlock","text":"Bases: Module Source code in normflows/flows/residual.py 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 class iResBlock ( nn . 
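A minimal sketch of Residual wrapping a Lipschitz-constrained network (LipschitzMLP is documented further below); the layer widths and Lipschitz constant are illustrative. With the default `reverse=True`, the forward pass runs the fixed-point inversion and the log-determinant comes from the stochastic power-series estimator:

```python
import torch
import normflows as nf

torch.manual_seed(0)
net = nf.nets.LipschitzMLP([2, 32, 32, 2], lipschitz_const=0.9, init_zeros=True)
flow = nf.flows.Residual(net)

z = torch.randn(8, 2)
z_out, log_det = flow(z)   # log-determinant via the Neumann-series estimator
print(z_out.shape, log_det.shape)
```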
Module ): def __init__ ( self , nnet , geom_p = 0.5 , lamb = 2.0 , n_power_series = None , exact_trace = False , brute_force = False , n_samples = 1 , n_exact_terms = 2 , n_dist = \"geometric\" , neumann_grad = True , grad_in_forward = False , ): \"\"\" Args: nnet: a nn.Module n_power_series: number of power series. If not None, uses a biased approximation to logdet. exact_trace: if False, uses a Hutchinson trace estimator. Otherwise computes the exact full Jacobian. brute_force: Computes the exact logdet. Only available for 2D inputs. \"\"\" nn . Module . __init__ ( self ) self . nnet = nnet self . n_dist = n_dist self . geom_p = nn . Parameter ( torch . tensor ( np . log ( geom_p ) - np . log ( 1.0 - geom_p ))) self . lamb = nn . Parameter ( torch . tensor ( lamb )) self . n_samples = n_samples self . n_power_series = n_power_series self . exact_trace = exact_trace self . brute_force = brute_force self . n_exact_terms = n_exact_terms self . grad_in_forward = grad_in_forward self . neumann_grad = neumann_grad # store the samples of n. self . register_buffer ( \"last_n_samples\" , torch . zeros ( self . n_samples )) self . register_buffer ( \"last_firmom\" , torch . zeros ( 1 )) self . register_buffer ( \"last_secmom\" , torch . zeros ( 1 )) def forward ( self , x , logpx = None ): if logpx is None : y = x + self . nnet ( x ) return y else : g , logdetgrad = self . _logdetgrad ( x ) return x + g , logpx - logdetgrad def inverse ( self , y , logpy = None ): x = self . _inverse_fixed_point ( y ) if logpy is None : return x else : return x , logpy + self . _logdetgrad ( x )[ 1 ] def _inverse_fixed_point ( self , y , atol = 1e-5 , rtol = 1e-5 ): x , x_prev = y - self . nnet ( y ), y i = 0 tol = atol + y . abs () * rtol while not torch . all (( x - x_prev ) ** 2 / tol < 1 ): x , x_prev = y - self . nnet ( x ), x i += 1 if i > 1000 : break return x def _logdetgrad ( self , x ): \"\"\"Returns g(x) and ```logdet|d(x+g(x))/dx|```\"\"\" with torch . enable_grad (): if ( self . brute_force or not self . training ) and ( x . ndimension () == 2 and x . shape [ 1 ] == 2 ): ########################################### # Brute-force compute Jacobian determinant. ########################################### x = x . requires_grad_ ( True ) g = self . nnet ( x ) # Brute-force logdet only available for 2D. jac = batch_jacobian ( g , x ) batch_dets = ( jac [:, 0 , 0 ] + 1 ) * ( jac [:, 1 , 1 ] + 1 ) - jac [ :, 0 , 1 ] * jac [:, 1 , 0 ] return g , torch . log ( torch . abs ( batch_dets )) . view ( - 1 , 1 ) if self . n_dist == \"geometric\" : geom_p = torch . sigmoid ( self . geom_p ) . item () sample_fn = lambda m : geometric_sample ( geom_p , m ) rcdf_fn = lambda k , offset : geometric_1mcdf ( geom_p , k , offset ) elif self . n_dist == \"poisson\" : lamb = self . lamb . item () sample_fn = lambda m : poisson_sample ( lamb , m ) rcdf_fn = lambda k , offset : poisson_1mcdf ( lamb , k , offset ) if self . training : if self . n_power_series is None : # Unbiased estimation. lamb = self . lamb . item () n_samples = sample_fn ( self . n_samples ) n_power_series = max ( n_samples ) + self . n_exact_terms coeff_fn = ( lambda k : 1 / rcdf_fn ( k , self . n_exact_terms ) * sum ( n_samples >= k - self . n_exact_terms ) / len ( n_samples ) ) else : # Truncated estimation. n_power_series = self . n_power_series coeff_fn = lambda k : 1.0 else : # Unbiased estimation with more exact terms. lamb = self . lamb . item () n_samples = sample_fn ( self . 
n_samples ) n_power_series = max ( n_samples ) + 20 coeff_fn = ( lambda k : 1 / rcdf_fn ( k , 20 ) * sum ( n_samples >= k - 20 ) / len ( n_samples ) ) if not self . exact_trace : #################################### # Power series with trace estimator. #################################### vareps = torch . randn_like ( x ) # Choose the type of estimator. if self . training and self . neumann_grad : estimator_fn = neumann_logdet_estimator else : estimator_fn = basic_logdet_estimator # Do backprop-in-forward to save memory. if self . training and self . grad_in_forward : g , logdetgrad = mem_eff_wrapper ( estimator_fn , self . nnet , x , n_power_series , vareps , coeff_fn , self . training , ) else : x = x . requires_grad_ ( True ) g = self . nnet ( x ) logdetgrad = estimator_fn ( g , x , n_power_series , vareps , coeff_fn , self . training ) else : ############################################ # Power series with exact trace computation. ############################################ x = x . requires_grad_ ( True ) g = self . nnet ( x ) jac = batch_jacobian ( g , x ) logdetgrad = batch_trace ( jac ) jac_k = jac for k in range ( 2 , n_power_series + 1 ): jac_k = torch . bmm ( jac , jac_k ) logdetgrad = logdetgrad + ( - 1 ) ** ( k + 1 ) / k * coeff_fn ( k ) * batch_trace ( jac_k ) if self . training and self . n_power_series is None : self . last_n_samples . copy_ ( torch . tensor ( n_samples ) . to ( self . last_n_samples ) ) estimator = logdetgrad . detach () self . last_firmom . copy_ ( torch . mean ( estimator ) . to ( self . last_firmom )) self . last_secmom . copy_ ( torch . mean ( estimator ** 2 ) . to ( self . last_secmom )) return g , logdetgrad . view ( - 1 , 1 ) def extra_repr ( self ): return \"dist= {} , n_samples= {} , n_power_series= {} , neumann_grad= {} , exact_trace= {} , brute_force= {} \" . format ( self . n_dist , self . n_samples , self . n_power_series , self . neumann_grad , self . exact_trace , self . brute_force , )","title":"iResBlock"},{"location":"references/#normflows.flows.residual.iResBlock.__init__","text":"Parameters: Name Type Description Default nnet a nn.Module required n_power_series number of power series. If not None, uses a biased approximation to logdet. None exact_trace if False, uses a Hutchinson trace estimator. Otherwise computes the exact full Jacobian. False brute_force Computes the exact logdet. Only available for 2D inputs. False Source code in normflows/flows/residual.py 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 def __init__ ( self , nnet , geom_p = 0.5 , lamb = 2.0 , n_power_series = None , exact_trace = False , brute_force = False , n_samples = 1 , n_exact_terms = 2 , n_dist = \"geometric\" , neumann_grad = True , grad_in_forward = False , ): \"\"\" Args: nnet: a nn.Module n_power_series: number of power series. If not None, uses a biased approximation to logdet. exact_trace: if False, uses a Hutchinson trace estimator. Otherwise computes the exact full Jacobian. brute_force: Computes the exact logdet. Only available for 2D inputs. \"\"\" nn . Module . __init__ ( self ) self . nnet = nnet self . n_dist = n_dist self . geom_p = nn . Parameter ( torch . tensor ( np . log ( geom_p ) - np . log ( 1.0 - geom_p ))) self . lamb = nn . Parameter ( torch . tensor ( lamb )) self . n_samples = n_samples self . n_power_series = n_power_series self . exact_trace = exact_trace self . brute_force = brute_force self . n_exact_terms = n_exact_terms self . 
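The `n_power_series` argument switches iResBlock between the two estimators discussed above; a configuration sketch using the Residual wrapper, with illustrative settings:

```python
import normflows as nf

net = nf.nets.LipschitzMLP([2, 32, 2], lipschitz_const=0.9)

# Unbiased "Russian roulette" estimator: the number of power-series terms
# is sampled from a geometric distribution (the default behaviour).
flow_unbiased = nf.flows.Residual(net, n_power_series=None, n_exact_terms=2)

# Truncated estimator: a fixed 10-term series, biased but lower-variance.
flow_truncated = nf.flows.Residual(net, n_power_series=10)
```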
grad_in_forward = grad_in_forward self . neumann_grad = neumann_grad # store the samples of n. self . register_buffer ( \"last_n_samples\" , torch . zeros ( self . n_samples )) self . register_buffer ( \"last_firmom\" , torch . zeros ( 1 )) self . register_buffer ( \"last_secmom\" , torch . zeros ( 1 ))","title":"__init__"},{"location":"references/#normflows.flows.residual_test","text":"","title":"residual_test"},{"location":"references/#normflows.flows.stochastic","text":"","title":"stochastic"},{"location":"references/#normflows.flows.stochastic.HamiltonianMonteCarlo","text":"Bases: Flow Flow layer using the HMC proposal in Stochastic Normalising Flows See arXiv: 2002.06707 Source code in normflows/flows/stochastic.py 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 class HamiltonianMonteCarlo ( Flow ): \"\"\"Flow layer using the HMC proposal in Stochastic Normalising Flows See [arXiv: 2002.06707](https://arxiv.org/abs/2002.06707) \"\"\" def __init__ ( self , target , steps , log_step_size , log_mass , max_abs_grad = None ): \"\"\"Constructor Args: target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from. steps: The number of leapfrog steps log_step_size: The log step size used in the leapfrog integrator. shape (dim) log_mass: The log_mass determining the variance of the momentum samples. shape (dim) max_abs_grad: Maximum absolute value of the gradient of the target distribution's log probability. If set to None then no gradient clipping is applied. Useful for improving numerical stability.\"\"\" super () . __init__ () self . target = target self . steps = steps self . register_parameter ( \"log_step_size\" , torch . nn . Parameter ( log_step_size )) self . register_parameter ( \"log_mass\" , torch . nn . Parameter ( log_mass )) self . max_abs_grad = max_abs_grad def forward ( self , z ): # Draw momentum p = torch . randn_like ( z ) * torch . exp ( 0.5 * self . log_mass ) # leapfrog z_new = z . clone () p_new = p . clone () step_size = torch . exp ( self . log_step_size ) for i in range ( self . steps ): p_half = p_new - ( step_size / 2.0 ) * - self . gradlogP ( z_new ) z_new = z_new + step_size * ( p_half / torch . exp ( self . log_mass )) p_new = p_half - ( step_size / 2.0 ) * - self . gradlogP ( z_new ) # Metropolis Hastings correction probabilities = torch . exp ( self . target . log_prob ( z_new ) - self . target . log_prob ( z ) - 0.5 * torch . sum ( p_new ** 2 / torch . exp ( self . log_mass ), 1 ) + 0.5 * torch . sum ( p ** 2 / torch . exp ( self . log_mass ), 1 ) ) uniforms = torch . rand_like ( probabilities ) mask = uniforms < probabilities z_out = torch . where ( mask . unsqueeze ( 1 ), z_new , z ) return z_out , self . target . log_prob ( z ) - self . target . log_prob ( z_out ) def inverse ( self , z ): return self . forward ( z ) def gradlogP ( self , z ): z_ = z . detach () . requires_grad_ () logp = self . target . log_prob ( z_ ) grad = torch . autograd . grad ( logp , z_ , grad_outputs = torch . ones_like ( logp ))[ 0 ] if self . max_abs_grad : grad = torch . clamp ( grad , max = self . max_abs_grad , min =- self . max_abs_grad ) return grad","title":"HamiltonianMonteCarlo"},{"location":"references/#normflows.flows.stochastic.HamiltonianMonteCarlo.__init__","text":"Constructor Parameters: Name Type Description Default target The stationary distribution of this Markov transition, i.e. 
the target distribution to sample from. required steps The number of leapfrog steps required log_step_size The log step size used in the leapfrog integrator. shape (dim) required log_mass The log_mass determining the variance of the momentum samples. shape (dim) required max_abs_grad Maximum absolute value of the gradient of the target distribution's log probability. If set to None then no gradient clipping is applied. Useful for improving numerical stability. None Source code in normflows/flows/stochastic.py 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 def __init__ ( self , target , steps , log_step_size , log_mass , max_abs_grad = None ): \"\"\"Constructor Args: target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from. steps: The number of leapfrog steps log_step_size: The log step size used in the leapfrog integrator. shape (dim) log_mass: The log_mass determining the variance of the momentum samples. shape (dim) max_abs_grad: Maximum absolute value of the gradient of the target distribution's log probability. If set to None then no gradient clipping is applied. Useful for improving numerical stability.\"\"\" super () . __init__ () self . target = target self . steps = steps self . register_parameter ( \"log_step_size\" , torch . nn . Parameter ( log_step_size )) self . register_parameter ( \"log_mass\" , torch . nn . Parameter ( log_mass )) self . max_abs_grad = max_abs_grad","title":"__init__"},{"location":"references/#normflows.flows.stochastic.MetropolisHastings","text":"Bases: Flow Sampling through Metropolis Hastings in Stochastic Normalizing Flow See arXiv: 2002.06707 Source code in normflows/flows/stochastic.py 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 class MetropolisHastings ( Flow ): \"\"\"Sampling through Metropolis Hastings in Stochastic Normalizing Flow See [arXiv: 2002.06707](https://arxiv.org/abs/2002.06707) \"\"\" def __init__ ( self , target , proposal , steps ): \"\"\"Constructor Args: target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from. proposal: Proposal distribution steps: Number of MCMC steps to perform \"\"\" super () . __init__ () self . target = target self . proposal = proposal self . steps = steps def forward ( self , z ): # Initialize number of samples and log(det) num_samples = len ( z ) log_det = torch . zeros ( num_samples , dtype = z . dtype , device = z . device ) # Get log(p) for current samples log_p = self . target . log_prob ( z ) for i in range ( self . steps ): # Make proposal and get log(p) z_ , log_p_diff = self . proposal ( z ) log_p_ = self . target . log_prob ( z_ ) # Make acceptance decision w = torch . rand ( num_samples , dtype = z . dtype , device = z . device ) log_w_accept = log_p_ - log_p + log_p_diff w_accept = torch . clamp ( torch . exp ( log_w_accept ), max = 1 ) accept = w <= w_accept # Update samples, log(det), and log(p) z = torch . where ( accept . unsqueeze ( 1 ), z_ , z ) log_det_ = log_p - log_p_ log_det = torch . where ( accept , log_det + log_det_ , log_det ) log_p = torch . where ( accept , log_p_ , log_p ) return z , log_det def inverse ( self , z ): # Equivalent to forward pass return self . 
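A usage sketch for HamiltonianMonteCarlo with the TwoMoons target; the step sizes and masses are illustrative, and per the docstring both are passed in log space with shape (dim):

```python
import torch
import normflows as nf

torch.manual_seed(0)
hmc = nf.flows.HamiltonianMonteCarlo(
    target=nf.distributions.TwoMoons(),
    steps=5,
    log_step_size=torch.log(0.1 * torch.ones(2)),
    log_mass=torch.zeros(2),
)

z = torch.randn(64, 2)
z_new, log_det = hmc(z)   # Metropolis-corrected leapfrog proposal
print(z_new.shape, log_det.shape)
```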
forward ( z )","title":"MetropolisHastings"},{"location":"references/#normflows.flows.stochastic.MetropolisHastings.__init__","text":"Constructor Parameters: Name Type Description Default target The stationary distribution of this Markov transition, i.e. the target distribution to sample from. required proposal Proposal distribution required steps Number of MCMC steps to perform required Source code in normflows/flows/stochastic.py 12 13 14 15 16 17 18 19 20 21 22 23 def __init__ ( self , target , proposal , steps ): \"\"\"Constructor Args: target: The stationary distribution of this Markov transition, i.e. the target distribution to sample from. proposal: Proposal distribution steps: Number of MCMC steps to perform \"\"\" super () . __init__ () self . target = target self . proposal = proposal self . steps = steps","title":"__init__"},{"location":"references/#normflows.flows.stochastic_test","text":"","title":"stochastic_test"},{"location":"references/#normflows.nets","text":"","title":"nets"},{"location":"references/#normflows.nets.cnn","text":"","title":"cnn"},{"location":"references/#normflows.nets.cnn.ConvNet2d","text":"Bases: Module Convolutional Neural Network with leaky ReLU nonlinearities Source code in normflows/nets/cnn.py 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 class ConvNet2d ( nn . Module ): \"\"\" Convolutional Neural Network with leaky ReLU nonlinearities \"\"\" def __init__ ( self , channels , kernel_size , leaky = 0.0 , init_zeros = True , actnorm = False , weight_std = None , ): \"\"\"Constructor Args: channels: List of channels of conv layers, first entry is in_channels kernel_size: List of kernel sizes, same for height and width leaky: Leaky part of ReLU init_zeros: Flag whether last layer shall be initialized with zeros scale_output: Flag whether to scale output with a log scale parameter logscale_factor: Constant factor to be multiplied to log scaling actnorm: Flag whether activation normalization shall be done after each conv layer except output weight_std: Fixed std used to initialize every layer \"\"\" super () . __init__ () # Build network net = nn . ModuleList ([]) for i in range ( len ( kernel_size ) - 1 ): conv = nn . Conv2d ( channels [ i ], channels [ i + 1 ], kernel_size [ i ], padding = kernel_size [ i ] // 2 , bias = ( not actnorm ), ) if weight_std is not None : conv . weight . data . normal_ ( mean = 0.0 , std = weight_std ) net . append ( conv ) if actnorm : net . append ( utils . ActNorm (( channels [ i + 1 ],) + ( 1 , 1 ))) net . append ( nn . LeakyReLU ( leaky )) i = len ( kernel_size ) net . append ( nn . Conv2d ( channels [ i - 1 ], channels [ i ], kernel_size [ i - 1 ], padding = kernel_size [ i - 1 ] // 2 , ) ) if init_zeros : nn . init . zeros_ ( net [ - 1 ] . weight ) nn . init . zeros_ ( net [ - 1 ] . bias ) self . net = nn . Sequential ( * net ) def forward ( self , x ): return self . 
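A sketch of MetropolisHastings with a hand-rolled symmetric random-walk proposal; the RandomWalkProposal class below is illustrative, not part of the library. The proposal must return the proposed sample together with the forward/backward proposal log-density difference, which is zero for a symmetric walk:

```python
import torch
import normflows as nf

class RandomWalkProposal(torch.nn.Module):
    """Illustrative symmetric Gaussian random-walk proposal."""
    def __init__(self, scale=0.1):
        super().__init__()
        self.scale = scale

    def forward(self, z):
        z_ = z + self.scale * torch.randn_like(z)
        # log q(z | z') - log q(z' | z) = 0 for a symmetric proposal
        return z_, torch.zeros(len(z), dtype=z.dtype, device=z.device)

mh = nf.flows.MetropolisHastings(nf.distributions.TwoMoons(),
                                 RandomWalkProposal(), steps=10)
z, log_det = mh(torch.randn(64, 2))
```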
net ( x )","title":"ConvNet2d"},{"location":"references/#normflows.nets.cnn.ConvNet2d.__init__","text":"Constructor Parameters: Name Type Description Default channels List of channels of conv layers, first entry is in_channels required kernel_size List of kernel sizes, same for height and width required leaky Leaky part of ReLU 0.0 init_zeros Flag whether last layer shall be initialized with zeros True scale_output Flag whether to scale output with a log scale parameter required logscale_factor Constant factor to be multiplied to log scaling required actnorm Flag whether activation normalization shall be done after each conv layer except output False weight_std Fixed std used to initialize every layer None Source code in normflows/nets/cnn.py 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 def __init__ ( self , channels , kernel_size , leaky = 0.0 , init_zeros = True , actnorm = False , weight_std = None , ): \"\"\"Constructor Args: channels: List of channels of conv layers, first entry is in_channels kernel_size: List of kernel sizes, same for height and width leaky: Leaky part of ReLU init_zeros: Flag whether last layer shall be initialized with zeros scale_output: Flag whether to scale output with a log scale parameter logscale_factor: Constant factor to be multiplied to log scaling actnorm: Flag whether activation normalization shall be done after each conv layer except output weight_std: Fixed std used to initialize every layer \"\"\" super () . __init__ () # Build network net = nn . ModuleList ([]) for i in range ( len ( kernel_size ) - 1 ): conv = nn . Conv2d ( channels [ i ], channels [ i + 1 ], kernel_size [ i ], padding = kernel_size [ i ] // 2 , bias = ( not actnorm ), ) if weight_std is not None : conv . weight . data . normal_ ( mean = 0.0 , std = weight_std ) net . append ( conv ) if actnorm : net . append ( utils . ActNorm (( channels [ i + 1 ],) + ( 1 , 1 ))) net . append ( nn . LeakyReLU ( leaky )) i = len ( kernel_size ) net . append ( nn . Conv2d ( channels [ i - 1 ], channels [ i ], kernel_size [ i - 1 ], padding = kernel_size [ i - 1 ] // 2 , ) ) if init_zeros : nn . init . zeros_ ( net [ - 1 ] . weight ) nn . init . zeros_ ( net [ - 1 ] . bias ) self . net = nn . Sequential ( * net )","title":"__init__"},{"location":"references/#normflows.nets.lipschitz","text":"","title":"lipschitz"},{"location":"references/#normflows.nets.lipschitz.LipschitzCNN","text":"Bases: Module Convolutional neural network which is Lipschitz continuous with Lipschitz constant L < 1 Source code in normflows/nets/lipschitz.py 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 class LipschitzCNN ( nn . 
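A shape-check sketch for ConvNet2d; the channel and kernel lists are illustrative (note that `channels` has one more entry than `kernel_size`):

```python
import torch
import normflows as nf

net = nf.nets.ConvNet2d(channels=[3, 16, 16, 6], kernel_size=[3, 1, 3], leaky=0.1)
x = torch.randn(2, 3, 8, 8)
print(net(x).shape)   # torch.Size([2, 6, 8, 8]); last layer is zero-initialized
```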
Module ): \"\"\" Convolutional neural network which is Lipschitz continuous with Lipschitz constant L < 1 \"\"\" def __init__ ( self , channels , kernel_size , lipschitz_const = 0.97 , max_lipschitz_iter = 5 , lipschitz_tolerance = None , init_zeros = True , ): \"\"\"Constructor Args: channels: Integer list with the number of channels of the layers kernel_size: Integer list of kernel sizes of the layers lipschitz_const: Maximum Lipschitz constant of each layer max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 init_zeros: Flag, whether to initialize last layer approximately with zeros \"\"\" super () . __init__ () self . n_layers = len ( kernel_size ) self . channels = channels self . kernel_size = kernel_size self . lipschitz_const = lipschitz_const self . max_lipschitz_iter = max_lipschitz_iter self . lipschitz_tolerance = lipschitz_tolerance self . init_zeros = init_zeros layers = [] for i in range ( self . n_layers ): layers += [ Swish (), InducedNormConv2d ( in_channels = channels [ i ], out_channels = channels [ i + 1 ], kernel_size = kernel_size [ i ], stride = 1 , padding = kernel_size [ i ] // 2 , bias = True , coeff = lipschitz_const , domain = 2 , codomain = 2 , n_iterations = max_lipschitz_iter , atol = lipschitz_tolerance , rtol = lipschitz_tolerance , zero_init = init_zeros if i == ( self . n_layers - 1 ) else False , ), ] self . net = nn . Sequential ( * layers ) def forward ( self , x ): return self . net ( x )","title":"LipschitzCNN"},{"location":"references/#normflows.nets.lipschitz.LipschitzCNN.__init__","text":"Constructor Parameters: Name Type Description Default channels Integer list with the number of channels of the layers required kernel_size Integer list of kernel sizes of the layers required lipschitz_const Maximum Lipschitz constant of each layer 0.97 max_lipschitz_iter Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used 5 lipschitz_tolerance Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 None init_zeros Flag, whether to initialize last layer approximately with zeros True Source code in normflows/nets/lipschitz.py 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 def __init__ ( self , channels , kernel_size , lipschitz_const = 0.97 , max_lipschitz_iter = 5 , lipschitz_tolerance = None , init_zeros = True , ): \"\"\"Constructor Args: channels: Integer list with the number of channels of the layers kernel_size: Integer list of kernel sizes of the layers lipschitz_const: Maximum Lipschitz constant of each layer max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 init_zeros: Flag, whether to initialize last layer approximately with zeros \"\"\" super () . __init__ () self . n_layers = len ( kernel_size ) self . channels = channels self . kernel_size = kernel_size self . lipschitz_const = lipschitz_const self . 
max_lipschitz_iter = max_lipschitz_iter self . lipschitz_tolerance = lipschitz_tolerance self . init_zeros = init_zeros layers = [] for i in range ( self . n_layers ): layers += [ Swish (), InducedNormConv2d ( in_channels = channels [ i ], out_channels = channels [ i + 1 ], kernel_size = kernel_size [ i ], stride = 1 , padding = kernel_size [ i ] // 2 , bias = True , coeff = lipschitz_const , domain = 2 , codomain = 2 , n_iterations = max_lipschitz_iter , atol = lipschitz_tolerance , rtol = lipschitz_tolerance , zero_init = init_zeros if i == ( self . n_layers - 1 ) else False , ), ] self . net = nn . Sequential ( * layers )","title":"__init__"},{"location":"references/#normflows.nets.lipschitz.LipschitzMLP","text":"Bases: Module Fully connected neural net which is Lipschitz continuous with Lipschitz constant L < 1 Source code in normflows/nets/lipschitz.py class LipschitzMLP ( nn . Module ): \"\"\"Fully connected neural net which is Lipschitz continuous with Lipschitz constant L < 1\"\"\" def __init__ ( self , channels , lipschitz_const = 0.97 , max_lipschitz_iter = 5 , lipschitz_tolerance = None , init_zeros = True , ): \"\"\" Constructor channels: Integer list with the number of channels of the layers lipschitz_const: Maximum Lipschitz constant of each layer max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 init_zeros: Flag, whether to initialize last layer approximately with zeros \"\"\" super () . __init__ () self . n_layers = len ( channels ) - 1 self . channels = channels self . lipschitz_const = lipschitz_const self . max_lipschitz_iter = max_lipschitz_iter self . lipschitz_tolerance = lipschitz_tolerance self . init_zeros = init_zeros layers = [] for i in range ( self . n_layers ): layers += [ Swish (), InducedNormLinear ( in_features = channels [ i ], out_features = channels [ i + 1 ], coeff = lipschitz_const , domain = 2 , codomain = 2 , n_iterations = max_lipschitz_iter , atol = lipschitz_tolerance , rtol = lipschitz_tolerance , zero_init = init_zeros if i == ( self . n_layers - 1 ) else False , ), ] self . net = nn . Sequential ( * layers ) def forward ( self , x ): return self .
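A corresponding sketch for LipschitzCNN; the channel and kernel lists are illustrative, and matching input and output channels makes the net usable inside a Residual block:

```python
import torch
import normflows as nf

net = nf.nets.LipschitzCNN(channels=[4, 32, 32, 4], kernel_size=[3, 1, 3])
x = torch.randn(2, 4, 8, 8)
print(net(x).shape)   # torch.Size([2, 4, 8, 8]); each layer enforces L <= 0.97
```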
net ( x )","title":"LipschitzMLP"},{"location":"references/#normflows.nets.lipschitz.LipschitzMLP.__init__","text":"Constructor channels: Integer list with the number of channels of the layers lipschitz_const: Maximum Lipschitz constant of each layer max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 init_zeros: Flag, whether to initialize last layer approximately with zeros Source code in normflows/nets/lipschitz.py 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 def __init__ ( self , channels , lipschitz_const = 0.97 , max_lipschitz_iter = 5 , lipschitz_tolerance = None , init_zeros = True , ): \"\"\" Constructor channels: Integer list with the number of channels of the layers lipschitz_const: Maximum Lipschitz constant of each layer max_lipschitz_iter: Maximum number of iterations used to ensure that layers are Lipschitz continuous with L smaller than set maximum; if None, tolerance is used lipschitz_tolerance: Float, tolerance used to ensure Lipschitz continuity if max_lipschitz_iter is None, typically 1e-3 init_zeros: Flag, whether to initialize last layer approximately with zeros \"\"\" super () . __init__ () self . n_layers = len ( channels ) - 1 self . channels = channels self . lipschitz_const = lipschitz_const self . max_lipschitz_iter = max_lipschitz_iter self . lipschitz_tolerance = lipschitz_tolerance self . init_zeros = init_zeros layers = [] for i in range ( self . n_layers ): layers += [ Swish (), InducedNormLinear ( in_features = channels [ i ], out_features = channels [ i + 1 ], coeff = lipschitz_const , domain = 2 , codomain = 2 , n_iterations = max_lipschitz_iter , atol = lipschitz_tolerance , rtol = lipschitz_tolerance , zero_init = init_zeros if i == ( self . n_layers - 1 ) else False , ), ] self . net = nn . Sequential ( * layers )","title":"__init__"},{"location":"references/#normflows.nets.lipschitz.projmax_","text":"Inplace argmax on absolute value. Source code in normflows/nets/lipschitz.py 651 652 653 654 655 656 def projmax_ ( v ): \"\"\"Inplace argmax on absolute value.\"\"\" ind = torch . argmax ( torch . abs ( v )) v . zero_ () v [ ind ] = 1 return v","title":"projmax_"},{"location":"references/#normflows.nets.made","text":"Implementation of MADE. Code taken from https://github.com/bayesiains/nsf","title":"made"},{"location":"references/#normflows.nets.made.MADE","text":"Bases: Module Implementation of MADE. It can use either feedforward blocks or residual blocks (default is residual). Optionally, it can use batch norm or dropout within blocks (default is no). Source code in normflows/nets/made.py 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 class MADE ( nn . Module ): \"\"\"Implementation of MADE. It can use either feedforward blocks or residual blocks (default is residual). Optionally, it can use batch norm or dropout within blocks (default is no). 
\"\"\" def __init__ ( self , features , hidden_features , context_features = None , num_blocks = 2 , output_multiplier = 1 , use_residual_blocks = True , random_mask = False , permute_mask = False , activation = F . relu , dropout_probability = 0.0 , use_batch_norm = False , preprocessing = None , ): if use_residual_blocks and random_mask : raise ValueError ( \"Residual blocks can't be used with random masks.\" ) super () . __init__ () # Preprocessing if preprocessing is None : self . preprocessing = torch . nn . Identity () else : self . preprocessing = preprocessing # Initial layer. input_degrees_ = _get_input_degrees ( features ) if permute_mask : input_degrees_ = input_degrees_ [ torch . randperm ( features )] self . initial_layer = MaskedLinear ( in_degrees = input_degrees_ , out_features = hidden_features , autoregressive_features = features , random_mask = random_mask , is_output = False , ) if context_features is not None : self . context_layer = nn . Linear ( context_features , hidden_features ) # Residual blocks. blocks = [] if use_residual_blocks : block_constructor = MaskedResidualBlock else : block_constructor = MaskedFeedforwardBlock prev_out_degrees = self . initial_layer . degrees for _ in range ( num_blocks ): blocks . append ( block_constructor ( in_degrees = prev_out_degrees , autoregressive_features = features , context_features = context_features , random_mask = random_mask , activation = activation , dropout_probability = dropout_probability , use_batch_norm = use_batch_norm , ) ) prev_out_degrees = blocks [ - 1 ] . degrees self . blocks = nn . ModuleList ( blocks ) # Final layer. self . final_layer = MaskedLinear ( in_degrees = prev_out_degrees , out_features = features * output_multiplier , autoregressive_features = features , random_mask = random_mask , is_output = True , out_degrees_ = input_degrees_ , ) def forward ( self , inputs , context = None ): outputs = self . preprocessing ( inputs ) outputs = self . initial_layer ( outputs ) if context is not None : outputs += self . context_layer ( context ) for block in self . blocks : outputs = block ( outputs , context ) outputs = self . final_layer ( outputs ) return outputs","title":"MADE"},{"location":"references/#normflows.nets.made.MaskedFeedforwardBlock","text":"Bases: Module A feedforward block based on a masked linear module. NOTE In this implementation, the number of output features is taken to be equal to the number of input features. Source code in normflows/nets/made.py 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 class MaskedFeedforwardBlock ( nn . Module ): \"\"\"A feedforward block based on a masked linear module. **NOTE** In this implementation, the number of output features is taken to be equal to the number of input features. \"\"\" def __init__ ( self , in_degrees , autoregressive_features , context_features = None , random_mask = False , activation = F . relu , dropout_probability = 0.0 , use_batch_norm = False , ): super () . __init__ () features = len ( in_degrees ) # Batch norm. if use_batch_norm : self . batch_norm = nn . BatchNorm1d ( features , eps = 1e-3 ) else : self . batch_norm = None if context_features is not None : raise NotImplementedError () # Masked linear. self . 
linear = MaskedLinear ( in_degrees = in_degrees , out_features = features , autoregressive_features = autoregressive_features , random_mask = random_mask , is_output = False , ) self . degrees = self . linear . degrees # Activation and dropout. self . activation = activation self . dropout = nn . Dropout ( p = dropout_probability ) def forward ( self , inputs , context = None ): if context is not None : raise NotImplementedError () if self . batch_norm : outputs = self . batch_norm ( inputs ) else : outputs = inputs outputs = self . linear ( outputs ) outputs = self . activation ( outputs ) outputs = self . dropout ( outputs ) return outputs","title":"MaskedFeedforwardBlock"},{"location":"references/#normflows.nets.made.MaskedLinear","text":"Bases: Linear A linear module with a masked weight matrix. Source code in normflows/nets/made.py 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 class MaskedLinear ( nn . Linear ): \"\"\"A linear module with a masked weight matrix.\"\"\" def __init__ ( self , in_degrees , out_features , autoregressive_features , random_mask , is_output , bias = True , out_degrees_ = None , ): super () . __init__ ( in_features = len ( in_degrees ), out_features = out_features , bias = bias ) mask , degrees = self . _get_mask_and_degrees ( in_degrees = in_degrees , out_features = out_features , autoregressive_features = autoregressive_features , random_mask = random_mask , is_output = is_output , out_degrees_ = out_degrees_ , ) self . register_buffer ( \"mask\" , mask ) self . register_buffer ( \"degrees\" , degrees ) @classmethod def _get_mask_and_degrees ( cls , in_degrees , out_features , autoregressive_features , random_mask , is_output , out_degrees_ = None , ): if is_output : if out_degrees_ is None : out_degrees_ = _get_input_degrees ( autoregressive_features ) out_degrees = tile ( out_degrees_ , out_features // autoregressive_features ) mask = ( out_degrees [ ... , None ] > in_degrees ) . float () else : if random_mask : min_in_degree = torch . min ( in_degrees ) . item () min_in_degree = min ( min_in_degree , autoregressive_features - 1 ) out_degrees = torch . randint ( low = min_in_degree , high = autoregressive_features , size = [ out_features ], dtype = torch . long , ) else : max_ = max ( 1 , autoregressive_features - 1 ) min_ = min ( 1 , autoregressive_features - 1 ) out_degrees = torch . arange ( out_features ) % max_ + min_ mask = ( out_degrees [ ... , None ] >= in_degrees ) . float () return mask , out_degrees def forward ( self , x ): return F . linear ( x , self . weight * self . mask , self . bias )","title":"MaskedLinear"},{"location":"references/#normflows.nets.made.MaskedResidualBlock","text":"Bases: Module A residual block containing masked linear modules. Source code in normflows/nets/made.py 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 class MaskedResidualBlock ( nn . Module ): \"\"\"A residual block containing masked linear modules.\"\"\" def __init__ ( self , in_degrees , autoregressive_features , context_features = None , random_mask = False , activation = F . 
relu , dropout_probability = 0.0 , use_batch_norm = False , zero_initialization = True , ): if random_mask : raise ValueError ( \"Masked residual block can't be used with random masks.\" ) super () . __init__ () features = len ( in_degrees ) if context_features is not None : self . context_layer = nn . Linear ( context_features , features ) # Batch norm. self . use_batch_norm = use_batch_norm if use_batch_norm : self . batch_norm_layers = nn . ModuleList ( [ nn . BatchNorm1d ( features , eps = 1e-3 ) for _ in range ( 2 )] ) # Masked linear. linear_0 = MaskedLinear ( in_degrees = in_degrees , out_features = features , autoregressive_features = autoregressive_features , random_mask = False , is_output = False , ) linear_1 = MaskedLinear ( in_degrees = linear_0 . degrees , out_features = features , autoregressive_features = autoregressive_features , random_mask = False , is_output = False , ) self . linear_layers = nn . ModuleList ([ linear_0 , linear_1 ]) self . degrees = linear_1 . degrees if torch . all ( self . degrees >= in_degrees ) . item () != 1 : raise RuntimeError ( \"In a masked residual block, the output degrees can't be\" \" less than the corresponding input degrees.\" ) # Activation and dropout self . activation = activation self . dropout = nn . Dropout ( p = dropout_probability ) # Initialization. if zero_initialization : init . uniform_ ( self . linear_layers [ - 1 ] . weight , a =- 1e-3 , b = 1e-3 ) init . uniform_ ( self . linear_layers [ - 1 ] . bias , a =- 1e-3 , b = 1e-3 ) def forward ( self , inputs , context = None ): temps = inputs if self . use_batch_norm : temps = self . batch_norm_layers [ 0 ]( temps ) temps = self . activation ( temps ) temps = self . linear_layers [ 0 ]( temps ) if self . use_batch_norm : temps = self . batch_norm_layers [ 1 ]( temps ) temps = self . activation ( temps ) temps = self . dropout ( temps ) temps = self . linear_layers [ 1 ]( temps ) if context is not None : temps = F . glu ( torch . cat (( temps , self . context_layer ( context )), dim = 1 ), dim = 1 ) return inputs + temps","title":"MaskedResidualBlock"},{"location":"references/#normflows.nets.made_test","text":"Tests for MADE. Code partially taken from https://github.com/bayesiains/nsf","title":"made_test"},{"location":"references/#normflows.nets.mlp","text":"","title":"mlp"},{"location":"references/#normflows.nets.mlp.MLP","text":"Bases: Module A multilayer perceptron with Leaky ReLU nonlinearities Source code in normflows/nets/mlp.py 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 class MLP ( nn . Module ): \"\"\" A multilayer perceptron with Leaky ReLU nonlinearities \"\"\" def __init__ ( self , layers , leaky = 0.0 , score_scale = None , output_fn = None , output_scale = None , init_zeros = False , dropout = None , ): \"\"\" layers: list of layer sizes from start to end leaky: slope of the leaky part of the ReLU, if 0.0, standard ReLU is used score_scale: Factor to apply to the scores, i.e. output before output_fn. output_fn: String, function to be applied to the output, either None, \"sigmoid\", \"relu\", \"tanh\", or \"clampexp\" output_scale: Rescale outputs if output_fn is specified, i.e. 
```scale * output_fn(out / scale)``` init_zeros: Flag, if true, weights and biases of last layer are initialized with zeros (helpful for deep models, see [arXiv 1807.03039](https://arxiv.org/abs/1807.03039)) dropout: Float, if specified, dropout is done before last layer; if None, no dropout is done \"\"\" super () . __init__ () net = nn . ModuleList ([]) for k in range ( len ( layers ) - 2 ): net . append ( nn . Linear ( layers [ k ], layers [ k + 1 ])) net . append ( nn . LeakyReLU ( leaky )) if dropout is not None : net . append ( nn . Dropout ( p = dropout )) net . append ( nn . Linear ( layers [ - 2 ], layers [ - 1 ])) if init_zeros : nn . init . zeros_ ( net [ - 1 ] . weight ) nn . init . zeros_ ( net [ - 1 ] . bias ) if output_fn is not None : if score_scale is not None : net . append ( utils . ConstScaleLayer ( score_scale )) if output_fn == \"sigmoid\" : net . append ( nn . Sigmoid ()) elif output_fn == \"relu\" : net . append ( nn . ReLU ()) elif output_fn == \"tanh\" : net . append ( nn . Tanh ()) elif output_fn == \"clampexp\" : net . append ( utils . ClampExp ()) else : raise NotImplementedError ( \"This output function is not implemented.\" ) if output_scale is not None : net . append ( utils . ConstScaleLayer ( output_scale )) self . net = nn . Sequential ( * net ) def forward ( self , x ): return self . net ( x )","title":"MLP"},{"location":"references/#normflows.nets.mlp.MLP.__init__","text":"layers: list of layer sizes from start to end leaky: slope of the leaky part of the ReLU, if 0.0, standard ReLU is used score_scale: Factor to apply to the scores, i.e. output before output_fn. output_fn: String, function to be applied to the output, either None, \"sigmoid\", \"relu\", \"tanh\", or \"clampexp\" output_scale: Rescale outputs if output_fn is specified, i.e. scale * output_fn(out / scale) init_zeros: Flag, if true, weights and biases of last layer are initialized with zeros (helpful for deep models, see arXiv 1807.03039 ) dropout: Float, if specified, dropout is done before last layer; if None, no dropout is done Source code in normflows/nets/mlp.py 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 def __init__ ( self , layers , leaky = 0.0 , score_scale = None , output_fn = None , output_scale = None , init_zeros = False , dropout = None , ): \"\"\" layers: list of layer sizes from start to end leaky: slope of the leaky part of the ReLU, if 0.0, standard ReLU is used score_scale: Factor to apply to the scores, i.e. output before output_fn. output_fn: String, function to be applied to the output, either None, \"sigmoid\", \"relu\", \"tanh\", or \"clampexp\" output_scale: Rescale outputs if output_fn is specified, i.e. ```scale * output_fn(out / scale)``` init_zeros: Flag, if true, weights and biases of last layer are initialized with zeros (helpful for deep models, see [arXiv 1807.03039](https://arxiv.org/abs/1807.03039)) dropout: Float, if specified, dropout is done before last layer; if None, no dropout is done \"\"\" super () . __init__ () net = nn . ModuleList ([]) for k in range ( len ( layers ) - 2 ): net . append ( nn . Linear ( layers [ k ], layers [ k + 1 ])) net . append ( nn . LeakyReLU ( leaky )) if dropout is not None : net . append ( nn . Dropout ( p = dropout )) net . append ( nn . Linear ( layers [ - 2 ], layers [ - 1 ])) if init_zeros : nn . init . zeros_ ( net [ - 1 ] . weight ) nn . init . zeros_ ( net [ - 1 ] . 
bias ) if output_fn is not None : if score_scale is not None : net . append ( utils . ConstScaleLayer ( score_scale )) if output_fn == \"sigmoid\" : net . append ( nn . Sigmoid ()) elif output_fn == \"relu\" : net . append ( nn . ReLU ()) elif output_fn == \"tanh\" : net . append ( nn . Tanh ()) elif output_fn == \"clampexp\" : net . append ( utils . ClampExp ()) else : raise NotImplementedError ( \"This output function is not implemented.\" ) if output_scale is not None : net . append ( utils . ConstScaleLayer ( output_scale )) self . net = nn . Sequential ( * net )","title":"__init__"},{"location":"references/#normflows.nets.resnet","text":"","title":"resnet"},{"location":"references/#normflows.nets.resnet.ResidualBlock","text":"Bases: Module A general-purpose residual block. Works only with 1-dim inputs. Source code in normflows/nets/resnet.py 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 class ResidualBlock ( nn . Module ): \"\"\"A general-purpose residual block. Works only with 1-dim inputs.\"\"\" def __init__ ( self , features , context_features , activation = F . relu , dropout_probability = 0.0 , use_batch_norm = False , zero_initialization = True , ): super () . __init__ () self . activation = activation self . use_batch_norm = use_batch_norm if use_batch_norm : self . batch_norm_layers = nn . ModuleList ( [ nn . BatchNorm1d ( features , eps = 1e-3 ) for _ in range ( 2 )] ) if context_features is not None : self . context_layer = nn . Linear ( context_features , features ) self . linear_layers = nn . ModuleList ( [ nn . Linear ( features , features ) for _ in range ( 2 )] ) self . dropout = nn . Dropout ( p = dropout_probability ) if zero_initialization : init . uniform_ ( self . linear_layers [ - 1 ] . weight , - 1e-3 , 1e-3 ) init . uniform_ ( self . linear_layers [ - 1 ] . bias , - 1e-3 , 1e-3 ) def forward ( self , inputs , context = None ): temps = inputs if self . use_batch_norm : temps = self . batch_norm_layers [ 0 ]( temps ) temps = self . activation ( temps ) temps = self . linear_layers [ 0 ]( temps ) if self . use_batch_norm : temps = self . batch_norm_layers [ 1 ]( temps ) temps = self . activation ( temps ) temps = self . dropout ( temps ) temps = self . linear_layers [ 1 ]( temps ) if context is not None : temps = F . glu ( torch . cat (( temps , self . context_layer ( context )), dim = 1 ), dim = 1 ) return inputs + temps","title":"ResidualBlock"},{"location":"references/#normflows.nets.resnet.ResidualNet","text":"Bases: Module A general-purpose residual network. Works only with 1-dim inputs. Source code in normflows/nets/resnet.py 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 class ResidualNet ( nn . Module ): \"\"\"A general-purpose residual network. Works only with 1-dim inputs.\"\"\" def __init__ ( self , in_features , out_features , hidden_features , context_features = None , num_blocks = 2 , activation = F . relu , dropout_probability = 0.0 , use_batch_norm = False , preprocessing = None , ): super () . __init__ () self . hidden_features = hidden_features self . context_features = context_features self . preprocessing = preprocessing if context_features is not None : self . initial_layer = nn . Linear ( in_features + context_features , hidden_features ) else : self . initial_layer = nn . Linear ( in_features , hidden_features ) self . blocks = nn . 
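
A minimal, hedged usage sketch for the MLP class documented above (the layer sizes and the LeakyReLU slope are illustrative choices, not library defaults):

```python
import torch
from normflows.nets.mlp import MLP  # module path as given in the reference above

# 2 -> 64 -> 64 -> 2 network; the last layer is zero-initialized, which is
# often helpful when the MLP parameterizes a flow (see init_zeros above).
net = MLP([2, 64, 64, 2], leaky=0.01, init_zeros=True)
x = torch.randn(5, 2)
print(net(x))  # all zeros at initialization because of init_zeros=True
```
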
ModuleList ( [ ResidualBlock ( features = hidden_features , context_features = context_features , activation = activation , dropout_probability = dropout_probability , use_batch_norm = use_batch_norm , ) for _ in range ( num_blocks ) ] ) self . final_layer = nn . Linear ( hidden_features , out_features ) def forward ( self , inputs , context = None ): if self . preprocessing is None : temps = inputs else : temps = self . preprocessing ( inputs ) if context is None : temps = self . initial_layer ( temps ) else : temps = self . initial_layer ( torch . cat (( temps , context ), dim = 1 )) for block in self . blocks : temps = block ( temps , context = context ) outputs = self . final_layer ( temps ) return outputs","title":"ResidualNet"},{"location":"references/#normflows.sampling","text":"","title":"sampling"},{"location":"references/#normflows.sampling.hais","text":"","title":"hais"},{"location":"references/#normflows.sampling.hais.HAIS","text":"Class which performs HAIS Source code in normflows/sampling/hais.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 class HAIS : \"\"\" Class which performs HAIS \"\"\" def __init__ ( self , betas , prior , target , num_leapfrog , step_size , log_mass ): \"\"\" Args: betas: Annealing schedule, the jth target is ```f_j(x) = f_0(x)^{\\beta_j} f_n(x)^{1-\\beta_j}``` where the target is proportional to f_0 and the prior is proportional to f_n. The number of intermediate steps is inferred from the shape of betas. Should be of the form 1 = \\beta_0 > \\beta_1 > ... > \\beta_n = 0 prior: The prior distribution to start the HAIS chain. target: The target distribution from which we would like to draw weighted samples. num_leapfrog: Number of leapfrog steps in the HMC transitions. step_size: step_size to use for HMC transitions. log_mass: log_mass to use for HMC transitions. \"\"\" self . prior = prior self . target = target self . layers = [] n = betas . shape [ 0 ] - 1 for i in range ( n - 1 , 0 , - 1 ): intermediate_target = distributions . LinearInterpolation ( self . target , self . prior , betas [ i ] ) self . layers += [ flows . HamiltonianMonteCarlo ( intermediate_target , num_leapfrog , torch . log ( step_size ), log_mass ) ] def sample ( self , num_samples ): \"\"\"Run HAIS to draw samples from the target with appropriate weights. Args: num_samples: The number of samples to draw. \"\"\" samples , log_weights = self . prior . forward ( num_samples ) log_weights = - log_weights for i in range ( len ( self . layers )): samples , log_weights_addition = self . layers [ i ] . forward ( samples ) log_weights += log_weights_addition log_weights += self . target . log_prob ( samples ) return samples , log_weights","title":"HAIS"},{"location":"references/#normflows.sampling.hais.HAIS.__init__","text":"Parameters: Name Type Description Default betas Annealing schedule, the jth target is f_j(x) = f_0(x)^{\beta_j} f_n(x)^{1-\beta_j} where the target is proportional to f_0 and the prior is proportional to f_n. The number of intermediate steps is inferred from the shape of betas. Should be of the form 1 = \beta_0 > \beta_1 > ... > \beta_n = 0 required prior The prior distribution to start the HAIS chain. required target The target distribution from which we would like to draw weighted samples. required num_leapfrog Number of leapfrog steps in the HMC transitions. required step_size step_size to use for HMC transitions. required log_mass log_mass to use for HMC transitions. 
required Source code in normflows/sampling/hais.py 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 def __init__ ( self , betas , prior , target , num_leapfrog , step_size , log_mass ): \"\"\" Args: betas: Annealing schedule, the jth target is ```f_j(x) = f_0(x)^{\\beta_j} f_n(x)^{1-\\beta_j}``` where the target is proportional to f_0 and the prior is proportional to f_n. The number of intermediate steps is inferred from the shape of betas. Should be of the form 1 = \\beta_0 > \\beta_1 > ... > \\beta_n = 0 prior: The prior distribution to start the HAIS chain. target: The target distribution from which we would like to draw weighted samples. num_leapfrog: Number of leapfrog steps in the HMC transitions. step_size: step_size to use for HMC transitions. log_mass: log_mass to use for HMC transitions. \"\"\" self . prior = prior self . target = target self . layers = [] n = betas . shape [ 0 ] - 1 for i in range ( n - 1 , 0 , - 1 ): intermediate_target = distributions . LinearInterpolation ( self . target , self . prior , betas [ i ] ) self . layers += [ flows . HamiltonianMonteCarlo ( intermediate_target , num_leapfrog , torch . log ( step_size ), log_mass ) ]","title":"__init__"},{"location":"references/#normflows.sampling.hais.HAIS.sample","text":"Run HAIS to draw samples from the target with appropriate weights. Parameters: Name Type Description Default num_samples The number of samples to draw. required Source code in normflows/sampling/hais.py 37 38 39 40 41 42 43 44 45 46 47 48 49 def sample ( self , num_samples ): \"\"\"Run HAIS to draw samples from the target with appropriate weights. Args: num_samples: The number of samples to draw. \"\"\" samples , log_weights = self . prior . forward ( num_samples ) log_weights = - log_weights for i in range ( len ( self . layers )): samples , log_weights_addition = self . layers [ i ] . forward ( samples ) log_weights += log_weights_addition log_weights += self . target . log_prob ( samples ) return samples , log_weights","title":"sample"},{"location":"references/#normflows.transforms","text":"","title":"transforms"},{"location":"references/#normflows.transforms.Logit","text":"Bases: Flow Logit mapping of image tensor, see RealNVP paper logit(alpha + (1 - 2*alpha) * x) where logit(x) = log(x / (1 - x)) Source code in normflows/transforms.py 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 class Logit ( flows . Flow ): \"\"\"Logit mapping of image tensor, see RealNVP paper ``` logit(alpha + (1 - 2*alpha) * x) where logit(x) = log(x / (1 - x)) ``` \"\"\" def __init__ ( self , alpha = 0.05 ): \"\"\"Constructor Args: alpha: Alpha parameter, see above \"\"\" super () . __init__ () self . alpha = alpha def forward ( self , z ): beta = 1 - 2 * self . alpha sum_dims = list ( range ( 1 , z . dim ())) ls = torch . sum ( torch . nn . functional . logsigmoid ( z ), dim = sum_dims ) mls = torch . sum ( torch . nn . functional . logsigmoid ( - z ), dim = sum_dims ) log_det = - np . log ( beta ) * np . prod ([ * z . shape [ 1 :]]) + ls + mls z = ( torch . sigmoid ( z ) - self . alpha ) / beta return z , log_det def inverse ( self , z ): beta = 1 - 2 * self . alpha z = self . alpha + beta * z logz = torch . log ( z ) log1mz = torch . log ( 1 - z ) z = logz - log1mz sum_dims = list ( range ( 1 , z . dim ())) log_det = ( np . log ( beta ) * np . prod ([ * z . shape [ 1 :]]) - torch . sum ( logz , dim = sum_dims ) - torch . 
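
To tie the HAIS pieces together, here is a hedged usage sketch; the prior, target, step size, and mass vector below are illustrative assumptions, not library defaults:

```python
import torch
import normflows as nf
from normflows.sampling.hais import HAIS  # module path as given in the reference above

# Annealing schedule 1 = beta_0 > ... > beta_n = 0 with 9 intermediate targets
betas = torch.linspace(1.0, 0.0, 11)
prior = nf.distributions.DiagGaussian(2)  # the chain starts from the prior
target = nf.distributions.TwoMoons()      # we want weighted samples from this

hais = HAIS(
    betas=betas,
    prior=prior,
    target=target,
    num_leapfrog=5,
    step_size=torch.tensor(0.1),  # passed through torch.log internally, so a tensor
    log_mass=torch.zeros(2),      # illustrative unit mass per dimension
)
samples, log_weights = hais.sample(2 ** 10)
```
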
sum ( log1mz , dim = sum_dims ) ) return z , log_det","title":"Logit"},{"location":"references/#normflows.transforms.Logit.__init__","text":"Constructor Parameters: Name Type Description Default alpha Alpha parameter, see above 0.05 Source code in normflows/transforms.py 17 18 19 20 21 22 23 24 def __init__ ( self , alpha = 0.05 ): \"\"\"Constructor Args: alpha: Alpha parameter, see above \"\"\" super () . __init__ () self . alpha = alpha","title":"__init__"},{"location":"references/#normflows.transforms.Shift","text":"Bases: Flow Shift data by a fixed constant Default is -0.5 to shift data from interval [0, 1] to [-0.5, 0.5] Source code in normflows/transforms.py 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 class Shift ( flows . Flow ): \"\"\"Shift data by a fixed constant Default is -0.5 to shift data from interval [0, 1] to [-0.5, 0.5] \"\"\" def __init__ ( self , shift =- 0.5 ): \"\"\"Constructor Args: shift: Shift to apply to the data \"\"\" super () . __init__ () self . shift = shift def forward ( self , z ): z -= self . shift log_det = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) return z , log_det def inverse ( self , z ): z += self . shift log_det = torch . zeros ( z . shape [ 0 ], dtype = z . dtype , device = z . device ) return z , log_det","title":"Shift"},{"location":"references/#normflows.transforms.Shift.__init__","text":"Constructor Parameters: Name Type Description Default shift Shift to apply to the data -0.5 Source code in normflows/transforms.py 57 58 59 60 61 62 63 64 def __init__ ( self , shift =- 0.5 ): \"\"\"Constructor Args: shift: Shift to apply to the data \"\"\" super () . __init__ () self . shift = shift","title":"__init__"},{"location":"references/#normflows.transforms_test","text":"","title":"transforms_test"},{"location":"references/#normflows.utils","text":"","title":"utils"},{"location":"references/#normflows.utils.eval","text":"","title":"eval"},{"location":"references/#normflows.utils.eval.bitsPerDim","text":"Computes the bits per dim for a batch of data Parameters: Name Type Description Default model Model to compute bits per dim for required x Batch of data required y Class labels for batch of data if base distribution is class conditional None trans Transformation to be applied to images during training 'logit' trans_param List of parameters of the transformation [0.05] Returns: Type Description Bits per dim for data batch under model Source code in normflows/utils/eval.py 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 def bitsPerDim ( model , x , y = None , trans = \"logit\" , trans_param = [ 0.05 ]): \"\"\"Computes the bits per dim for a batch of data Args: model: Model to compute bits per dim for x: Batch of data y: Class labels for batch of data if base distribution is class conditional trans: Transformation to be applied to images during training trans_param: List of parameters of the transformation Returns: Bits per dim for data batch under model \"\"\" dims = torch . prod ( torch . tensor ( x . size ()[ 1 :])) if trans == \"logit\" : if y is None : log_q = model . log_prob ( x ) else : log_q = model . log_prob ( x , y ) sum_dims = list ( range ( 1 , x . dim ())) ls = torch . nn . LogSigmoid () sig_ = torch . sum ( ls ( x ) / np . log ( 2 ), sum_dims ) sig_ += torch . sum ( ls ( - x ) / np . log ( 2 ), sum_dims ) b = - log_q / dims / np . log ( 2 ) - np . 
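
The transforms.Logit flow above is exactly invertible, so a round trip should recover both the input and (up to sign) the log-determinant; a hedged check, with the module path taken from the reference above:

```python
import torch
from normflows.transforms import Logit  # module path as given in the reference above

logit = Logit(alpha=0.05)
z = torch.randn(4, 2, dtype=torch.double)

x, log_det = logit.forward(z)          # latent -> (roughly) unit-interval data
z_rec, log_det_inv = logit.inverse(x)  # data -> latent via logit(alpha + (1 - 2*alpha) * x)

print(torch.allclose(z, z_rec))               # True
print(torch.allclose(log_det, -log_det_inv))  # True
```
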
log2 ( 1 - trans_param [ 0 ]) + 8 b += sig_ / dims else : raise NotImplementedError ( \"The transformation \" + trans + \" is not implemented.\" ) return b","title":"bitsPerDim"},{"location":"references/#normflows.utils.eval.bitsPerDimDataset","text":"Computes average bits per dim for an entire dataset given by a data loader Parameters: Name Type Description Default model Model to compute bits per dim for required data_loader Data loader of dataset required class_cond Flag indicating whether model is class_conditional True trans Transformation to be applied to images during training 'logit' trans_param List of parameters of the transformation [0.05] Returns: Type Description Average bits per dim for dataset Source code in normflows/utils/eval.py 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 def bitsPerDimDataset ( model , data_loader , class_cond = True , trans = \"logit\" , trans_param = [ 0.05 ] ): \"\"\"Computes average bits per dim for an entire dataset given by a data loader Args: model: Model to compute bits per dim for data_loader: Data loader of dataset class_cond: Flag indicating whether model is class_conditional trans: Transformation to be applied to images during training trans_param: List of parameters of the transformation Returns: Average bits per dim for dataset \"\"\" n = 0 b_cum = 0 with torch . no_grad (): for x , y in iter ( data_loader ): b_ = bitsPerDim ( model , x , y . to ( x . device ) if class_cond else None , trans , trans_param ) b_np = b_ . to ( \"cpu\" ) . numpy () b_cum += np . nansum ( b_np ) n += len ( x ) - np . sum ( np . isnan ( b_np )) b = b_cum / n return b","title":"bitsPerDimDataset"},{"location":"references/#normflows.utils.masks","text":"","title":"masks"},{"location":"references/#normflows.utils.masks.create_alternating_binary_mask","text":"Creates a binary mask of a given dimension which alternates its masking. Parameters: Name Type Description Default features Dimension of mask. required even If True, even values are assigned 1s, odd 0s. If False, vice versa. True Returns: Type Description Alternating binary mask of type torch.Tensor. Source code in normflows/utils/masks.py 4 5 6 7 8 9 10 11 12 13 14 15 16 17 def create_alternating_binary_mask ( features , even = True ): \"\"\"Creates a binary mask of a given dimension which alternates its masking. Args: features: Dimension of mask. even: If True, even values are assigned 1s, odd 0s. If False, vice versa. Returns: Alternating binary mask of type torch.Tensor. \"\"\" mask = torch . zeros ( features ) . byte () start = 0 if even else 1 mask [ start :: 2 ] += 1 return mask","title":"create_alternating_binary_mask"},{"location":"references/#normflows.utils.masks.create_mid_split_binary_mask","text":"Creates a binary mask of a given dimension which splits its masking at the midpoint. Parameters: Name Type Description Default features Dimension of mask. required Returns: Type Description Binary mask split at midpoint of type torch.Tensor Source code in normflows/utils/masks.py 20 21 22 23 24 25 26 27 28 29 30 31 32 def create_mid_split_binary_mask ( features ): \"\"\"Creates a binary mask of a given dimension which splits its masking at the midpoint. Args: features: Dimension of mask. Returns: Binary mask split at midpoint of type torch.Tensor \"\"\" mask = torch . zeros ( features ) . 
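
The mask helpers (create_alternating_binary_mask above, plus create_mid_split_binary_mask and create_random_binary_mask, whose references continue just below) are easiest to understand from their outputs; a small hedged sketch:

```python
from normflows.utils.masks import (  # module path as given in the references here
    create_alternating_binary_mask,
    create_mid_split_binary_mask,
    create_random_binary_mask,
)

print(create_alternating_binary_mask(6))              # tensor([1, 0, 1, 0, 1, 0], dtype=torch.uint8)
print(create_alternating_binary_mask(6, even=False))  # tensor([0, 1, 0, 1, 0, 1], dtype=torch.uint8)
print(create_mid_split_binary_mask(5))                # tensor([1, 1, 1, 0, 0], dtype=torch.uint8)
print(create_random_binary_mask(6, seed=0))           # exactly three entries set to 1, reproducible via seed
```
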
byte () midpoint = features // 2 if features % 2 == 0 else features // 2 + 1 mask [: midpoint ] += 1 return mask","title":"create_mid_split_binary_mask"},{"location":"references/#normflows.utils.masks.create_random_binary_mask","text":"Creates a random binary mask of a given dimension with half of its entries randomly set to 1s. Parameters: Name Type Description Default features Dimension of mask. required seed Seed to be used None Returns: Type Description Binary mask with half of its entries set to 1s, of type torch.Tensor. Source code in normflows/utils/masks.py 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 def create_random_binary_mask ( features , seed = None ): \"\"\"Creates a random binary mask of a given dimension with half of its entries randomly set to 1s. Args: features: Dimension of mask. seed: Seed to be used Returns: Binary mask with half of its entries set to 1s, of type torch.Tensor. \"\"\" mask = torch . zeros ( features ) . byte () weights = torch . ones ( features ) . float () num_samples = features // 2 if features % 2 == 0 else features // 2 + 1 if seed is None : generator = None else : generator = torch . Generator () generator . manual_seed ( seed ) indices = torch . multinomial ( input = weights , num_samples = num_samples , replacement = False , generator = generator ) mask [ indices ] += 1 return mask","title":"create_random_binary_mask"},{"location":"references/#normflows.utils.nn","text":"","title":"nn"},{"location":"references/#normflows.utils.nn.ActNorm","text":"Bases: Module ActNorm layer with just one forward pass Source code in normflows/utils/nn.py 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 class ActNorm ( nn . Module ): \"\"\" ActNorm layer with just one forward pass \"\"\" def __init__ ( self , shape ): \"\"\"Constructor Args: shape: Same as shape in flows.ActNorm logscale_factor: Same as logscale_factor in flows.ActNorm \"\"\" super () . __init__ () self . actNorm = flows . ActNorm ( shape ) def forward ( self , input ): out , _ = self . actNorm ( input ) return out","title":"ActNorm"},{"location":"references/#normflows.utils.nn.ActNorm.__init__","text":"Constructor Parameters: Name Type Description Default shape Same as shape in flows.ActNorm required logscale_factor Same as logscale_factor in flows.ActNorm required Source code in normflows/utils/nn.py 30 31 32 33 34 35 36 37 38 39 def __init__ ( self , shape ): \"\"\"Constructor Args: shape: Same as shape in flows.ActNorm logscale_factor: Same as logscale_factor in flows.ActNorm \"\"\" super () . __init__ () self . actNorm = flows . ActNorm ( shape )","title":"__init__"},{"location":"references/#normflows.utils.nn.ClampExp","text":"Bases: Module Nonlinearity min(exp(lam * x), 1) Source code in normflows/utils/nn.py 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 class ClampExp ( nn . Module ): \"\"\" Nonlinearity min(exp(lam * x), 1) \"\"\" def __init__ ( self ): \"\"\"Constructor Args: lam: Lambda parameter \"\"\" super ( ClampExp , self ) . __init__ () def forward ( self , x ): one = torch . tensor ( 1.0 , device = x . device , dtype = x . dtype ) return torch . min ( torch . exp ( x ), one )","title":"ClampExp"},{"location":"references/#normflows.utils.nn.ClampExp.__init__","text":"Constructor (takes no arguments) Source code in normflows/utils/nn.py 51 52 53 54 55 56 57 def __init__ ( self ): \"\"\"Constructor Args: lam: Lambda parameter \"\"\" super ( ClampExp , self ) . 
__init__ ()","title":"__init__"},{"location":"references/#normflows.utils.nn.ConstScaleLayer","text":"Bases: Module Scaling features by a fixed factor Source code in normflows/utils/nn.py 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 class ConstScaleLayer ( nn . Module ): \"\"\" Scaling features by a fixed factor \"\"\" def __init__ ( self , scale = 1.0 ): \"\"\"Constructor Args: scale: Scale to apply to features \"\"\" super () . __init__ () self . scale_cpu = torch . tensor ( scale ) self . register_buffer ( \"scale\" , self . scale_cpu ) def forward ( self , input ): return input * self . scale","title":"ConstScaleLayer"},{"location":"references/#normflows.utils.nn.ConstScaleLayer.__init__","text":"Constructor Parameters: Name Type Description Default scale Scale to apply to features 1.0 Source code in normflows/utils/nn.py 12 13 14 15 16 17 18 19 20 def __init__ ( self , scale = 1.0 ): \"\"\"Constructor Args: scale: Scale to apply to features \"\"\" super () . __init__ () self . scale_cpu = torch . tensor ( scale ) self . register_buffer ( \"scale\" , self . scale_cpu )","title":"__init__"},{"location":"references/#normflows.utils.nn.PeriodicFeaturesCat","text":"Bases: Module Converts a specified part of the input to periodic features by replacing those features f with [sin(scale * f), cos(scale * f)]. Note that this decreases the number of features and their order is changed. Source code in normflows/utils/nn.py 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 class PeriodicFeaturesCat ( nn . Module ): \"\"\" Converts a specified part of the input to periodic features by replacing those features f with [sin(scale * f), cos(scale * f)]. Note that this decreases the number of features and their order is changed. \"\"\" def __init__ ( self , ndim , ind , scale = 1.0 ): \"\"\" Constructor :param ndim: Int, number of dimensions :param ind: Iterable, indices of input elements to convert to periodic features :param scale: Scalar or iterable, used to scale inputs before converting them to periodic features \"\"\" super ( PeriodicFeaturesCat , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . scale = scale def forward ( self , inputs ): inputs_ = inputs [ ... , self . ind ] inputs_ = self . scale * inputs_ inputs_sin = torch . sin ( inputs_ ) inputs_cos = torch . cos ( inputs_ ) out = torch . cat (( inputs_sin , inputs_cos , inputs [ ... , self . 
ind_ ]), - 1 ) return out","title":"PeriodicFeaturesCat"},{"location":"references/#normflows.utils.nn.PeriodicFeaturesCat.__init__","text":"Constructor :param ndim: Int, number of dimensions :param ind: Iterable, indices of input elements to convert to periodic features :param scale: Scalar or iterable, used to scale inputs before converting them to periodic features Source code in normflows/utils/nn.py 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 def __init__ ( self , ndim , ind , scale = 1.0 ): \"\"\" Constructor :param ndim: Int, number of dimensions :param ind: Iterable, indices of input elements to convert to periodic features :param scale: Scalar or iterable, used to scale inputs before converting them to periodic features \"\"\" super ( PeriodicFeaturesCat , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . scale = scale","title":"__init__"},{"location":"references/#normflows.utils.nn.PeriodicFeaturesElementwise","text":"Bases: Module Converts a specified part of the input to periodic features by replacing those features f with w1 * sin(scale * f) + w2 * cos(scale * f). Note that this operation is done elementwise and, therefore, some information about the feature can be lost. Source code in normflows/utils/nn.py 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 class PeriodicFeaturesElementwise ( nn . Module ): \"\"\" Converts a specified part of the input to periodic features by replacing those features f with w1 * sin(scale * f) + w2 * cos(scale * f). Note that this operation is done elementwise and, therefore, some information about the feature can be lost. \"\"\" def __init__ ( self , ndim , ind , scale = 1.0 , bias = False , activation = None ): \"\"\"Constructor Args: ndim (int): number of dimensions ind (iterable): indices of input elements to convert to periodic features scale: Scalar or iterable, used to scale inputs before converting them to periodic features bias: Flag, whether to add a bias activation: Function or None, activation function to be applied \"\"\" super ( PeriodicFeaturesElementwise , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) perm_ = torch . cat (( self . ind , self . ind_ )) inv_perm_ = torch . zeros_like ( perm_ ) for i in range ( self . ndim ): inv_perm_ [ perm_ [ i ]] = i self . register_buffer ( \"inv_perm\" , inv_perm_ ) self . weights = nn . Parameter ( torch . ones ( len ( self . ind ), 2 )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . 
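
To make the contrast concrete: PeriodicFeaturesCat (above) grows the feature dimension, while PeriodicFeaturesElementwise (whose reference continues below) keeps it fixed. A hedged sketch of the former, with illustrative sizes:

```python
import torch
from normflows.utils.nn import PeriodicFeaturesCat  # module path as given in the reference above

pf = PeriodicFeaturesCat(ndim=3, ind=[0], scale=1.0)
x = torch.tensor([[3.1416, 0.5, -0.2]])
out = pf(x)
print(out.shape)  # torch.Size([1, 4]): feature 0 became (sin, cos); features 1 and 2 follow
print(out)        # approximately [[0.0, -1.0, 0.5, -0.2]]
```
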
scale = scale self . apply_bias = bias if self . apply_bias : self . bias = nn . Parameter ( torch . zeros ( len ( self . ind ))) if activation is None : self . activation = torch . nn . Identity () else : self . activation = activation def forward ( self , inputs ): inputs_ = inputs [ ... , self . ind ] inputs_ = self . scale * inputs_ inputs_ = self . weights [:, 0 ] * torch . sin ( inputs_ ) + self . weights [ :, 1 ] * torch . cos ( inputs_ ) if self . apply_bias : inputs_ = inputs_ + self . bias inputs_ = self . activation ( inputs_ ) out = torch . cat (( inputs_ , inputs [ ... , self . ind_ ]), - 1 ) return out [ ... , self . inv_perm ]","title":"PeriodicFeaturesElementwise"},{"location":"references/#normflows.utils.nn.PeriodicFeaturesElementwise.__init__","text":"Constructor Parameters: Name Type Description Default ndim int number of dimensions required ind iterable indices of input elements to convert to periodic features required scale Scalar or iterable, used to scale inputs before converting them to periodic features 1.0 bias Flag, whether to add a bias False activation Function or None, activation function to be applied None Source code in normflows/utils/nn.py 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 def __init__ ( self , ndim , ind , scale = 1.0 , bias = False , activation = None ): \"\"\"Constructor Args: ndim (int): number of dimensions ind (iterable): indices of input elements to convert to periodic features scale: Scalar or iterable, used to scale inputs before converting them to periodic features bias: Flag, whether to add a bias activation: Function or None, activation function to be applied \"\"\" super ( PeriodicFeaturesElementwise , self ) . __init__ () # Set up indices and permutations self . ndim = ndim if torch . is_tensor ( ind ): self . register_buffer ( \"ind\" , torch . _cast_Long ( ind )) else : self . register_buffer ( \"ind\" , torch . tensor ( ind , dtype = torch . long )) ind_ = [] for i in range ( self . ndim ): if not i in self . ind : ind_ += [ i ] self . register_buffer ( \"ind_\" , torch . tensor ( ind_ , dtype = torch . long )) perm_ = torch . cat (( self . ind , self . ind_ )) inv_perm_ = torch . zeros_like ( perm_ ) for i in range ( self . ndim ): inv_perm_ [ perm_ [ i ]] = i self . register_buffer ( \"inv_perm\" , inv_perm_ ) self . weights = nn . Parameter ( torch . ones ( len ( self . ind ), 2 )) if torch . is_tensor ( scale ): self . register_buffer ( \"scale\" , scale ) else : self . scale = scale self . apply_bias = bias if self . apply_bias : self . bias = nn . Parameter ( torch . zeros ( len ( self . ind ))) if activation is None : self . activation = torch . nn . Identity () else : self . activation = activation","title":"__init__"},{"location":"references/#normflows.utils.nn.sum_except_batch","text":"Sums all elements of x except for the first num_batch_dims dimensions. Source code in normflows/utils/nn.py 190 191 192 193 def sum_except_batch ( x , num_batch_dims = 1 ): \"\"\"Sums all elements of `x` except for the first `num_batch_dims` dimensions.\"\"\" reduce_dims = list ( range ( num_batch_dims , x . ndimension ())) return torch . 
sum ( x , dim = reduce_dims )","title":"sum_except_batch"},{"location":"references/#normflows.utils.optim","text":"","title":"optim"},{"location":"references/#normflows.utils.optim.clear_grad","text":"Set gradients of model parameters to None, as this speeds up training; see youtube Parameters: Name Type Description Default model Model to clear gradients of required Source code in normflows/utils/optim.py 16 17 18 19 20 21 22 23 24 25 def clear_grad ( model ): \"\"\"Set gradients of model parameters to None, as this speeds up training; see [youtube](https://www.youtube.com/watch?v=9mS1fIYj1So) Args: model: Model to clear gradients of \"\"\" for param in model . parameters (): param . grad = None","title":"clear_grad"},{"location":"references/#normflows.utils.optim.set_requires_grad","text":"Sets requires_grad flag of all parameters of a torch.nn.module Parameters: Name Type Description Default module torch.nn.module required flag Flag to set requires_grad to required Source code in normflows/utils/optim.py 4 5 6 7 8 9 10 11 12 13 def set_requires_grad ( module , flag ): \"\"\"Sets requires_grad flag of all parameters of a torch.nn.module Args: module: torch.nn.module flag: Flag to set requires_grad to \"\"\" for param in module . parameters (): param . requires_grad = flag","title":"set_requires_grad"},{"location":"references/#normflows.utils.preprocessing","text":"","title":"preprocessing"},{"location":"references/#normflows.utils.preprocessing.Jitter","text":"Transform for dataloader, adds uniform jitter noise to data Source code in normflows/utils/preprocessing.py 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 class Jitter : \"\"\"Transform for dataloader, adds uniform jitter noise to data\"\"\" def __init__ ( self , scale = 1.0 / 256 ): \"\"\"Constructor Args: scale: Scaling factor for noise \"\"\" self . scale = scale def __call__ ( self , x ): eps = torch . rand_like ( x ) * self . scale x_ = x + eps return x_","title":"Jitter"},{"location":"references/#normflows.utils.preprocessing.Jitter.__init__","text":"Constructor Parameters: Name Type Description Default scale Scaling factor for noise 1.0 / 256 Source code in normflows/utils/preprocessing.py 31 32 33 34 35 36 37 def __init__ ( self , scale = 1.0 / 256 ): \"\"\"Constructor Args: scale: Scaling factor for noise \"\"\" self . scale = scale","title":"__init__"},{"location":"references/#normflows.utils.preprocessing.Logit","text":"Transform for dataloader ``` logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x)) ``` Source code in normflows/utils/preprocessing.py 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 class Logit : \"\"\"Transform for dataloader ``` logit(alpha + (1 - alpha) * x) where logit(x) = log(x / (1 - x)) ``` \"\"\" def __init__ ( self , alpha = 0 ): \"\"\"Constructor Args: alpha: see above \"\"\" self . alpha = alpha def __call__ ( self , x ): x_ = self . alpha + ( 1 - self . alpha ) * x return torch . log ( x_ / ( 1 - x_ )) def inverse ( self , x ): return ( torch . sigmoid ( x ) - self . alpha ) / ( 1 - self . alpha )","title":"Logit"},{"location":"references/#normflows.utils.preprocessing.Logit.__init__","text":"Constructor Parameters: Name Type Description Default alpha see above 0 Source code in normflows/utils/preprocessing.py 12 13 14 15 16 17 18 def __init__ ( self , alpha = 0 ): \"\"\"Constructor Args: alpha: see above \"\"\" self . 
alpha = alpha","title":"__init__"},{"location":"references/#normflows.utils.preprocessing.Scale","text":"Transform for dataloader, adds uniform jitter noise to data Source code in normflows/utils/preprocessing.py 45 46 47 48 49 50 51 52 53 54 55 56 57 class Scale : \"\"\"Transform for dataloader, adds uniform jitter noise to data\"\"\" def __init__ ( self , scale = 255.0 / 256.0 ): \"\"\"Constructor Args: scale: Scaling factor for noise \"\"\" self . scale = scale def __call__ ( self , x ): return x * self . scale","title":"Scale"},{"location":"references/#normflows.utils.preprocessing.Scale.__init__","text":"Constructor Parameters: Name Type Description Default scale Scaling factor for noise 255.0 / 256.0 Source code in normflows/utils/preprocessing.py 48 49 50 51 52 53 54 def __init__ ( self , scale = 255.0 / 256.0 ): \"\"\"Constructor Args: scale: Scaling factor for noise \"\"\" self . scale = scale","title":"__init__"},{"location":"examples/augmented_flow/","text":"(function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : typeof define === 'function' && define.amd ? define(factory) : (global = global || self, global.ClipboardCopyElement = factory()); }(this, function () { 'use strict'; function createNode(text) { const node = document.createElement('pre'); node.style.width = '1px'; node.style.height = '1px'; node.style.position = 'fixed'; node.style.top = '5px'; node.textContent = text; return node; } function copyNode(node) { if ('clipboard' in navigator) { // eslint-disable-next-line flowtype/no-flow-fix-me-comments // $FlowFixMe Clipboard is not defined in Flow yet. return navigator.clipboard.writeText(node.textContent); } const selection = getSelection(); if (selection == null) { return Promise.reject(new Error()); } selection.removeAllRanges(); const range = document.createRange(); range.selectNodeContents(node); selection.addRange(range); document.execCommand('copy'); selection.removeAllRanges(); return Promise.resolve(); } function copyText(text) { if ('clipboard' in navigator) { // eslint-disable-next-line flowtype/no-flow-fix-me-comments // $FlowFixMe Clipboard is not defined in Flow yet. return navigator.clipboard.writeText(text); } const body = document.body; if (!body) { return Promise.reject(new Error()); } const node = createNode(text); body.appendChild(node); copyNode(node); body.removeChild(node); return Promise.resolve(); } function copy(button) { const id = button.getAttribute('for'); const text = button.getAttribute('value'); function trigger() { button.dispatchEvent(new CustomEvent('clipboard-copy', { bubbles: true })); } if (text) { copyText(text).then(trigger); } else if (id) { const root = 'getRootNode' in Element.prototype ? 
#ba2121;--jp-mirror-editor-string-2-color: #708;--jp-mirror-editor-meta-color: #aa22ff;--jp-mirror-editor-qualifier-color: #555;--jp-mirror-editor-builtin-color: #008000;--jp-mirror-editor-bracket-color: #997;--jp-mirror-editor-tag-color: #170;--jp-mirror-editor-attribute-color: #00c;--jp-mirror-editor-header-color: blue;--jp-mirror-editor-quote-color: #090;--jp-mirror-editor-link-color: #00c;--jp-mirror-editor-error-color: #f00;--jp-mirror-editor-hr-color: #999;--jp-collaborator-color1: #ffad8e;--jp-collaborator-color2: #dac83d;--jp-collaborator-color3: #72dd76;--jp-collaborator-color4: #00e4d0;--jp-collaborator-color5: #45d4ff;--jp-collaborator-color6: #e2b1ff;--jp-collaborator-color7: #ff9de6;--jp-vega-background: white;--jp-sidebar-min-width: 250px;--jp-search-toggle-off-opacity: .5;--jp-search-toggle-hover-opacity: .8;--jp-search-toggle-on-opacity: 1;--jp-search-selected-match-background-color: rgb(245, 200, 0);--jp-search-selected-match-color: black;--jp-search-unselected-match-background-color: var( --jp-inverse-layout-color0 );--jp-search-unselected-match-color: var(--jp-ui-inverse-font-color0);--jp-icon-contrast-color0: var(--md-purple-600);--jp-icon-contrast-color1: var(--md-green-600);--jp-icon-contrast-color2: var(--md-pink-600);--jp-icon-contrast-color3: var(--md-blue-600);--jp-jupyter-icon-color: #f37626;--jp-notebook-icon-color: #f37626;--jp-json-icon-color: var(--md-orange-700);--jp-console-icon-background-color: var(--md-blue-700);--jp-console-icon-color: white;--jp-terminal-icon-background-color: var(--md-grey-800);--jp-terminal-icon-color: var(--md-grey-200);--jp-text-editor-icon-color: var(--md-grey-700);--jp-inspector-icon-color: var(--md-grey-700);--jp-switch-color: var(--md-grey-400);--jp-switch-true-position-color: var(--md-orange-900)}[data-md-color-scheme=slate] .jupyter-wrapper{--jp-shadow-base-lightness: 32;--jp-shadow-umbra-color: rgba( var(--jp-shadow-base-lightness), var(--jp-shadow-base-lightness), var(--jp-shadow-base-lightness), .2 );--jp-shadow-penumbra-color: rgba( var(--jp-shadow-base-lightness), var(--jp-shadow-base-lightness), var(--jp-shadow-base-lightness), .14 );--jp-shadow-ambient-color: rgba( var(--jp-shadow-base-lightness), var(--jp-shadow-base-lightness), var(--jp-shadow-base-lightness), .12 );--jp-elevation-z0: none;--jp-elevation-z1: 0px 2px 1px -1px var(--jp-shadow-umbra-color), 0px 1px 1px 0px var(--jp-shadow-penumbra-color), 0px 1px 3px 0px var(--jp-shadow-ambient-color);--jp-elevation-z2: 0px 3px 1px -2px var(--jp-shadow-umbra-color), 0px 2px 2px 0px var(--jp-shadow-penumbra-color), 0px 1px 5px 0px var(--jp-shadow-ambient-color);--jp-elevation-z4: 0px 2px 4px -1px var(--jp-shadow-umbra-color), 0px 4px 5px 0px var(--jp-shadow-penumbra-color), 0px 1px 10px 0px var(--jp-shadow-ambient-color);--jp-elevation-z6: 0px 3px 5px -1px var(--jp-shadow-umbra-color), 0px 6px 10px 0px var(--jp-shadow-penumbra-color), 0px 1px 18px 0px var(--jp-shadow-ambient-color);--jp-elevation-z8: 0px 5px 5px -3px var(--jp-shadow-umbra-color), 0px 8px 10px 1px var(--jp-shadow-penumbra-color), 0px 3px 14px 2px var(--jp-shadow-ambient-color);--jp-elevation-z12: 0px 7px 8px -4px var(--jp-shadow-umbra-color), 0px 12px 17px 2px var(--jp-shadow-penumbra-color), 0px 5px 22px 4px var(--jp-shadow-ambient-color);--jp-elevation-z16: 0px 8px 10px -5px var(--jp-shadow-umbra-color), 0px 16px 24px 2px var(--jp-shadow-penumbra-color), 0px 6px 30px 5px var(--jp-shadow-ambient-color);--jp-elevation-z20: 0px 10px 13px -6px var(--jp-shadow-umbra-color), 0px 20px 31px 3px 
var(--jp-shadow-penumbra-color), 0px 8px 38px 7px var(--jp-shadow-ambient-color);--jp-elevation-z24: 0px 11px 15px -7px var(--jp-shadow-umbra-color), 0px 24px 38px 3px var(--jp-shadow-penumbra-color), 0px 9px 46px 8px var(--jp-shadow-ambient-color);--jp-border-width: 1px;--jp-border-color0: var(--md-grey-700);--jp-border-color1: var(--md-grey-700);--jp-border-color2: var(--md-grey-800);--jp-border-color3: var(--md-grey-900);--jp-inverse-border-color: var(--md-grey-600);--jp-border-radius: 2px;--jp-ui-font-scale-factor: 1.2;--jp-ui-font-size0: .83333em;--jp-ui-font-size1: 13px;--jp-ui-font-size2: 1.2em;--jp-ui-font-size3: 1.44em;--jp-ui-font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Helvetica, Arial, sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\";--jp-ui-font-color0: rgba(255, 255, 255, 1);--jp-ui-font-color1: rgba(255, 255, 255, .87);--jp-ui-font-color2: rgba(255, 255, 255, .54);--jp-ui-font-color3: rgba(255, 255, 255, .38);--jp-ui-inverse-font-color0: rgba(0, 0, 0, 1);--jp-ui-inverse-font-color1: rgba(0, 0, 0, .8);--jp-ui-inverse-font-color2: rgba(0, 0, 0, .5);--jp-ui-inverse-font-color3: rgba(0, 0, 0, .3);--jp-content-line-height: 1.6;--jp-content-font-scale-factor: 1.2;--jp-content-font-size0: .83333em;--jp-content-font-size1: 14px;--jp-content-font-size2: 1.2em;--jp-content-font-size3: 1.44em;--jp-content-font-size4: 1.728em;--jp-content-font-size5: 2.0736em;--jp-content-presentation-font-size1: 17px;--jp-content-heading-line-height: 1;--jp-content-heading-margin-top: 1.2em;--jp-content-heading-margin-bottom: .8em;--jp-content-heading-font-weight: 500;--jp-content-font-color0: rgba(255, 255, 255, 1);--jp-content-font-color1: rgba(255, 255, 255, 1);--jp-content-font-color2: rgba(255, 255, 255, .7);--jp-content-font-color3: rgba(255, 255, 255, .5);--jp-content-link-color: var(--md-blue-300);--jp-content-font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Helvetica, Arial, sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\";--jp-code-font-size: 13px;--jp-code-line-height: 1.3077;--jp-code-padding: 5px;--jp-code-font-family-default: Menlo, Consolas, \"DejaVu Sans Mono\", monospace;--jp-code-font-family: var(--jp-code-font-family-default);--jp-code-presentation-font-size: 16px;--jp-code-cursor-width0: 1.4px;--jp-code-cursor-width1: 2px;--jp-code-cursor-width2: 4px;--jp-layout-color0: #111111;--jp-layout-color1: var(--md-grey-900);--jp-layout-color2: var(--md-grey-800);--jp-layout-color3: var(--md-grey-700);--jp-layout-color4: var(--md-grey-600);--jp-inverse-layout-color0: white;--jp-inverse-layout-color1: white;--jp-inverse-layout-color2: var(--md-grey-200);--jp-inverse-layout-color3: var(--md-grey-400);--jp-inverse-layout-color4: var(--md-grey-600);--jp-brand-color0: var(--md-blue-700);--jp-brand-color1: var(--md-blue-500);--jp-brand-color2: var(--md-blue-300);--jp-brand-color3: var(--md-blue-100);--jp-brand-color4: var(--md-blue-50);--jp-accent-color0: var(--md-green-700);--jp-accent-color1: var(--md-green-500);--jp-accent-color2: var(--md-green-300);--jp-accent-color3: var(--md-green-100);--jp-warn-color0: var(--md-orange-700);--jp-warn-color1: var(--md-orange-500);--jp-warn-color2: var(--md-orange-300);--jp-warn-color3: var(--md-orange-100);--jp-error-color0: var(--md-red-700);--jp-error-color1: var(--md-red-500);--jp-error-color2: var(--md-red-300);--jp-error-color3: var(--md-red-100);--jp-success-color0: var(--md-green-700);--jp-success-color1: var(--md-green-500);--jp-success-color2: 
var(--md-green-300);--jp-success-color3: var(--md-green-100);--jp-info-color0: var(--md-cyan-700);--jp-info-color1: var(--md-cyan-500);--jp-info-color2: var(--md-cyan-300);--jp-info-color3: var(--md-cyan-100);--jp-cell-padding: 5px;--jp-cell-collapser-width: 8px;--jp-cell-collapser-min-height: 20px;--jp-cell-collapser-not-active-hover-opacity: .6;--jp-cell-editor-background: var(--jp-layout-color1);--jp-cell-editor-border-color: var(--md-grey-700);--jp-cell-editor-box-shadow: inset 0 0 2px var(--md-blue-300);--jp-cell-editor-active-background: var(--jp-layout-color0);--jp-cell-editor-active-border-color: var(--jp-brand-color1);--jp-cell-prompt-width: 64px;--jp-cell-prompt-font-family: var(--jp-code-font-family-default);--jp-cell-prompt-letter-spacing: 0px;--jp-cell-prompt-opacity: 1;--jp-cell-prompt-not-active-opacity: 1;--jp-cell-prompt-not-active-font-color: var(--md-grey-300);--jp-cell-inprompt-font-color: #307fc1;--jp-cell-outprompt-font-color: #bf5b3d;--jp-notebook-padding: 10px;--jp-notebook-select-background: var(--jp-layout-color1);--jp-notebook-multiselected-color: rgba(33, 150, 243, .24);--jp-notebook-scroll-padding: calc( 100% - var(--jp-code-font-size) * var(--jp-code-line-height) - var(--jp-code-padding) - var(--jp-cell-padding) - 1px );--jp-rendermime-error-background: rgba(244, 67, 54, .28);--jp-rendermime-table-row-background: var(--md-grey-900);--jp-rendermime-table-row-hover-background: rgba(3, 169, 244, .2);--jp-dialog-background: rgba(0, 0, 0, .6);--jp-console-padding: 10px;--jp-toolbar-border-color: var(--jp-border-color2);--jp-toolbar-micro-height: 8px;--jp-toolbar-background: var(--jp-layout-color1);--jp-toolbar-box-shadow: 0px 0px 2px 0px rgba(0, 0, 0, .8);--jp-toolbar-header-margin: 4px 4px 0px 4px;--jp-toolbar-active-background: var(--jp-layout-color0);--jp-statusbar-height: 24px;--jp-input-box-shadow: inset 0 0 2px var(--md-blue-300);--jp-input-active-background: var(--jp-layout-color0);--jp-input-hover-background: var(--jp-layout-color2);--jp-input-background: var(--md-grey-800);--jp-input-border-color: var(--jp-inverse-border-color);--jp-input-active-border-color: var(--jp-brand-color1);--jp-input-active-box-shadow-color: rgba(19, 124, 189, .3);--jp-editor-selected-background: var(--jp-layout-color2);--jp-editor-selected-focused-background: rgba(33, 150, 243, .24);--jp-editor-cursor-color: var(--jp-ui-font-color0);--jp-mirror-editor-keyword-color: var(--md-green-500);--jp-mirror-editor-atom-color: var(--md-blue-300);--jp-mirror-editor-number-color: var(--md-green-400);--jp-mirror-editor-def-color: var(--md-blue-600);--jp-mirror-editor-variable-color: var(--md-grey-300);--jp-mirror-editor-variable-2-color: var(--md-blue-400);--jp-mirror-editor-variable-3-color: var(--md-green-600);--jp-mirror-editor-punctuation-color: var(--md-blue-400);--jp-mirror-editor-property-color: var(--md-blue-400);--jp-mirror-editor-operator-color: #aa22ff;--jp-mirror-editor-comment-color: #408080;--jp-mirror-editor-string-color: #ff7070;--jp-mirror-editor-string-2-color: var(--md-purple-300);--jp-mirror-editor-meta-color: #aa22ff;--jp-mirror-editor-qualifier-color: #555;--jp-mirror-editor-builtin-color: var(--md-green-600);--jp-mirror-editor-bracket-color: #997;--jp-mirror-editor-tag-color: var(--md-green-700);--jp-mirror-editor-attribute-color: var(--md-blue-700);--jp-mirror-editor-header-color: var(--md-blue-500);--jp-mirror-editor-quote-color: var(--md-green-300);--jp-mirror-editor-link-color: var(--md-blue-700);--jp-mirror-editor-error-color: #f00;--jp-mirror-editor-hr-color: 
#999;--jp-collaborator-color1: #ad4a00;--jp-collaborator-color2: #7b6a00;--jp-collaborator-color3: #007e00;--jp-collaborator-color4: #008772;--jp-collaborator-color5: #0079b9;--jp-collaborator-color6: #8b45c6;--jp-collaborator-color7: #be208b;--jp-vega-background: var(--md-grey-400);--jp-sidebar-min-width: 250px;--jp-search-toggle-off-opacity: .6;--jp-search-toggle-hover-opacity: .8;--jp-search-toggle-on-opacity: 1;--jp-search-selected-match-background-color: rgb(255, 225, 0);--jp-search-selected-match-color: black;--jp-search-unselected-match-background-color: var( --jp-inverse-layout-color0 );--jp-search-unselected-match-color: var(--jp-ui-inverse-font-color0);--jp-scrollbar-background-color: #3f4244;--jp-scrollbar-thumb-color: 88, 96, 97;--jp-scrollbar-endpad: 3px;--jp-scrollbar-thumb-margin: 3.5px;--jp-scrollbar-thumb-radius: 9px;--jp-icon-contrast-color0: var(--md-purple-600);--jp-icon-contrast-color1: var(--md-green-600);--jp-icon-contrast-color2: var(--md-pink-600);--jp-icon-contrast-color3: var(--md-blue-600);--jp-jupyter-icon-color: #f37626;--jp-notebook-icon-color: #f37626;--jp-json-icon-color: var(--md-orange-500);--jp-console-icon-background-color: var(--md-blue-500);--jp-console-icon-color: white;--jp-terminal-icon-background-color: var(--md-grey-200);--jp-terminal-icon-color: var(--md-grey-800);--jp-text-editor-icon-color: var(--md-grey-200);--jp-inspector-icon-color: var(--md-grey-200);--jp-switch-color: var(--md-grey-400);--jp-switch-true-position-color: var(--md-orange-700)}.jupyter-wrapper [data-jp-theme-scrollbars=true]{scrollbar-color:rgb(var(--jp-scrollbar-thumb-color)) var(--jp-scrollbar-background-color)}.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-hscrollbar,.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-vscrollbar{scrollbar-color:rgba(var(--jp-scrollbar-thumb-color),.5) transparent}.jupyter-wrapper .jp-scrollbar-tiny{scrollbar-color:rgba(var(--jp-scrollbar-thumb-color),.5) transparent;scrollbar-width:thin}.jupyter-wrapper [data-jp-theme-scrollbars=true] ::-webkit-scrollbar,.jupyter-wrapper [data-jp-theme-scrollbars=true] ::-webkit-scrollbar-corner{background:var(--jp-scrollbar-background-color)}.jupyter-wrapper [data-jp-theme-scrollbars=true] ::-webkit-scrollbar-thumb{background:rgb(var(--jp-scrollbar-thumb-color));border:var(--jp-scrollbar-thumb-margin) solid transparent;background-clip:content-box;border-radius:var(--jp-scrollbar-thumb-radius)}.jupyter-wrapper [data-jp-theme-scrollbars=true] ::-webkit-scrollbar-track:horizontal{border-left:var(--jp-scrollbar-endpad) solid var(--jp-scrollbar-background-color);border-right:var(--jp-scrollbar-endpad) solid var(--jp-scrollbar-background-color)}.jupyter-wrapper [data-jp-theme-scrollbars=true] ::-webkit-scrollbar-track:vertical{border-top:var(--jp-scrollbar-endpad) solid var(--jp-scrollbar-background-color);border-bottom:var(--jp-scrollbar-endpad) solid var(--jp-scrollbar-background-color)}.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-hscrollbar::-webkit-scrollbar,.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-vscrollbar::-webkit-scrollbar,.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-hscrollbar::-webkit-scrollbar-corner,.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-vscrollbar::-webkit-scrollbar-corner{background-color:transparent}.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-hscrollbar::-webkit-scrollbar-thumb,.jupyter-wrapper [data-jp-theme-scrollbars=true] 
.CodeMirror-vscrollbar::-webkit-scrollbar-thumb{background:rgba(var(--jp-scrollbar-thumb-color),.5);border:var(--jp-scrollbar-thumb-margin) solid transparent;background-clip:content-box;border-radius:var(--jp-scrollbar-thumb-radius)}.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-hscrollbar::-webkit-scrollbar-track:horizontal{border-left:var(--jp-scrollbar-endpad) solid transparent;border-right:var(--jp-scrollbar-endpad) solid transparent}.jupyter-wrapper [data-jp-theme-scrollbars=true] .CodeMirror-vscrollbar::-webkit-scrollbar-track:vertical{border-top:var(--jp-scrollbar-endpad) solid transparent;border-bottom:var(--jp-scrollbar-endpad) solid transparent}.jupyter-wrapper .jp-scrollbar-tiny::-webkit-scrollbar,.jupyter-wrapper .jp-scrollbar-tiny::-webkit-scrollbar-corner{background-color:transparent;height:4px;width:4px}.jupyter-wrapper .jp-scrollbar-tiny::-webkit-scrollbar-thumb{background:rgba(var(--jp-scrollbar-thumb-color),.5)}.jupyter-wrapper .jp-scrollbar-tiny::-webkit-scrollbar-track:horizontal{border-left:0px solid transparent;border-right:0px solid transparent}.jupyter-wrapper .jp-scrollbar-tiny::-webkit-scrollbar-track:vertical{border-top:0px solid transparent;border-bottom:0px solid transparent}.jupyter-wrapper .lm-ScrollBar[data-orientation=horizontal]{min-height:16px;max-height:16px;min-width:45px;border-top:1px solid #a0a0a0}.jupyter-wrapper .lm-ScrollBar[data-orientation=vertical]{min-width:16px;max-width:16px;min-height:45px;border-left:1px solid #a0a0a0}.jupyter-wrapper .lm-ScrollBar-button{background-color:#f0f0f0;background-position:center center;min-height:15px;max-height:15px;min-width:15px;max-width:15px}.jupyter-wrapper .lm-ScrollBar-button:hover{background-color:#dadada}.jupyter-wrapper .lm-ScrollBar-button.lm-mod-active{background-color:#cdcdcd}.jupyter-wrapper .lm-ScrollBar-track{background:#f0f0f0}.jupyter-wrapper .lm-ScrollBar-thumb{background:#cdcdcd}.jupyter-wrapper .lm-ScrollBar-thumb:hover{background:#bababa}.jupyter-wrapper .lm-ScrollBar-thumb.lm-mod-active{background:#a0a0a0}.jupyter-wrapper .lm-ScrollBar[data-orientation=horizontal] .lm-ScrollBar-thumb{height:100%;min-width:15px;border-left:1px solid #a0a0a0;border-right:1px solid #a0a0a0}.jupyter-wrapper .lm-ScrollBar[data-orientation=vertical] .lm-ScrollBar-thumb{width:100%;min-height:15px;border-top:1px solid #a0a0a0;border-bottom:1px solid #a0a0a0}.jupyter-wrapper .lm-ScrollBar[data-orientation=horizontal] .lm-ScrollBar-button[data-action=decrement]{background-image:var(--jp-icon-caret-left);background-size:17px}.jupyter-wrapper .lm-ScrollBar[data-orientation=horizontal] .lm-ScrollBar-button[data-action=increment]{background-image:var(--jp-icon-caret-right);background-size:17px}.jupyter-wrapper .lm-ScrollBar[data-orientation=vertical] .lm-ScrollBar-button[data-action=decrement]{background-image:var(--jp-icon-caret-up);background-size:17px}.jupyter-wrapper .lm-ScrollBar[data-orientation=vertical] .lm-ScrollBar-button[data-action=increment]{background-image:var(--jp-icon-caret-down);background-size:17px}.jupyter-wrapper .p-Widget,.jupyter-wrapper .lm-Widget{box-sizing:border-box;position:relative;overflow:hidden;cursor:default}.jupyter-wrapper .p-Widget.p-mod-hidden,.jupyter-wrapper .lm-Widget.lm-mod-hidden{display:none!important}.jupyter-wrapper .lm-AccordionPanel[data-orientation=horizontal]>.lm-AccordionPanel-title{display:block;transform-origin:top left;transform:rotate(-90deg) translate(-100%)}.jupyter-wrapper .p-CommandPalette,.jupyter-wrapper 
.lm-CommandPalette{display:flex;flex-direction:column;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .p-CommandPalette-search,.jupyter-wrapper .lm-CommandPalette-search{flex:0 0 auto}.jupyter-wrapper .p-CommandPalette-content,.jupyter-wrapper .lm-CommandPalette-content{flex:1 1 auto;margin:0;padding:0;min-height:0;overflow:auto;list-style-type:none}.jupyter-wrapper .p-CommandPalette-header,.jupyter-wrapper .lm-CommandPalette-header{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.jupyter-wrapper .p-CommandPalette-item,.jupyter-wrapper .lm-CommandPalette-item{display:flex;flex-direction:row}.jupyter-wrapper .p-CommandPalette-itemIcon,.jupyter-wrapper .lm-CommandPalette-itemIcon{flex:0 0 auto}.jupyter-wrapper .p-CommandPalette-itemContent,.jupyter-wrapper .lm-CommandPalette-itemContent{flex:1 1 auto;overflow:hidden}.jupyter-wrapper .p-CommandPalette-itemShortcut,.jupyter-wrapper .lm-CommandPalette-itemShortcut{flex:0 0 auto}.jupyter-wrapper .p-CommandPalette-itemLabel,.jupyter-wrapper .lm-CommandPalette-itemLabel{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.jupyter-wrapper .lm-close-icon{border:1px solid transparent;background-color:transparent;position:absolute;z-index:1;right:3%;top:0;bottom:0;margin:auto;padding:7px 0;display:none;vertical-align:middle;outline:0;cursor:pointer}.jupyter-wrapper .lm-close-icon:after{content:\"X\";display:block;width:15px;height:15px;text-align:center;color:#000;font-weight:400;font-size:12px;cursor:pointer}.jupyter-wrapper .p-DockPanel,.jupyter-wrapper .lm-DockPanel,.jupyter-wrapper .p-DockPanel-widget,.jupyter-wrapper .lm-DockPanel-widget{z-index:0}.jupyter-wrapper .p-DockPanel-tabBar,.jupyter-wrapper .lm-DockPanel-tabBar{z-index:1}.jupyter-wrapper .p-DockPanel-handle,.jupyter-wrapper .lm-DockPanel-handle{z-index:2}.jupyter-wrapper .p-DockPanel-handle.p-mod-hidden,.jupyter-wrapper .lm-DockPanel-handle.lm-mod-hidden{display:none!important}.jupyter-wrapper .p-DockPanel-handle:after,.jupyter-wrapper .lm-DockPanel-handle:after{position:absolute;top:0;left:0;width:100%;height:100%;content:\"\"}.jupyter-wrapper .p-DockPanel-handle[data-orientation=horizontal],.jupyter-wrapper .lm-DockPanel-handle[data-orientation=horizontal]{cursor:ew-resize}.jupyter-wrapper .p-DockPanel-handle[data-orientation=vertical],.jupyter-wrapper .lm-DockPanel-handle[data-orientation=vertical]{cursor:ns-resize}.jupyter-wrapper .p-DockPanel-handle[data-orientation=horizontal]:after,.jupyter-wrapper .lm-DockPanel-handle[data-orientation=horizontal]:after{left:50%;min-width:8px;transform:translate(-50%)}.jupyter-wrapper .p-DockPanel-handle[data-orientation=vertical]:after,.jupyter-wrapper .lm-DockPanel-handle[data-orientation=vertical]:after{top:50%;min-height:8px;transform:translateY(-50%)}.jupyter-wrapper .p-DockPanel-overlay,.jupyter-wrapper .lm-DockPanel-overlay{z-index:3;box-sizing:border-box;pointer-events:none}.jupyter-wrapper .p-DockPanel-overlay.p-mod-hidden,.jupyter-wrapper .lm-DockPanel-overlay.lm-mod-hidden{display:none!important}.jupyter-wrapper .p-Menu,.jupyter-wrapper .lm-Menu{z-index:10000;position:absolute;white-space:nowrap;overflow-x:hidden;overflow-y:auto;outline:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .p-Menu-content,.jupyter-wrapper .lm-Menu-content{margin:0;padding:0;display:table;list-style-type:none}.jupyter-wrapper .p-Menu-item,.jupyter-wrapper .lm-Menu-item{display:table-row}.jupyter-wrapper 
.p-Menu-item.p-mod-hidden,.jupyter-wrapper .p-Menu-item.p-mod-collapsed,.jupyter-wrapper .lm-Menu-item.lm-mod-hidden,.jupyter-wrapper .lm-Menu-item.lm-mod-collapsed{display:none!important}.jupyter-wrapper .p-Menu-itemIcon,.jupyter-wrapper .p-Menu-itemSubmenuIcon,.jupyter-wrapper .lm-Menu-itemIcon,.jupyter-wrapper .lm-Menu-itemSubmenuIcon{display:table-cell;text-align:center}.jupyter-wrapper .p-Menu-itemLabel,.jupyter-wrapper .lm-Menu-itemLabel{display:table-cell;text-align:left}.jupyter-wrapper .p-Menu-itemShortcut,.jupyter-wrapper .lm-Menu-itemShortcut{display:table-cell;text-align:right}.jupyter-wrapper .p-MenuBar,.jupyter-wrapper .lm-MenuBar{outline:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .p-MenuBar-content,.jupyter-wrapper .lm-MenuBar-content{margin:0;padding:0;display:flex;flex-direction:row;list-style-type:none}.jupyter-wrapper .p--MenuBar-item,.jupyter-wrapper .lm-MenuBar-item{box-sizing:border-box}.jupyter-wrapper .p-MenuBar-itemIcon,.jupyter-wrapper .p-MenuBar-itemLabel,.jupyter-wrapper .lm-MenuBar-itemIcon,.jupyter-wrapper .lm-MenuBar-itemLabel{display:inline-block}.jupyter-wrapper .p-ScrollBar,.jupyter-wrapper .lm-ScrollBar{display:flex;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .p-ScrollBar[data-orientation=horizontal],.jupyter-wrapper .lm-ScrollBar[data-orientation=horizontal]{flex-direction:row}.jupyter-wrapper .p-ScrollBar[data-orientation=vertical],.jupyter-wrapper .lm-ScrollBar[data-orientation=vertical]{flex-direction:column}.jupyter-wrapper .p-ScrollBar-button,.jupyter-wrapper .lm-ScrollBar-button{box-sizing:border-box;flex:0 0 auto}.jupyter-wrapper .p-ScrollBar-track,.jupyter-wrapper .lm-ScrollBar-track{box-sizing:border-box;position:relative;overflow:hidden;flex:1 1 auto}.jupyter-wrapper .p-ScrollBar-thumb,.jupyter-wrapper .lm-ScrollBar-thumb{box-sizing:border-box;position:absolute}.jupyter-wrapper .p-SplitPanel-child,.jupyter-wrapper .lm-SplitPanel-child{z-index:0}.jupyter-wrapper .p-SplitPanel-handle,.jupyter-wrapper .lm-SplitPanel-handle{z-index:1}.jupyter-wrapper .p-SplitPanel-handle.p-mod-hidden,.jupyter-wrapper .lm-SplitPanel-handle.lm-mod-hidden{display:none!important}.jupyter-wrapper .p-SplitPanel-handle:after,.jupyter-wrapper .lm-SplitPanel-handle:after{position:absolute;top:0;left:0;width:100%;height:100%;content:\"\"}.jupyter-wrapper .p-SplitPanel[data-orientation=horizontal]>.p-SplitPanel-handle,.jupyter-wrapper .lm-SplitPanel[data-orientation=horizontal]>.lm-SplitPanel-handle{cursor:ew-resize}.jupyter-wrapper .p-SplitPanel[data-orientation=vertical]>.p-SplitPanel-handle,.jupyter-wrapper .lm-SplitPanel[data-orientation=vertical]>.lm-SplitPanel-handle{cursor:ns-resize}.jupyter-wrapper .p-SplitPanel[data-orientation=horizontal]>.p-SplitPanel-handle:after,.jupyter-wrapper .lm-SplitPanel[data-orientation=horizontal]>.lm-SplitPanel-handle:after{left:50%;min-width:8px;transform:translate(-50%)}.jupyter-wrapper .p-SplitPanel[data-orientation=vertical]>.p-SplitPanel-handle:after,.jupyter-wrapper .lm-SplitPanel[data-orientation=vertical]>.lm-SplitPanel-handle:after{top:50%;min-height:8px;transform:translateY(-50%)}.jupyter-wrapper .p-TabBar,.jupyter-wrapper .lm-TabBar{display:flex;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .p-TabBar[data-orientation=horizontal],.jupyter-wrapper .lm-TabBar[data-orientation=horizontal]{flex-direction:row;align-items:flex-end}.jupyter-wrapper 
.p-TabBar[data-orientation=vertical],.jupyter-wrapper .lm-TabBar[data-orientation=vertical]{flex-direction:column;align-items:flex-end}.jupyter-wrapper .p-TabBar-content,.jupyter-wrapper .lm-TabBar-content{margin:0;padding:0;display:flex;flex:1 1 auto;list-style-type:none}.jupyter-wrapper .p-TabBar[data-orientation=horizontal]>.p-TabBar-content,.jupyter-wrapper .lm-TabBar[data-orientation=horizontal]>.lm-TabBar-content{flex-direction:row}.jupyter-wrapper .p-TabBar[data-orientation=vertical]>.p-TabBar-content,.jupyter-wrapper .lm-TabBar[data-orientation=vertical]>.lm-TabBar-content{flex-direction:column}.jupyter-wrapper .p-TabBar-tab,.jupyter-wrapper .lm-TabBar-tab{display:flex;flex-direction:row;box-sizing:border-box;overflow:hidden;touch-action:none}.jupyter-wrapper .p-TabBar-tabIcon,.jupyter-wrapper .p-TabBar-tabCloseIcon,.jupyter-wrapper .lm-TabBar-tabIcon,.jupyter-wrapper .lm-TabBar-tabCloseIcon{flex:0 0 auto}.jupyter-wrapper .p-TabBar-tabLabel,.jupyter-wrapper .lm-TabBar-tabLabel{flex:1 1 auto;overflow:hidden;white-space:nowrap}.jupyter-wrapper .lm-TabBar-tabInput{-webkit-user-select:all;user-select:all;width:100%;box-sizing:border-box}.jupyter-wrapper .p-TabBar-tab.p-mod-hidden,.jupyter-wrapper .lm-TabBar-tab.lm-mod-hidden,.jupyter-wrapper .lm-TabBar-addButton.lm-mod-hidden{display:none!important}.jupyter-wrapper .p-TabBar.p-mod-dragging .p-TabBar-tab,.jupyter-wrapper .lm-TabBar.lm-mod-dragging .lm-TabBar-tab{position:relative}.jupyter-wrapper .p-TabBar.p-mod-dragging[data-orientation=horizontal] .p-TabBar-tab,.jupyter-wrapper .lm-TabBar.lm-mod-dragging[data-orientation=horizontal] .lm-TabBar-tab{left:0;transition:left .15s ease}.jupyter-wrapper .p-TabBar.p-mod-dragging[data-orientation=vertical] .p-TabBar-tab,.jupyter-wrapper .lm-TabBar.lm-mod-dragging[data-orientation=vertical] .lm-TabBar-tab{top:0;transition:top .15s ease}.jupyter-wrapper .p-TabBar.p-mod-dragging .p-TabBar-tab.p-mod-dragging,.jupyter-wrapper .lm-TabBar.lm-mod-dragging .lm-TabBar-tab.lm-mod-dragging{transition:none}.jupyter-wrapper .lm-TabBar-tabLabel .lm-TabBar-tabInput{-webkit-user-select:all;user-select:all;width:100%;box-sizing:border-box;background:inherit}.jupyter-wrapper .p-TabPanel-tabBar,.jupyter-wrapper .lm-TabPanel-tabBar{z-index:1}.jupyter-wrapper .p-TabPanel-stackedPanel,.jupyter-wrapper .lm-TabPanel-stackedPanel{z-index:0}.jupyter-wrapper html{-webkit-box-sizing:border-box;box-sizing:border-box}.jupyter-wrapper *,.jupyter-wrapper *:before,.jupyter-wrapper *:after{-webkit-box-sizing:inherit;box-sizing:inherit}.jupyter-wrapper body{font-size:14px;font-weight:400;letter-spacing:0;line-height:1.28581;text-transform:none;color:#182026;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Open Sans,Helvetica Neue,Icons16,sans-serif}.jupyter-wrapper p{margin-bottom:10px;margin-top:0}.jupyter-wrapper small{font-size:12px}.jupyter-wrapper strong{font-weight:600}.jupyter-wrapper ::-moz-selection{background:rgba(125,188,255,.6)}.jupyter-wrapper ::selection{background:rgba(125,188,255,.6)}.jupyter-wrapper .bp3-heading{color:#182026;font-weight:600;margin:0 0 10px;padding:0}.jupyter-wrapper .bp3-dark .bp3-heading{color:#f5f8fa}.jupyter-wrapper h1.bp3-heading,.jupyter-wrapper .bp3-running-text h1{font-size:36px;line-height:40px}.jupyter-wrapper h2.bp3-heading,.jupyter-wrapper .bp3-running-text h2{font-size:28px;line-height:32px}.jupyter-wrapper h3.bp3-heading,.jupyter-wrapper .bp3-running-text h3{font-size:22px;line-height:25px}.jupyter-wrapper h4.bp3-heading,.jupyter-wrapper 
.bp3-running-text h4{font-size:18px;line-height:21px}.jupyter-wrapper h5.bp3-heading,.jupyter-wrapper .bp3-running-text h5{font-size:16px;line-height:19px}.jupyter-wrapper h6.bp3-heading,.jupyter-wrapper .bp3-running-text h6{font-size:14px;line-height:16px}.jupyter-wrapper .bp3-ui-text{font-size:14px;font-weight:400;letter-spacing:0;line-height:1.28581;text-transform:none}.jupyter-wrapper .bp3-monospace-text{font-family:monospace;text-transform:none}.jupyter-wrapper .bp3-text-muted{color:#5c7080}.jupyter-wrapper .bp3-dark .bp3-text-muted{color:#a7b6c2}.jupyter-wrapper .bp3-text-disabled{color:#5c708099}.jupyter-wrapper .bp3-dark .bp3-text-disabled{color:#a7b6c299}.jupyter-wrapper .bp3-text-overflow-ellipsis{overflow:hidden;text-overflow:ellipsis;white-space:nowrap;word-wrap:normal}.jupyter-wrapper .bp3-running-text{font-size:14px;line-height:1.5}.jupyter-wrapper .bp3-running-text h1{color:#182026;font-weight:600;margin-bottom:20px;margin-top:40px}.jupyter-wrapper .bp3-dark .bp3-running-text h1{color:#f5f8fa}.jupyter-wrapper .bp3-running-text h2{color:#182026;font-weight:600;margin-bottom:20px;margin-top:40px}.jupyter-wrapper .bp3-dark .bp3-running-text h2{color:#f5f8fa}.jupyter-wrapper .bp3-running-text h3{color:#182026;font-weight:600;margin-bottom:20px;margin-top:40px}.jupyter-wrapper .bp3-dark .bp3-running-text h3{color:#f5f8fa}.jupyter-wrapper .bp3-running-text h4{color:#182026;font-weight:600;margin-bottom:20px;margin-top:40px}.jupyter-wrapper .bp3-dark .bp3-running-text h4{color:#f5f8fa}.jupyter-wrapper .bp3-running-text h5{color:#182026;font-weight:600;margin-bottom:20px;margin-top:40px}.jupyter-wrapper .bp3-dark .bp3-running-text h5{color:#f5f8fa}.jupyter-wrapper .bp3-running-text h6{color:#182026;font-weight:600;margin-bottom:20px;margin-top:40px}.jupyter-wrapper .bp3-dark .bp3-running-text h6{color:#f5f8fa}.jupyter-wrapper .bp3-running-text hr{border:none;border-bottom:1px solid rgba(16,22,26,.15);margin:20px 0}.jupyter-wrapper .bp3-dark .bp3-running-text hr{border-color:#ffffff26}.jupyter-wrapper .bp3-running-text p{margin:0 0 10px;padding:0}.jupyter-wrapper .bp3-text-large{font-size:16px}.jupyter-wrapper .bp3-text-small{font-size:12px}.jupyter-wrapper a .bp3-icon,.jupyter-wrapper a .bp3-icon-standard,.jupyter-wrapper a .bp3-icon-large,.jupyter-wrapper a code,.jupyter-wrapper .bp3-dark a code{color:inherit}.jupyter-wrapper .bp3-dark a,.jupyter-wrapper .bp3-dark a:hover{color:#48aff0}.jupyter-wrapper .bp3-dark a .bp3-icon,.jupyter-wrapper .bp3-dark a .bp3-icon-standard,.jupyter-wrapper .bp3-dark a .bp3-icon-large,.jupyter-wrapper .bp3-dark a:hover .bp3-icon,.jupyter-wrapper .bp3-dark a:hover .bp3-icon-standard,.jupyter-wrapper .bp3-dark a:hover .bp3-icon-large{color:inherit}.jupyter-wrapper .bp3-running-text code,.jupyter-wrapper .bp3-code{font-family:monospace;text-transform:none;background:rgba(255,255,255,.7);border-radius:3px;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a33;color:#5c7080;font-size:smaller;padding:2px 5px}.jupyter-wrapper .bp3-dark .bp3-running-text code,.jupyter-wrapper .bp3-running-text .bp3-dark code,.jupyter-wrapper .bp3-dark .bp3-code{background:rgba(16,22,26,.3);-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4);box-shadow:inset 0 0 0 1px #10161a66;color:#a7b6c2}.jupyter-wrapper .bp3-running-text a>code,.jupyter-wrapper a>.bp3-code{color:#137cbd}.jupyter-wrapper .bp3-dark .bp3-running-text a>code,.jupyter-wrapper .bp3-running-text .bp3-dark a>code,.jupyter-wrapper .bp3-dark 
a>.bp3-code{color:inherit}.jupyter-wrapper .bp3-running-text pre,.jupyter-wrapper .bp3-code-block{font-family:monospace;text-transform:none;background:rgba(255,255,255,.7);border-radius:3px;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.15);box-shadow:inset 0 0 0 1px #10161a26;color:#182026;display:block;font-size:13px;line-height:1.4;margin:10px 0;padding:13px 15px 12px;word-break:break-all;word-wrap:break-word}.jupyter-wrapper .bp3-dark .bp3-running-text pre,.jupyter-wrapper .bp3-running-text .bp3-dark pre,.jupyter-wrapper .bp3-dark .bp3-code-block{background:rgba(16,22,26,.3);-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4);box-shadow:inset 0 0 0 1px #10161a66;color:#f5f8fa}.jupyter-wrapper .bp3-running-text pre>code,.jupyter-wrapper .bp3-code-block>code{background:none;-webkit-box-shadow:none;box-shadow:none;color:inherit;font-size:inherit;padding:0}.jupyter-wrapper .bp3-running-text kbd,.jupyter-wrapper .bp3-key{-webkit-box-align:center;-ms-flex-align:center;align-items:center;background:#ffffff;border-radius:3px;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.1),0 0 0 rgba(16,22,26,0),0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a1a,0 0 #10161a00,0 1px 1px #10161a33;color:#5c7080;display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;font-family:inherit;font-size:12px;height:24px;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;line-height:24px;min-width:24px;padding:3px 6px;vertical-align:middle}.jupyter-wrapper .bp3-running-text kbd .bp3-icon,.jupyter-wrapper .bp3-key .bp3-icon,.jupyter-wrapper .bp3-running-text kbd .bp3-icon-standard,.jupyter-wrapper .bp3-key .bp3-icon-standard,.jupyter-wrapper .bp3-running-text kbd .bp3-icon-large,.jupyter-wrapper .bp3-key .bp3-icon-large{margin-right:5px}.jupyter-wrapper .bp3-dark .bp3-running-text kbd,.jupyter-wrapper .bp3-running-text .bp3-dark kbd,.jupyter-wrapper .bp3-dark .bp3-key{background:#394b59;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.2),0 0 0 rgba(16,22,26,0),0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a33,0 0 #10161a00,0 1px 1px #10161a66;color:#a7b6c2}.jupyter-wrapper .bp3-running-text blockquote,.jupyter-wrapper .bp3-blockquote{border-left:solid 4px rgba(167,182,194,.5);margin:0 0 10px;padding:0 20px}.jupyter-wrapper .bp3-dark .bp3-running-text blockquote,.jupyter-wrapper .bp3-running-text .bp3-dark blockquote,.jupyter-wrapper .bp3-dark .bp3-blockquote{border-color:#73869480}.jupyter-wrapper .bp3-running-text ul,.jupyter-wrapper .bp3-running-text ol,.jupyter-wrapper .bp3-list{margin:10px 0;padding-left:30px}.jupyter-wrapper .bp3-running-text ul li:not(:last-child),.jupyter-wrapper .bp3-running-text ol li:not(:last-child),.jupyter-wrapper .bp3-list li:not(:last-child){margin-bottom:5px}.jupyter-wrapper .bp3-running-text ul ol,.jupyter-wrapper .bp3-running-text ol ol,.jupyter-wrapper .bp3-list ol,.jupyter-wrapper .bp3-running-text ul ul,.jupyter-wrapper .bp3-running-text ol ul,.jupyter-wrapper .bp3-list ul{margin-top:5px}.jupyter-wrapper .bp3-list-unstyled{list-style:none;margin:0;padding:0}.jupyter-wrapper .bp3-list-unstyled li{padding:0}.jupyter-wrapper .bp3-rtl{text-align:right}.jupyter-wrapper .bp3-dark{color:#f5f8fa}.jupyter-wrapper :focus{outline:rgba(19,124,189,.6) auto 2px;outline-offset:2px;-moz-outline-radius:6px}.jupyter-wrapper .bp3-focus-disabled :focus{outline:none!important}.jupyter-wrapper .bp3-focus-disabled :focus~.bp3-control-indicator{outline:none!important}.jupyter-wrapper .bp3-alert{max-width:400px;padding:20px}.jupyter-wrapper 
.bp3-alert-body{display:-webkit-box;display:-ms-flexbox;display:flex}.jupyter-wrapper .bp3-alert-body .bp3-icon{font-size:40px;margin-right:20px;margin-top:0}.jupyter-wrapper .bp3-alert-contents{word-break:break-word}.jupyter-wrapper .bp3-alert-footer{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:reverse;-ms-flex-direction:row-reverse;flex-direction:row-reverse;margin-top:10px}.jupyter-wrapper .bp3-alert-footer .bp3-button{margin-left:10px}.jupyter-wrapper .bp3-breadcrumbs{-webkit-box-align:center;-ms-flex-align:center;align-items:center;cursor:default;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;height:30px;list-style:none;margin:0;padding:0}.jupyter-wrapper .bp3-breadcrumbs>li{-webkit-box-align:center;-ms-flex-align:center;align-items:center;display:-webkit-box;display:-ms-flexbox;display:flex}.jupyter-wrapper .bp3-breadcrumbs>li:after{background:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill-rule='evenodd' clip-rule='evenodd' d='M10.71 7.29l-4-4a1.003 1.003 0 00-1.42 1.42L8.59 8 5.3 11.29c-.19.18-.3.43-.3.71a1.003 1.003 0 001.71.71l4-4c.18-.18.29-.43.29-.71 0-.28-.11-.53-.29-.71z' fill='%235C7080'/%3e%3c/svg%3e\");content:\"\";display:block;height:16px;margin:0 5px;width:16px}.jupyter-wrapper .bp3-breadcrumbs>li:last-of-type:after{display:none}.jupyter-wrapper .bp3-breadcrumb,.jupyter-wrapper .bp3-breadcrumb-current,.jupyter-wrapper .bp3-breadcrumbs-collapsed{-webkit-box-align:center;-ms-flex-align:center;align-items:center;display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;font-size:16px}.jupyter-wrapper .bp3-breadcrumb,.jupyter-wrapper .bp3-breadcrumbs-collapsed{color:#5c7080}.jupyter-wrapper .bp3-breadcrumb:hover{text-decoration:none}.jupyter-wrapper .bp3-breadcrumb.bp3-disabled{color:#5c708099;cursor:not-allowed}.jupyter-wrapper .bp3-breadcrumb .bp3-icon{margin-right:5px}.jupyter-wrapper .bp3-breadcrumb-current{color:inherit;font-weight:600}.jupyter-wrapper .bp3-breadcrumb-current .bp3-input{font-size:inherit;font-weight:inherit;vertical-align:baseline}.jupyter-wrapper .bp3-breadcrumbs-collapsed{background:#ced9e0;border:none;border-radius:3px;cursor:pointer;margin-right:2px;padding:1px 5px;vertical-align:text-bottom}.jupyter-wrapper .bp3-breadcrumbs-collapsed:before{background:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cg fill='%235C7080'%3e%3ccircle cx='2' cy='8.03' r='2'/%3e%3ccircle cx='14' cy='8.03' r='2'/%3e%3ccircle cx='8' cy='8.03' r='2'/%3e%3c/g%3e%3c/svg%3e\") center no-repeat;content:\"\";display:block;height:16px;width:16px}.jupyter-wrapper .bp3-breadcrumbs-collapsed:hover{background:#bfccd6;color:#182026;text-decoration:none}.jupyter-wrapper .bp3-dark .bp3-breadcrumb,.jupyter-wrapper .bp3-dark .bp3-breadcrumbs-collapsed{color:#a7b6c2}.jupyter-wrapper .bp3-dark .bp3-breadcrumbs>li:after{color:#a7b6c2}.jupyter-wrapper .bp3-dark .bp3-breadcrumb.bp3-disabled{color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-breadcrumb-current{color:#f5f8fa}.jupyter-wrapper .bp3-dark .bp3-breadcrumbs-collapsed{background:rgba(16,22,26,.4)}.jupyter-wrapper .bp3-dark .bp3-breadcrumbs-collapsed:hover{background:rgba(16,22,26,.6);color:#f5f8fa}.jupyter-wrapper 
.bp3-button{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row;-webkit-box-align:center;-ms-flex-align:center;align-items:center;border:none;border-radius:3px;cursor:pointer;font-size:14px;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;padding:5px 10px;text-align:left;vertical-align:middle;min-height:30px;min-width:30px}.jupyter-wrapper .bp3-button>*{-webkit-box-flex:0;-ms-flex-positive:0;flex-grow:0;-ms-flex-negative:0;flex-shrink:0}.jupyter-wrapper .bp3-button>.bp3-fill{-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;-ms-flex-negative:1;flex-shrink:1}.jupyter-wrapper .bp3-button:before,.jupyter-wrapper .bp3-button>*{margin-right:7px}.jupyter-wrapper .bp3-button:empty:before,.jupyter-wrapper .bp3-button>:last-child{margin-right:0}.jupyter-wrapper .bp3-button:empty{padding:0!important}.jupyter-wrapper .bp3-button:disabled,.jupyter-wrapper .bp3-button.bp3-disabled{cursor:not-allowed}.jupyter-wrapper .bp3-button.bp3-fill{display:-webkit-box;display:-ms-flexbox;display:flex;width:100%}.jupyter-wrapper .bp3-button.bp3-align-right,.jupyter-wrapper .bp3-align-right .bp3-button{text-align:right}.jupyter-wrapper .bp3-button.bp3-align-left,.jupyter-wrapper .bp3-align-left .bp3-button{text-align:left}.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]){background-color:#f5f8fa;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.8)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.8),rgba(255,255,255,0));-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 -1px 0 rgba(16,22,26,.1);box-shadow:inset 0 0 0 1px #10161a33,inset 0 -1px #10161a1a;color:#182026}.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]):hover{background-clip:padding-box;background-color:#ebf1f5;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 -1px 0 rgba(16,22,26,.1);box-shadow:inset 0 0 0 1px #10161a33,inset 0 -1px #10161a1a}.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]):active,.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]).bp3-active{background-color:#d8e1e8;background-image:none;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a33,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]):disabled,.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]).bp3-disabled{background-color:#ced9e080;background-image:none;-webkit-box-shadow:none;box-shadow:none;color:#5c708099;cursor:not-allowed;outline:none}.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]):disabled.bp3-active,.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]):disabled.bp3-active:hover,.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]).bp3-disabled.bp3-active,.jupyter-wrapper .bp3-button:not([class*=bp3-intent-]).bp3-disabled.bp3-active:hover{background:rgba(206,217,224,.7)}.jupyter-wrapper .bp3-button.bp3-intent-primary{background-color:#137cbd;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.1)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.1),rgba(255,255,255,0));-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33;color:#fff}.jupyter-wrapper .bp3-button.bp3-intent-primary:hover,.jupyter-wrapper 
.bp3-button.bp3-intent-primary:active,.jupyter-wrapper .bp3-button.bp3-intent-primary.bp3-active{color:#fff}.jupyter-wrapper .bp3-button.bp3-intent-primary:hover{background-color:#106ba3;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33}.jupyter-wrapper .bp3-button.bp3-intent-primary:active,.jupyter-wrapper .bp3-button.bp3-intent-primary.bp3-active{background-color:#0e5a8a;background-image:none;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-button.bp3-intent-primary:disabled,.jupyter-wrapper .bp3-button.bp3-intent-primary.bp3-disabled{background-color:#137cbd80;background-image:none;border-color:transparent;-webkit-box-shadow:none;box-shadow:none;color:#fff9}.jupyter-wrapper .bp3-button.bp3-intent-success{background-color:#0f9960;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.1)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.1),rgba(255,255,255,0));-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33;color:#fff}.jupyter-wrapper .bp3-button.bp3-intent-success:hover,.jupyter-wrapper .bp3-button.bp3-intent-success:active,.jupyter-wrapper .bp3-button.bp3-intent-success.bp3-active{color:#fff}.jupyter-wrapper .bp3-button.bp3-intent-success:hover{background-color:#0d8050;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33}.jupyter-wrapper .bp3-button.bp3-intent-success:active,.jupyter-wrapper .bp3-button.bp3-intent-success.bp3-active{background-color:#0a6640;background-image:none;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-button.bp3-intent-success:disabled,.jupyter-wrapper .bp3-button.bp3-intent-success.bp3-disabled{background-color:#0f996080;background-image:none;border-color:transparent;-webkit-box-shadow:none;box-shadow:none;color:#fff9}.jupyter-wrapper .bp3-button.bp3-intent-warning{background-color:#d9822b;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.1)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.1),rgba(255,255,255,0));-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33;color:#fff}.jupyter-wrapper .bp3-button.bp3-intent-warning:hover,.jupyter-wrapper .bp3-button.bp3-intent-warning:active,.jupyter-wrapper .bp3-button.bp3-intent-warning.bp3-active{color:#fff}.jupyter-wrapper .bp3-button.bp3-intent-warning:hover{background-color:#bf7326;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33}.jupyter-wrapper .bp3-button.bp3-intent-warning:active,.jupyter-wrapper .bp3-button.bp3-intent-warning.bp3-active{background-color:#a66321;background-image:none;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-button.bp3-intent-warning:disabled,.jupyter-wrapper 
.bp3-drawer:not(.bp3-position-top):not(.bp3-position-bottom):not(.bp3-position-left):not(.bp3-position-right).bp3-vertical.bp3-overlay-exit-active{-webkit-transform:translateY(100%);transform:translateY(100%);-webkit-transition-delay:0;transition-delay:0;-webkit-transition-duration:.1s;transition-duration:.1s;-webkit-transition-property:-webkit-transform;transition-property:-webkit-transform;transition-property:transform;transition-property:transform,-webkit-transform;-webkit-transition-timing-function:cubic-bezier(.4,1,.75,.9);transition-timing-function:cubic-bezier(.4,1,.75,.9)}.jupyter-wrapper .bp3-drawer.bp3-dark,.jupyter-wrapper .bp3-dark .bp3-drawer{background:#30404d;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.2),0 4px 8px rgba(16,22,26,.4),0 18px 46px 6px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a33,0 4px 8px #10161a66,0 18px 46px 6px #10161a66;color:#f5f8fa}.jupyter-wrapper .bp3-drawer-header{-webkit-box-align:center;-ms-flex-align:center;align-items:center;border-radius:0;-webkit-box-shadow:0 1px 0 rgba(16,22,26,.15);box-shadow:0 1px #10161a26;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;min-height:40px;padding:5px 5px 5px 20px;position:relative}.jupyter-wrapper .bp3-drawer-header .bp3-icon-large,.jupyter-wrapper .bp3-drawer-header .bp3-icon{color:#5c7080;-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;margin-right:10px}.jupyter-wrapper .bp3-drawer-header .bp3-heading{overflow:hidden;text-overflow:ellipsis;white-space:nowrap;word-wrap:normal;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;line-height:inherit;margin:0}.jupyter-wrapper .bp3-drawer-header .bp3-heading:last-child{margin-right:20px}.jupyter-wrapper .bp3-dark .bp3-drawer-header{-webkit-box-shadow:0 1px 0 rgba(16,22,26,.4);box-shadow:0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-drawer-header .bp3-icon-large,.jupyter-wrapper .bp3-dark .bp3-drawer-header .bp3-icon{color:#a7b6c2}.jupyter-wrapper .bp3-drawer-body{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;line-height:18px;overflow:auto}.jupyter-wrapper .bp3-drawer-footer{-webkit-box-shadow:inset 0 1px 0 rgba(16,22,26,.15);box-shadow:inset 0 1px #10161a26;-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;padding:10px 20px;position:relative}.jupyter-wrapper .bp3-dark .bp3-drawer-footer{-webkit-box-shadow:inset 0 1px 0 rgba(16,22,26,.4);box-shadow:inset 0 1px #10161a66}.jupyter-wrapper .bp3-editable-text{cursor:text;display:inline-block;max-width:100%;position:relative;vertical-align:top;white-space:nowrap}.jupyter-wrapper .bp3-editable-text:before{bottom:-3px;left:-3px;position:absolute;right:-3px;top:-3px;border-radius:3px;content:\"\";-webkit-transition:background-color .1s cubic-bezier(.4,1,.75,.9),-webkit-box-shadow .1s cubic-bezier(.4,1,.75,.9);transition:background-color .1s cubic-bezier(.4,1,.75,.9),-webkit-box-shadow .1s cubic-bezier(.4,1,.75,.9);transition:background-color .1s cubic-bezier(.4,1,.75,.9),box-shadow .1s cubic-bezier(.4,1,.75,.9);transition:background-color .1s cubic-bezier(.4,1,.75,.9),box-shadow .1s cubic-bezier(.4,1,.75,.9),-webkit-box-shadow .1s cubic-bezier(.4,1,.75,.9)}.jupyter-wrapper .bp3-editable-text:hover:before{-webkit-box-shadow:0 0 0 0 rgba(19,124,189,0),0 0 0 0 rgba(19,124,189,0),inset 0 0 0 1px rgba(16,22,26,.15);box-shadow:0 0 #137cbd00,0 0 #137cbd00,inset 0 0 0 1px #10161a26}.jupyter-wrapper .bp3-editable-text.bp3-editable-text-editing:before{background-color:#fff;-webkit-box-shadow:0 0 0 1px #137cbd,0 0 0 3px rgba(19,124,189,.3),inset 0 1px 1px 
rgba(16,22,26,.2);box-shadow:0 0 0 1px #137cbd,0 0 0 3px #137cbd4d,inset 0 1px 1px #10161a33}.jupyter-wrapper .bp3-editable-text.bp3-disabled:before{-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-editable-text.bp3-intent-primary .bp3-editable-text-input,.jupyter-wrapper .bp3-editable-text.bp3-intent-primary .bp3-editable-text-content{color:#137cbd}.jupyter-wrapper .bp3-editable-text.bp3-intent-primary:hover:before{-webkit-box-shadow:0 0 0 0 rgba(19,124,189,0),0 0 0 0 rgba(19,124,189,0),inset 0 0 0 1px rgba(19,124,189,.4);box-shadow:0 0 #137cbd00,0 0 #137cbd00,inset 0 0 0 1px #137cbd66}.jupyter-wrapper .bp3-editable-text.bp3-intent-primary.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #137cbd,0 0 0 3px rgba(19,124,189,.3),inset 0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 0 1px #137cbd,0 0 0 3px #137cbd4d,inset 0 1px 1px #10161a33}.jupyter-wrapper .bp3-editable-text.bp3-intent-success .bp3-editable-text-input,.jupyter-wrapper .bp3-editable-text.bp3-intent-success .bp3-editable-text-content{color:#0f9960}.jupyter-wrapper .bp3-editable-text.bp3-intent-success:hover:before{-webkit-box-shadow:0 0 0 0 rgba(15,153,96,0),0 0 0 0 rgba(15,153,96,0),inset 0 0 0 1px rgba(15,153,96,.4);box-shadow:0 0 #0f996000,0 0 #0f996000,inset 0 0 0 1px #0f996066}.jupyter-wrapper .bp3-editable-text.bp3-intent-success.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #0f9960,0 0 0 3px rgba(15,153,96,.3),inset 0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 0 1px #0f9960,0 0 0 3px #0f99604d,inset 0 1px 1px #10161a33}.jupyter-wrapper .bp3-editable-text.bp3-intent-warning .bp3-editable-text-input,.jupyter-wrapper .bp3-editable-text.bp3-intent-warning .bp3-editable-text-content{color:#d9822b}.jupyter-wrapper .bp3-editable-text.bp3-intent-warning:hover:before{-webkit-box-shadow:0 0 0 0 rgba(217,130,43,0),0 0 0 0 rgba(217,130,43,0),inset 0 0 0 1px rgba(217,130,43,.4);box-shadow:0 0 #d9822b00,0 0 #d9822b00,inset 0 0 0 1px #d9822b66}.jupyter-wrapper .bp3-editable-text.bp3-intent-warning.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #d9822b,0 0 0 3px rgba(217,130,43,.3),inset 0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 0 1px #d9822b,0 0 0 3px #d9822b4d,inset 0 1px 1px #10161a33}.jupyter-wrapper .bp3-editable-text.bp3-intent-danger .bp3-editable-text-input,.jupyter-wrapper .bp3-editable-text.bp3-intent-danger .bp3-editable-text-content{color:#db3737}.jupyter-wrapper .bp3-editable-text.bp3-intent-danger:hover:before{-webkit-box-shadow:0 0 0 0 rgba(219,55,55,0),0 0 0 0 rgba(219,55,55,0),inset 0 0 0 1px rgba(219,55,55,.4);box-shadow:0 0 #db373700,0 0 #db373700,inset 0 0 0 1px #db373766}.jupyter-wrapper .bp3-editable-text.bp3-intent-danger.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #db3737,0 0 0 3px rgba(219,55,55,.3),inset 0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 0 1px #db3737,0 0 0 3px #db37374d,inset 0 1px 1px #10161a33}.jupyter-wrapper .bp3-dark .bp3-editable-text:hover:before{-webkit-box-shadow:0 0 0 0 rgba(19,124,189,0),0 0 0 0 rgba(19,124,189,0),inset 0 0 0 1px rgba(255,255,255,.15);box-shadow:0 0 #137cbd00,0 0 #137cbd00,inset 0 0 0 1px #ffffff26}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-editable-text-editing:before{background-color:#10161a4d;-webkit-box-shadow:0 0 0 1px #137cbd,0 0 0 3px rgba(19,124,189,.3),inset 0 0 0 1px rgba(16,22,26,.3),inset 0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #137cbd,0 0 0 3px #137cbd4d,inset 0 0 0 1px #10161a4d,inset 0 1px 1px #10161a66}.jupyter-wrapper .bp3-dark 
.bp3-editable-text.bp3-disabled:before{-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-primary .bp3-editable-text-content{color:#48aff0}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-primary:hover:before{-webkit-box-shadow:0 0 0 0 rgba(72,175,240,0),0 0 0 0 rgba(72,175,240,0),inset 0 0 0 1px rgba(72,175,240,.4);box-shadow:0 0 #48aff000,0 0 #48aff000,inset 0 0 0 1px #48aff066}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-primary.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #48aff0,0 0 0 3px rgba(72,175,240,.3),inset 0 0 0 1px rgba(16,22,26,.3),inset 0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #48aff0,0 0 0 3px #48aff04d,inset 0 0 0 1px #10161a4d,inset 0 1px 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-success .bp3-editable-text-content{color:#3dcc91}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-success:hover:before{-webkit-box-shadow:0 0 0 0 rgba(61,204,145,0),0 0 0 0 rgba(61,204,145,0),inset 0 0 0 1px rgba(61,204,145,.4);box-shadow:0 0 #3dcc9100,0 0 #3dcc9100,inset 0 0 0 1px #3dcc9166}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-success.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #3dcc91,0 0 0 3px rgba(61,204,145,.3),inset 0 0 0 1px rgba(16,22,26,.3),inset 0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #3dcc91,0 0 0 3px #3dcc914d,inset 0 0 0 1px #10161a4d,inset 0 1px 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-warning .bp3-editable-text-content{color:#ffb366}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-warning:hover:before{-webkit-box-shadow:0 0 0 0 rgba(255,179,102,0),0 0 0 0 rgba(255,179,102,0),inset 0 0 0 1px rgba(255,179,102,.4);box-shadow:0 0 #ffb36600,0 0 #ffb36600,inset 0 0 0 1px #ffb36666}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-warning.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #ffb366,0 0 0 3px rgba(255,179,102,.3),inset 0 0 0 1px rgba(16,22,26,.3),inset 0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #ffb366,0 0 0 3px #ffb3664d,inset 0 0 0 1px #10161a4d,inset 0 1px 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-danger .bp3-editable-text-content{color:#ff7373}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-danger:hover:before{-webkit-box-shadow:0 0 0 0 rgba(255,115,115,0),0 0 0 0 rgba(255,115,115,0),inset 0 0 0 1px rgba(255,115,115,.4);box-shadow:0 0 #ff737300,0 0 #ff737300,inset 0 0 0 1px #ff737366}.jupyter-wrapper .bp3-dark .bp3-editable-text.bp3-intent-danger.bp3-editable-text-editing:before{-webkit-box-shadow:0 0 0 1px #ff7373,0 0 0 3px rgba(255,115,115,.3),inset 0 0 0 1px rgba(16,22,26,.3),inset 0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #ff7373,0 0 0 3px #ff73734d,inset 0 0 0 1px #10161a4d,inset 0 1px 1px #10161a66}.jupyter-wrapper .bp3-editable-text-input,.jupyter-wrapper .bp3-editable-text-content{color:inherit;display:inherit;font:inherit;letter-spacing:inherit;max-width:inherit;min-width:inherit;position:relative;resize:none;text-transform:inherit;vertical-align:top}.jupyter-wrapper .bp3-editable-text-input{background:none;border:none;-webkit-box-shadow:none;box-shadow:none;padding:0;white-space:pre-wrap;width:100%}.jupyter-wrapper .bp3-editable-text-input::-webkit-input-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-editable-text-input::-moz-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-editable-text-input:-ms-input-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper 
.bp3-editable-text-input::-ms-input-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-editable-text-input::placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-editable-text-input:focus{outline:none}.jupyter-wrapper .bp3-editable-text-input::-ms-clear{display:none}.jupyter-wrapper .bp3-editable-text-content{overflow:hidden;padding-right:2px;text-overflow:ellipsis;white-space:pre}.jupyter-wrapper .bp3-editable-text-editing>.bp3-editable-text-content{left:0;position:absolute;visibility:hidden}.jupyter-wrapper .bp3-editable-text-placeholder>.bp3-editable-text-content{color:#5c708099}.jupyter-wrapper .bp3-dark .bp3-editable-text-placeholder>.bp3-editable-text-content{color:#a7b6c299}.jupyter-wrapper .bp3-editable-text.bp3-multiline{display:block}.jupyter-wrapper .bp3-editable-text.bp3-multiline .bp3-editable-text-content{overflow:auto;white-space:pre-wrap;word-wrap:break-word}.jupyter-wrapper .bp3-divider{border-bottom:1px solid rgba(16,22,26,.15);border-right:1px solid rgba(16,22,26,.15);margin:5px}.jupyter-wrapper .bp3-dark .bp3-divider{border-color:#10161a66}.jupyter-wrapper .bp3-control-group{-webkit-transform:translateZ(0);transform:translateZ(0);display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row;-webkit-box-align:stretch;-ms-flex-align:stretch;align-items:stretch}.jupyter-wrapper .bp3-control-group>*{-webkit-box-flex:0;-ms-flex-positive:0;flex-grow:0;-ms-flex-negative:0;flex-shrink:0}.jupyter-wrapper .bp3-control-group>.bp3-fill{-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;-ms-flex-negative:1;flex-shrink:1}.jupyter-wrapper .bp3-control-group .bp3-button,.jupyter-wrapper .bp3-control-group .bp3-html-select,.jupyter-wrapper .bp3-control-group .bp3-input,.jupyter-wrapper .bp3-control-group .bp3-select{position:relative}.jupyter-wrapper .bp3-control-group .bp3-input{border-radius:inherit;z-index:2}.jupyter-wrapper .bp3-control-group .bp3-input:focus{border-radius:3px;z-index:14}.jupyter-wrapper .bp3-control-group .bp3-input[class*=bp3-intent]{z-index:13}.jupyter-wrapper .bp3-control-group .bp3-input[class*=bp3-intent]:focus{z-index:15}.jupyter-wrapper .bp3-control-group .bp3-input[readonly],.jupyter-wrapper .bp3-control-group .bp3-input:disabled,.jupyter-wrapper .bp3-control-group .bp3-input.bp3-disabled{z-index:1}.jupyter-wrapper .bp3-control-group .bp3-input-group[class*=bp3-intent] .bp3-input{z-index:13}.jupyter-wrapper .bp3-control-group .bp3-input-group[class*=bp3-intent] .bp3-input:focus{z-index:15}.jupyter-wrapper .bp3-control-group .bp3-button,.jupyter-wrapper .bp3-control-group .bp3-html-select select,.jupyter-wrapper .bp3-control-group .bp3-select select{-webkit-transform:translateZ(0);transform:translateZ(0);border-radius:inherit;z-index:4}.jupyter-wrapper .bp3-control-group .bp3-button:focus,.jupyter-wrapper .bp3-control-group .bp3-html-select select:focus,.jupyter-wrapper .bp3-control-group .bp3-select select:focus{z-index:5}.jupyter-wrapper .bp3-control-group .bp3-button:hover,.jupyter-wrapper .bp3-control-group .bp3-html-select select:hover,.jupyter-wrapper .bp3-control-group .bp3-select select:hover{z-index:6}.jupyter-wrapper .bp3-control-group .bp3-button:active,.jupyter-wrapper .bp3-control-group .bp3-html-select select:active,.jupyter-wrapper .bp3-control-group .bp3-select select:active{z-index:7}.jupyter-wrapper .bp3-control-group .bp3-button[readonly],.jupyter-wrapper .bp3-control-group .bp3-button:disabled,.jupyter-wrapper .bp3-control-group 
.bp3-button.bp3-disabled,.jupyter-wrapper .bp3-control-group .bp3-html-select select[readonly],.jupyter-wrapper .bp3-control-group .bp3-html-select select:disabled,.jupyter-wrapper .bp3-control-group .bp3-html-select select.bp3-disabled,.jupyter-wrapper .bp3-control-group .bp3-select select[readonly],.jupyter-wrapper .bp3-control-group .bp3-select select:disabled,.jupyter-wrapper .bp3-control-group .bp3-select select.bp3-disabled{z-index:3}.jupyter-wrapper .bp3-control-group .bp3-button[class*=bp3-intent],.jupyter-wrapper .bp3-control-group .bp3-html-select select[class*=bp3-intent],.jupyter-wrapper .bp3-control-group .bp3-select select[class*=bp3-intent]{z-index:9}.jupyter-wrapper .bp3-control-group .bp3-button[class*=bp3-intent]:focus,.jupyter-wrapper .bp3-control-group .bp3-html-select select[class*=bp3-intent]:focus,.jupyter-wrapper .bp3-control-group .bp3-select select[class*=bp3-intent]:focus{z-index:10}.jupyter-wrapper .bp3-control-group .bp3-button[class*=bp3-intent]:hover,.jupyter-wrapper .bp3-control-group .bp3-html-select select[class*=bp3-intent]:hover,.jupyter-wrapper .bp3-control-group .bp3-select select[class*=bp3-intent]:hover{z-index:11}.jupyter-wrapper .bp3-control-group .bp3-button[class*=bp3-intent]:active,.jupyter-wrapper .bp3-control-group .bp3-html-select select[class*=bp3-intent]:active,.jupyter-wrapper .bp3-control-group .bp3-select select[class*=bp3-intent]:active{z-index:12}.jupyter-wrapper .bp3-control-group .bp3-button[class*=bp3-intent][readonly],.jupyter-wrapper .bp3-control-group .bp3-button[class*=bp3-intent]:disabled,.jupyter-wrapper .bp3-control-group .bp3-button[class*=bp3-intent].bp3-disabled,.jupyter-wrapper .bp3-control-group .bp3-html-select select[class*=bp3-intent][readonly],.jupyter-wrapper .bp3-control-group .bp3-html-select select[class*=bp3-intent]:disabled,.jupyter-wrapper .bp3-control-group .bp3-html-select select[class*=bp3-intent].bp3-disabled,.jupyter-wrapper .bp3-control-group .bp3-select select[class*=bp3-intent][readonly],.jupyter-wrapper .bp3-control-group .bp3-select select[class*=bp3-intent]:disabled,.jupyter-wrapper .bp3-control-group .bp3-select select[class*=bp3-intent].bp3-disabled{z-index:8}.jupyter-wrapper .bp3-control-group .bp3-input-group>.bp3-icon,.jupyter-wrapper .bp3-control-group .bp3-input-group>.bp3-button,.jupyter-wrapper .bp3-control-group .bp3-input-group>.bp3-input-left-container,.jupyter-wrapper .bp3-control-group .bp3-input-group>.bp3-input-action{z-index:16}.jupyter-wrapper .bp3-control-group .bp3-select:after,.jupyter-wrapper .bp3-control-group .bp3-html-select:after,.jupyter-wrapper .bp3-control-group .bp3-select>.bp3-icon,.jupyter-wrapper .bp3-control-group .bp3-html-select>.bp3-icon{z-index:17}.jupyter-wrapper .bp3-control-group .bp3-select:focus-within{z-index:5}.jupyter-wrapper .bp3-control-group:not(.bp3-vertical)>*:not(.bp3-divider){margin-right:-1px}.jupyter-wrapper .bp3-control-group:not(.bp3-vertical)>.bp3-divider:not(:first-child){margin-left:6px}.jupyter-wrapper .bp3-dark .bp3-control-group:not(.bp3-vertical)>*:not(.bp3-divider){margin-right:0}.jupyter-wrapper .bp3-dark .bp3-control-group:not(.bp3-vertical)>.bp3-button+.bp3-button{margin-left:1px}.jupyter-wrapper .bp3-control-group .bp3-popover-wrapper,.jupyter-wrapper .bp3-control-group .bp3-popover-target{border-radius:inherit}.jupyter-wrapper .bp3-control-group>:first-child{border-radius:3px 0 0 3px}.jupyter-wrapper .bp3-control-group>:last-child{border-radius:0 3px 3px 0;margin-right:0}.jupyter-wrapper 
.bp3-control-group>:only-child{border-radius:3px;margin-right:0}.jupyter-wrapper .bp3-control-group .bp3-input-group .bp3-button{border-radius:3px}.jupyter-wrapper .bp3-control-group .bp3-numeric-input:not(:first-child) .bp3-input-group{border-bottom-left-radius:0;border-top-left-radius:0}.jupyter-wrapper .bp3-control-group.bp3-fill{width:100%}.jupyter-wrapper .bp3-control-group>.bp3-fill{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto}.jupyter-wrapper .bp3-control-group.bp3-fill>*:not(.bp3-fixed){-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto}.jupyter-wrapper .bp3-control-group.bp3-vertical{-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.jupyter-wrapper .bp3-control-group.bp3-vertical>*{margin-top:-1px}.jupyter-wrapper .bp3-control-group.bp3-vertical>:first-child{border-radius:3px 3px 0 0;margin-top:0}.jupyter-wrapper .bp3-control-group.bp3-vertical>:last-child{border-radius:0 0 3px 3px}.jupyter-wrapper .bp3-control{cursor:pointer;display:block;margin-bottom:10px;position:relative;text-transform:none}.jupyter-wrapper .bp3-control input:checked~.bp3-control-indicator{background-color:#137cbd;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.1)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.1),rgba(255,255,255,0));-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33;color:#fff}.jupyter-wrapper .bp3-control:hover input:checked~.bp3-control-indicator{background-color:#106ba3;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33}.jupyter-wrapper .bp3-control input:not(:disabled):active:checked~.bp3-control-indicator{background:#0e5a8a;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-control input:disabled:checked~.bp3-control-indicator{background:rgba(19,124,189,.5);-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-dark .bp3-control input:checked~.bp3-control-indicator{-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-control:hover input:checked~.bp3-control-indicator{background-color:#106ba3;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-control input:not(:disabled):active:checked~.bp3-control-indicator{background-color:#0e5a8a;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a66,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-dark .bp3-control input:disabled:checked~.bp3-control-indicator{background:rgba(14,90,138,.5);-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-control:not(.bp3-align-right){padding-left:26px}.jupyter-wrapper .bp3-control:not(.bp3-align-right) .bp3-control-indicator{margin-left:-26px}.jupyter-wrapper .bp3-control.bp3-align-right{padding-right:26px}.jupyter-wrapper .bp3-control.bp3-align-right .bp3-control-indicator{margin-right:-26px}.jupyter-wrapper .bp3-control.bp3-disabled{color:#5c708099;cursor:not-allowed}.jupyter-wrapper .bp3-control.bp3-inline{display:inline-block;margin-right:20px}.jupyter-wrapper .bp3-control input{left:0;opacity:0;position:absolute;top:0;z-index:-1}.jupyter-wrapper .bp3-control 
.bp3-control-indicator{background-clip:padding-box;background-color:#f5f8fa;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.8)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.8),rgba(255,255,255,0));border:none;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 -1px 0 rgba(16,22,26,.1);box-shadow:inset 0 0 0 1px #10161a33,inset 0 -1px #10161a1a;cursor:pointer;display:inline-block;font-size:16px;height:1em;margin-right:10px;margin-top:-3px;position:relative;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;vertical-align:middle;width:1em}.jupyter-wrapper .bp3-control .bp3-control-indicator:before{content:\"\";display:block;height:1em;width:1em}.jupyter-wrapper .bp3-control:hover .bp3-control-indicator{background-color:#ebf1f5}.jupyter-wrapper .bp3-control input:not(:disabled):active~.bp3-control-indicator{background:#d8e1e8;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a33,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-control input:disabled~.bp3-control-indicator{background:rgba(206,217,224,.5);-webkit-box-shadow:none;box-shadow:none;cursor:not-allowed}.jupyter-wrapper .bp3-control input:focus~.bp3-control-indicator{outline:rgba(19,124,189,.6) auto 2px;outline-offset:2px;-moz-outline-radius:6px}.jupyter-wrapper .bp3-control.bp3-align-right .bp3-control-indicator{float:right;margin-left:10px;margin-top:1px}.jupyter-wrapper .bp3-control.bp3-large{font-size:16px}.jupyter-wrapper .bp3-control.bp3-large:not(.bp3-align-right){padding-left:30px}.jupyter-wrapper .bp3-control.bp3-large:not(.bp3-align-right) .bp3-control-indicator{margin-left:-30px}.jupyter-wrapper .bp3-control.bp3-large.bp3-align-right{padding-right:30px}.jupyter-wrapper .bp3-control.bp3-large.bp3-align-right .bp3-control-indicator{margin-right:-30px}.jupyter-wrapper .bp3-control.bp3-large .bp3-control-indicator{font-size:20px}.jupyter-wrapper .bp3-control.bp3-large.bp3-align-right .bp3-control-indicator{margin-top:0}.jupyter-wrapper .bp3-control.bp3-checkbox input:indeterminate~.bp3-control-indicator{background-color:#137cbd;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.1)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.1),rgba(255,255,255,0));-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33;color:#fff}.jupyter-wrapper .bp3-control.bp3-checkbox:hover input:indeterminate~.bp3-control-indicator{background-color:#106ba3;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 -1px 0 rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 -1px #10161a33}.jupyter-wrapper .bp3-control.bp3-checkbox input:not(:disabled):active:indeterminate~.bp3-control-indicator{background:#0e5a8a;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a66,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-control.bp3-checkbox input:disabled:indeterminate~.bp3-control-indicator{background:rgba(19,124,189,.5);-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-dark .bp3-control.bp3-checkbox input:indeterminate~.bp3-control-indicator{-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-control.bp3-checkbox:hover 
input:indeterminate~.bp3-control-indicator{background-color:#106ba3;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-control.bp3-checkbox input:not(:disabled):active:indeterminate~.bp3-control-indicator{background-color:#0e5a8a;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a66,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-dark .bp3-control.bp3-checkbox input:disabled:indeterminate~.bp3-control-indicator{background:rgba(14,90,138,.5);-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-control.bp3-checkbox .bp3-control-indicator{border-radius:3px}.jupyter-wrapper .bp3-control.bp3-checkbox input:checked~.bp3-control-indicator:before{background-image:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill-rule='evenodd' clip-rule='evenodd' d='M12 5c-.28 0-.53.11-.71.29L7 9.59l-2.29-2.3a1.003 1.003 0 00-1.42 1.42l3 3c.18.18.43.29.71.29s.53-.11.71-.29l5-5A1.003 1.003 0 0012 5z' fill='white'/%3e%3c/svg%3e\")}.jupyter-wrapper .bp3-control.bp3-checkbox input:indeterminate~.bp3-control-indicator:before{background-image:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill-rule='evenodd' clip-rule='evenodd' d='M11 7H5c-.55 0-1 .45-1 1s.45 1 1 1h6c.55 0 1-.45 1-1s-.45-1-1-1z' fill='white'/%3e%3c/svg%3e\")}.jupyter-wrapper .bp3-control.bp3-radio .bp3-control-indicator{border-radius:50%}.jupyter-wrapper .bp3-control.bp3-radio input:checked~.bp3-control-indicator:before{background-image:radial-gradient(#ffffff,#ffffff 28%,transparent 32%)}.jupyter-wrapper .bp3-control.bp3-radio input:checked:disabled~.bp3-control-indicator:before{opacity:.5}.jupyter-wrapper .bp3-control.bp3-radio input:focus~.bp3-control-indicator{-moz-outline-radius:16px}.jupyter-wrapper .bp3-control.bp3-switch input~.bp3-control-indicator{background:rgba(167,182,194,.5)}.jupyter-wrapper .bp3-control.bp3-switch:hover input~.bp3-control-indicator{background:rgba(115,134,148,.5)}.jupyter-wrapper .bp3-control.bp3-switch input:not(:disabled):active~.bp3-control-indicator{background:rgba(92,112,128,.5)}.jupyter-wrapper .bp3-control.bp3-switch input:disabled~.bp3-control-indicator{background:rgba(206,217,224,.5)}.jupyter-wrapper .bp3-control.bp3-switch input:disabled~.bp3-control-indicator:before{background:rgba(255,255,255,.8)}.jupyter-wrapper .bp3-control.bp3-switch input:checked~.bp3-control-indicator{background:#137cbd}.jupyter-wrapper .bp3-control.bp3-switch:hover input:checked~.bp3-control-indicator{background:#106ba3}.jupyter-wrapper .bp3-control.bp3-switch input:checked:not(:disabled):active~.bp3-control-indicator{background:#0e5a8a}.jupyter-wrapper .bp3-control.bp3-switch input:checked:disabled~.bp3-control-indicator{background:rgba(19,124,189,.5)}.jupyter-wrapper .bp3-control.bp3-switch input:checked:disabled~.bp3-control-indicator:before{background:rgba(255,255,255,.8)}.jupyter-wrapper .bp3-control.bp3-switch:not(.bp3-align-right){padding-left:38px}.jupyter-wrapper .bp3-control.bp3-switch:not(.bp3-align-right) .bp3-control-indicator{margin-left:-38px}.jupyter-wrapper .bp3-control.bp3-switch.bp3-align-right{padding-right:38px}.jupyter-wrapper .bp3-control.bp3-switch.bp3-align-right .bp3-control-indicator{margin-right:-38px}.jupyter-wrapper .bp3-control.bp3-switch 
.bp3-control-indicator{border:none;border-radius:1.75em;-webkit-box-shadow:none!important;box-shadow:none!important;min-width:1.75em;-webkit-transition:background-color .1s cubic-bezier(.4,1,.75,.9);transition:background-color .1s cubic-bezier(.4,1,.75,.9);width:auto}.jupyter-wrapper .bp3-control.bp3-switch .bp3-control-indicator:before{background:#ffffff;border-radius:50%;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.2),0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a33,0 1px 1px #10161a33;height:calc(1em - 4px);left:0;margin:2px;position:absolute;-webkit-transition:left .1s cubic-bezier(.4,1,.75,.9);transition:left .1s cubic-bezier(.4,1,.75,.9);width:calc(1em - 4px)}.jupyter-wrapper .bp3-control.bp3-switch input:checked~.bp3-control-indicator:before{left:calc(100% - 1em)}.jupyter-wrapper .bp3-control.bp3-switch.bp3-large:not(.bp3-align-right){padding-left:45px}.jupyter-wrapper .bp3-control.bp3-switch.bp3-large:not(.bp3-align-right) .bp3-control-indicator{margin-left:-45px}.jupyter-wrapper .bp3-control.bp3-switch.bp3-large.bp3-align-right{padding-right:45px}.jupyter-wrapper .bp3-control.bp3-switch.bp3-large.bp3-align-right .bp3-control-indicator{margin-right:-45px}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input~.bp3-control-indicator{background:rgba(16,22,26,.5)}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch:hover input~.bp3-control-indicator{background:rgba(16,22,26,.7)}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:not(:disabled):active~.bp3-control-indicator{background:rgba(16,22,26,.9)}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:disabled~.bp3-control-indicator{background:rgba(57,75,89,.5)}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:disabled~.bp3-control-indicator:before{background:rgba(16,22,26,.4)}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:checked~.bp3-control-indicator{background:#137cbd}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch:hover input:checked~.bp3-control-indicator{background:#106ba3}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:checked:not(:disabled):active~.bp3-control-indicator{background:#0e5a8a}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:checked:disabled~.bp3-control-indicator{background:rgba(14,90,138,.5)}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:checked:disabled~.bp3-control-indicator:before{background:rgba(16,22,26,.4)}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch .bp3-control-indicator:before{background:#394b59;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-control.bp3-switch input:checked~.bp3-control-indicator:before{-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4);box-shadow:inset 0 0 0 1px #10161a66}.jupyter-wrapper .bp3-control.bp3-switch .bp3-switch-inner-text{font-size:.7em;text-align:center}.jupyter-wrapper .bp3-control.bp3-switch .bp3-control-indicator-child:first-child{line-height:0;margin-left:.5em;margin-right:1.2em;visibility:hidden}.jupyter-wrapper .bp3-control.bp3-switch .bp3-control-indicator-child:last-child{line-height:1em;margin-left:1.2em;margin-right:.5em;visibility:visible}.jupyter-wrapper .bp3-control.bp3-switch input:checked~.bp3-control-indicator .bp3-control-indicator-child:first-child{line-height:1em;visibility:visible}.jupyter-wrapper .bp3-control.bp3-switch input:checked~.bp3-control-indicator .bp3-control-indicator-child:last-child{line-height:0;visibility:hidden}.jupyter-wrapper .bp3-dark .bp3-control{color:#f5f8fa}.jupyter-wrapper .bp3-dark 
.bp3-control.bp3-disabled{color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-control .bp3-control-indicator{background-color:#394b59;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.05)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.05),rgba(255,255,255,0));-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-control:hover .bp3-control-indicator{background-color:#30404d}.jupyter-wrapper .bp3-dark .bp3-control input:not(:disabled):active~.bp3-control-indicator{background:#202b33;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.6),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a99,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-dark .bp3-control input:disabled~.bp3-control-indicator{background:rgba(57,75,89,.5);-webkit-box-shadow:none;box-shadow:none;cursor:not-allowed}.jupyter-wrapper .bp3-dark .bp3-control.bp3-checkbox input:disabled:checked~.bp3-control-indicator,.jupyter-wrapper .bp3-dark .bp3-control.bp3-checkbox input:disabled:indeterminate~.bp3-control-indicator{color:#a7b6c299}.jupyter-wrapper .bp3-file-input{cursor:pointer;display:inline-block;height:30px;position:relative}.jupyter-wrapper .bp3-file-input input{margin:0;min-width:200px;opacity:0}.jupyter-wrapper .bp3-file-input input:disabled+.bp3-file-upload-input,.jupyter-wrapper .bp3-file-input input.bp3-disabled+.bp3-file-upload-input{background:rgba(206,217,224,.5);-webkit-box-shadow:none;box-shadow:none;color:#5c708099;cursor:not-allowed;resize:none}.jupyter-wrapper .bp3-file-input input:disabled+.bp3-file-upload-input:after,.jupyter-wrapper .bp3-file-input input.bp3-disabled+.bp3-file-upload-input:after{background-color:#ced9e080;background-image:none;-webkit-box-shadow:none;box-shadow:none;color:#5c708099;cursor:not-allowed;outline:none}.jupyter-wrapper .bp3-file-input input:disabled+.bp3-file-upload-input:after .bp3-active,.jupyter-wrapper .bp3-file-input input:disabled+.bp3-file-upload-input:after .bp3-active:hover,.jupyter-wrapper .bp3-file-input input.bp3-disabled+.bp3-file-upload-input:after .bp3-active,.jupyter-wrapper .bp3-file-input input.bp3-disabled+.bp3-file-upload-input:after .bp3-active:hover{background:rgba(206,217,224,.7)}.jupyter-wrapper .bp3-dark .bp3-file-input input:disabled+.bp3-file-upload-input,.jupyter-wrapper .bp3-dark .bp3-file-input input.bp3-disabled+.bp3-file-upload-input{background:rgba(57,75,89,.5);-webkit-box-shadow:none;box-shadow:none;color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-input input:disabled+.bp3-file-upload-input:after,.jupyter-wrapper .bp3-dark .bp3-file-input input.bp3-disabled+.bp3-file-upload-input:after{background-color:#394b5980;background-image:none;-webkit-box-shadow:none;box-shadow:none;color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-input input:disabled+.bp3-file-upload-input:after .bp3-active,.jupyter-wrapper .bp3-dark .bp3-file-input input.bp3-disabled+.bp3-file-upload-input:after .bp3-active{background:rgba(57,75,89,.7)}.jupyter-wrapper .bp3-file-input.bp3-file-input-has-selection .bp3-file-upload-input{color:#182026}.jupyter-wrapper .bp3-dark .bp3-file-input.bp3-file-input-has-selection .bp3-file-upload-input{color:#f5f8fa}.jupyter-wrapper .bp3-file-input.bp3-fill{width:100%}.jupyter-wrapper .bp3-file-input.bp3-large,.jupyter-wrapper .bp3-large .bp3-file-input{height:40px}.jupyter-wrapper .bp3-file-input .bp3-file-upload-input-custom-text:after{content:attr(bp3-button-text)}.jupyter-wrapper 
.bp3-file-upload-input{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:#ffffff;border:none;border-radius:3px;-webkit-box-shadow:0 0 0 0 rgba(19,124,189,0),0 0 0 0 rgba(19,124,189,0),inset 0 0 0 1px rgba(16,22,26,.15),inset 0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 #137cbd00,0 0 #137cbd00,inset 0 0 0 1px #10161a26,inset 0 1px 1px #10161a33;color:#182026;font-size:14px;font-weight:400;height:30px;line-height:30px;outline:none;padding:0 80px 0 10px;-webkit-transition:-webkit-box-shadow .1s cubic-bezier(.4,1,.75,.9);transition:-webkit-box-shadow .1s cubic-bezier(.4,1,.75,.9);transition:box-shadow .1s cubic-bezier(.4,1,.75,.9);transition:box-shadow .1s cubic-bezier(.4,1,.75,.9),-webkit-box-shadow .1s cubic-bezier(.4,1,.75,.9);vertical-align:middle;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;word-wrap:normal;color:#5c708099;left:0;position:absolute;right:0;top:0;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .bp3-file-upload-input::-webkit-input-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-file-upload-input::-moz-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-file-upload-input:-ms-input-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-file-upload-input::-ms-input-placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-file-upload-input::placeholder{color:#5c708099;opacity:1}.jupyter-wrapper .bp3-file-upload-input:focus,.jupyter-wrapper .bp3-file-upload-input.bp3-active{-webkit-box-shadow:0 0 0 1px #137cbd,0 0 0 3px rgba(19,124,189,.3),inset 0 1px 1px rgba(16,22,26,.2);box-shadow:0 0 0 1px #137cbd,0 0 0 3px #137cbd4d,inset 0 1px 1px #10161a33}.jupyter-wrapper .bp3-file-upload-input[type=search],.jupyter-wrapper .bp3-file-upload-input.bp3-round{border-radius:30px;-webkit-box-sizing:border-box;box-sizing:border-box;padding-left:10px}.jupyter-wrapper .bp3-file-upload-input[readonly]{-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.15);box-shadow:inset 0 0 0 1px #10161a26}.jupyter-wrapper .bp3-file-upload-input:disabled,.jupyter-wrapper .bp3-file-upload-input.bp3-disabled{background:rgba(206,217,224,.5);-webkit-box-shadow:none;box-shadow:none;color:#5c708099;cursor:not-allowed;resize:none}.jupyter-wrapper .bp3-file-upload-input:after{background-color:#f5f8fa;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.8)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.8),rgba(255,255,255,0));-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 -1px 0 rgba(16,22,26,.1);box-shadow:inset 0 0 0 1px #10161a33,inset 0 -1px #10161a1a;color:#182026;min-height:24px;min-width:24px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;word-wrap:normal;border-radius:3px;content:\"Browse\";line-height:24px;margin:3px;position:absolute;right:0;text-align:center;top:0;width:70px}.jupyter-wrapper .bp3-file-upload-input:after:hover{background-clip:padding-box;background-color:#ebf1f5;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 -1px 0 rgba(16,22,26,.1);box-shadow:inset 0 0 0 1px #10161a33,inset 0 -1px #10161a1a}.jupyter-wrapper .bp3-file-upload-input:after:active,.jupyter-wrapper .bp3-file-upload-input:after .bp3-active{background-color:#d8e1e8;background-image:none;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a33,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-file-upload-input:after:disabled,.jupyter-wrapper 
.bp3-file-upload-input:after .bp3-disabled{background-color:#ced9e080;background-image:none;-webkit-box-shadow:none;box-shadow:none;color:#5c708099;cursor:not-allowed;outline:none}.jupyter-wrapper .bp3-file-upload-input:after:disabled .bp3-active,.jupyter-wrapper .bp3-file-upload-input:after:disabled .bp3-active:hover,.jupyter-wrapper .bp3-file-upload-input:after .bp3-disabled.bp3-active,.jupyter-wrapper .bp3-file-upload-input:after .bp3-disabled.bp3-active:hover{background:rgba(206,217,224,.7)}.jupyter-wrapper .bp3-file-upload-input:hover:after{background-clip:padding-box;background-color:#ebf1f5;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 -1px 0 rgba(16,22,26,.1);box-shadow:inset 0 0 0 1px #10161a33,inset 0 -1px #10161a1a}.jupyter-wrapper .bp3-file-upload-input:active:after{background-color:#d8e1e8;background-image:none;-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:inset 0 0 0 1px #10161a33,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-large .bp3-file-upload-input{font-size:16px;height:40px;line-height:40px;padding-right:95px}.jupyter-wrapper .bp3-large .bp3-file-upload-input[type=search],.jupyter-wrapper .bp3-large .bp3-file-upload-input.bp3-round{padding:0 15px}.jupyter-wrapper .bp3-large .bp3-file-upload-input:after{min-height:30px;min-width:30px;line-height:30px;margin:5px;width:85px}.jupyter-wrapper .bp3-dark .bp3-file-upload-input{background:rgba(16,22,26,.3);-webkit-box-shadow:0 0 0 0 rgba(19,124,189,0),0 0 0 0 rgba(19,124,189,0),0 0 0 0 rgba(19,124,189,0),inset 0 0 0 1px rgba(16,22,26,.3),inset 0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 #137cbd00,0 0 #137cbd00,0 0 #137cbd00,inset 0 0 0 1px #10161a4d,inset 0 1px 1px #10161a66;color:#f5f8fa;color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input::-webkit-input-placeholder{color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input::-moz-placeholder{color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:-ms-input-placeholder{color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input::-ms-input-placeholder{color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input::placeholder{color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:focus{-webkit-box-shadow:0 0 0 1px #137cbd,0 0 0 1px #137cbd,0 0 0 3px rgba(19,124,189,.3),inset 0 0 0 1px rgba(16,22,26,.3),inset 0 1px 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #137cbd,0 0 0 1px #137cbd,0 0 0 3px #137cbd4d,inset 0 0 0 1px #10161a4d,inset 0 1px 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-file-upload-input[readonly]{-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.4);box-shadow:inset 0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:disabled,.jupyter-wrapper .bp3-dark .bp3-file-upload-input.bp3-disabled{background:rgba(57,75,89,.5);-webkit-box-shadow:none;box-shadow:none;color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after{background-color:#394b59;background-image:-webkit-gradient(linear,left top,left bottom,from(rgba(255,255,255,.05)),to(rgba(255,255,255,0)));background-image:linear-gradient(to bottom,rgba(255,255,255,.05),rgba(255,255,255,0));-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66;color:#f5f8fa}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after:hover,.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after:active,.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after .bp3-active{color:#f5f8fa}.jupyter-wrapper .bp3-dark 
.bp3-file-upload-input:after:hover{background-color:#30404d;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after:active,.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after .bp3-active{background-color:#202b33;background-image:none;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.6),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a99,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after:disabled,.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after .bp3-disabled{background-color:#394b5980;background-image:none;-webkit-box-shadow:none;box-shadow:none;color:#a7b6c299}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after:disabled .bp3-active,.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after .bp3-disabled.bp3-active{background:rgba(57,75,89,.7)}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:after .bp3-button-spinner .bp3-spinner-head{background:rgba(16,22,26,.5);stroke:#8a9ba8}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:hover:after{background-color:#30404d;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a66}.jupyter-wrapper .bp3-dark .bp3-file-upload-input:active:after{background-color:#202b33;background-image:none;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.6),inset 0 1px 2px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a99,inset 0 1px 2px #10161a33}.jupyter-wrapper .bp3-file-upload-input:after{-webkit-box-shadow:inset 0 0 0 1px rgba(16,22,26,.2),inset 0 -1px 0 rgba(16,22,26,.1);box-shadow:inset 0 0 0 1px #10161a33,inset 0 -1px #10161a1a}.jupyter-wrapper .bp3-form-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;margin:0 0 15px}.jupyter-wrapper .bp3-form-group label.bp3-label{margin-bottom:5px}.jupyter-wrapper .bp3-form-group .bp3-control{margin-top:7px}.jupyter-wrapper .bp3-form-group .bp3-form-helper-text{color:#5c7080;font-size:12px;margin-top:5px}.jupyter-wrapper .bp3-form-group.bp3-intent-primary .bp3-form-helper-text{color:#106ba3}.jupyter-wrapper .bp3-form-group.bp3-intent-success .bp3-form-helper-text{color:#0d8050}.jupyter-wrapper .bp3-form-group.bp3-intent-warning .bp3-form-helper-text{color:#bf7326}.jupyter-wrapper .bp3-form-group.bp3-intent-danger .bp3-form-helper-text{color:#c23030}.jupyter-wrapper .bp3-form-group.bp3-inline{-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.jupyter-wrapper .bp3-form-group.bp3-inline.bp3-large label.bp3-label{line-height:40px;margin:0 10px 0 0}.jupyter-wrapper .bp3-form-group.bp3-inline label.bp3-label{line-height:30px;margin:0 10px 0 0}.jupyter-wrapper .bp3-form-group.bp3-disabled .bp3-label,.jupyter-wrapper .bp3-form-group.bp3-disabled .bp3-text-muted,.jupyter-wrapper .bp3-form-group.bp3-disabled .bp3-form-helper-text{color:#5c708099!important}.jupyter-wrapper .bp3-dark .bp3-form-group.bp3-intent-primary .bp3-form-helper-text{color:#48aff0}.jupyter-wrapper .bp3-dark .bp3-form-group.bp3-intent-success .bp3-form-helper-text{color:#3dcc91}.jupyter-wrapper .bp3-dark .bp3-form-group.bp3-intent-warning .bp3-form-helper-text{color:#ffb366}.jupyter-wrapper .bp3-dark .bp3-form-group.bp3-intent-danger .bp3-form-helper-text{color:#ff7373}.jupyter-wrapper .bp3-dark .bp3-form-group .bp3-form-helper-text{color:#a7b6c2}.jupyter-wrapper .bp3-dark 
.bp3-form-group.bp3-disabled .bp3-label,.jupyter-wrapper .bp3-dark .bp3-form-group.bp3-disabled .bp3-text-muted,.jupyter-wrapper .bp3-dark .bp3-form-group.bp3-disabled .bp3-form-helper-text{color:#a7b6c299!important}.jupyter-wrapper .bp3-input-group{display:block;position:relative}.jupyter-wrapper .bp3-input-group .bp3-input{position:relative;width:100%}.jupyter-wrapper .bp3-input-group .bp3-input:not(:first-child){padding-left:30px}.jupyter-wrapper .bp3-input-group .bp3-input:not(:last-child){padding-right:30px}.jupyter-wrapper .bp3-input-group .bp3-input-action,.jupyter-wrapper .bp3-input-group>.bp3-input-left-container,.jupyter-wrapper .bp3-input-group>.bp3-button,.jupyter-wrapper .bp3-input-group>.bp3-icon{position:absolute;top:0}.jupyter-wrapper .bp3-input-group .bp3-input-action:first-child,.jupyter-wrapper .bp3-input-group>.bp3-input-left-container:first-child,.jupyter-wrapper .bp3-input-group>.bp3-button:first-child,.jupyter-wrapper .bp3-input-group>.bp3-icon:first-child{left:0}.jupyter-wrapper .bp3-input-group .bp3-input-action:last-child,.jupyter-wrapper .bp3-input-group>.bp3-input-left-container:last-child,.jupyter-wrapper .bp3-input-group>.bp3-button:last-child,.jupyter-wrapper .bp3-input-group>.bp3-icon:last-child{right:0}.jupyter-wrapper .bp3-input-group .bp3-button{min-height:24px;min-width:24px;margin:3px;padding:0 7px}.jupyter-wrapper .bp3-input-group .bp3-button:empty{padding:0}.jupyter-wrapper .bp3-input-group>.bp3-input-left-container,.jupyter-wrapper .bp3-input-group>.bp3-icon{z-index:1}.jupyter-wrapper .bp3-input-group>.bp3-input-left-container>.bp3-icon,.jupyter-wrapper .bp3-input-group>.bp3-icon{color:#5c7080}.jupyter-wrapper .bp3-input-group>.bp3-input-left-container>.bp3-icon:empty,.jupyter-wrapper .bp3-input-group>.bp3-icon:empty{font-family:Icons16,sans-serif;font-size:16px;font-style:normal;font-weight:400;line-height:1;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased}.jupyter-wrapper .bp3-input-group>.bp3-input-left-container>.bp3-icon,.jupyter-wrapper .bp3-input-group>.bp3-icon,.jupyter-wrapper .bp3-input-group .bp3-input-action>.bp3-spinner{margin:7px}.jupyter-wrapper .bp3-input-group .bp3-tag{margin:5px}.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-button.bp3-minimal:not(:hover):not(:focus),.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-input-action .bp3-button.bp3-minimal:not(:hover):not(:focus){color:#5c7080}.jupyter-wrapper .bp3-dark .bp3-input-group .bp3-input:not(:focus)+.bp3-button.bp3-minimal:not(:hover):not(:focus),.jupyter-wrapper .bp3-dark .bp3-input-group .bp3-input:not(:focus)+.bp3-input-action .bp3-button.bp3-minimal:not(:hover):not(:focus){color:#a7b6c2}.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-button.bp3-minimal:not(:hover):not(:focus) .bp3-icon,.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-button.bp3-minimal:not(:hover):not(:focus) .bp3-icon-standard,.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-button.bp3-minimal:not(:hover):not(:focus) .bp3-icon-large,.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-input-action .bp3-button.bp3-minimal:not(:hover):not(:focus) .bp3-icon,.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-input-action .bp3-button.bp3-minimal:not(:hover):not(:focus) .bp3-icon-standard,.jupyter-wrapper .bp3-input-group .bp3-input:not(:focus)+.bp3-input-action .bp3-button.bp3-minimal:not(:hover):not(:focus) .bp3-icon-large{color:#5c7080}.jupyter-wrapper .bp3-input-group 
.bp3-icon-barcode:before{content:\"\ue676\"}.jupyter-wrapper .bp3-icon-blank:before{content:\"\ue900\"}.jupyter-wrapper .bp3-icon-blocked-person:before{content:\"\ue768\"}.jupyter-wrapper .bp3-icon-bold:before{content:\"\ue606\"}.jupyter-wrapper .bp3-icon-book:before{content:\"\ue6b8\"}.jupyter-wrapper .bp3-icon-bookmark:before{content:\"\ue61a\"}.jupyter-wrapper .bp3-icon-box:before{content:\"\ue6bf\"}.jupyter-wrapper .bp3-icon-briefcase:before{content:\"\ue674\"}.jupyter-wrapper .bp3-icon-bring-data:before{content:\"\ue90a\"}.jupyter-wrapper .bp3-icon-build:before{content:\"\ue72d\"}.jupyter-wrapper .bp3-icon-calculator:before{content:\"\ue70b\"}.jupyter-wrapper .bp3-icon-calendar:before{content:\"\ue62b\"}.jupyter-wrapper .bp3-icon-camera:before{content:\"\ue69e\"}.jupyter-wrapper .bp3-icon-caret-down:before{content:\"\u2304\"}.jupyter-wrapper .bp3-icon-caret-left:before{content:\"\u2329\"}.jupyter-wrapper .bp3-icon-caret-right:before{content:\"\u232a\"}.jupyter-wrapper .bp3-icon-caret-up:before{content:\"\u2303\"}.jupyter-wrapper .bp3-icon-cell-tower:before{content:\"\ue770\"}.jupyter-wrapper .bp3-icon-changes:before{content:\"\ue623\"}.jupyter-wrapper .bp3-icon-chart:before{content:\"\ue67e\"}.jupyter-wrapper .bp3-icon-chat:before{content:\"\ue689\"}.jupyter-wrapper .bp3-icon-chevron-backward:before{content:\"\ue6df\"}.jupyter-wrapper .bp3-icon-chevron-down:before{content:\"\ue697\"}.jupyter-wrapper .bp3-icon-chevron-forward:before{content:\"\ue6e0\"}.jupyter-wrapper .bp3-icon-chevron-left:before{content:\"\ue694\"}.jupyter-wrapper .bp3-icon-chevron-right:before{content:\"\ue695\"}.jupyter-wrapper .bp3-icon-chevron-up:before{content:\"\ue696\"}.jupyter-wrapper .bp3-icon-circle:before{content:\"\ue66a\"}.jupyter-wrapper .bp3-icon-circle-arrow-down:before{content:\"\ue68e\"}.jupyter-wrapper .bp3-icon-circle-arrow-left:before{content:\"\ue68c\"}.jupyter-wrapper .bp3-icon-circle-arrow-right:before{content:\"\ue68b\"}.jupyter-wrapper .bp3-icon-circle-arrow-up:before{content:\"\ue68d\"}.jupyter-wrapper .bp3-icon-citation:before{content:\"\ue61b\"}.jupyter-wrapper .bp3-icon-clean:before{content:\"\ue7c5\"}.jupyter-wrapper .bp3-icon-clipboard:before{content:\"\ue61d\"}.jupyter-wrapper .bp3-icon-cloud:before{content:\"\u2601\"}.jupyter-wrapper .bp3-icon-cloud-download:before{content:\"\ue690\"}.jupyter-wrapper .bp3-icon-cloud-upload:before{content:\"\ue691\"}.jupyter-wrapper .bp3-icon-code:before{content:\"\ue661\"}.jupyter-wrapper .bp3-icon-code-block:before{content:\"\ue6c5\"}.jupyter-wrapper .bp3-icon-cog:before{content:\"\ue645\"}.jupyter-wrapper .bp3-icon-collapse-all:before{content:\"\ue763\"}.jupyter-wrapper .bp3-icon-column-layout:before{content:\"\ue6da\"}.jupyter-wrapper .bp3-icon-comment:before{content:\"\ue68a\"}.jupyter-wrapper .bp3-icon-comparison:before{content:\"\ue637\"}.jupyter-wrapper .bp3-icon-compass:before{content:\"\ue79c\"}.jupyter-wrapper .bp3-icon-compressed:before{content:\"\ue6c0\"}.jupyter-wrapper .bp3-icon-confirm:before{content:\"\ue639\"}.jupyter-wrapper .bp3-icon-console:before{content:\"\ue79b\"}.jupyter-wrapper .bp3-icon-contrast:before{content:\"\ue6cb\"}.jupyter-wrapper .bp3-icon-control:before{content:\"\ue67f\"}.jupyter-wrapper .bp3-icon-credit-card:before{content:\"\ue649\"}.jupyter-wrapper .bp3-icon-cross:before{content:\"\u2717\"}.jupyter-wrapper .bp3-icon-crown:before{content:\"\ue7b4\"}.jupyter-wrapper .bp3-icon-cube:before{content:\"\ue7c8\"}.jupyter-wrapper .bp3-icon-cube-add:before{content:\"\ue7c9\"}.jupyter-wrapper 
.bp3-icon-cube-remove:before{content:\"\ue7d0\"}.jupyter-wrapper .bp3-icon-curved-range-chart:before{content:\"\ue71b\"}.jupyter-wrapper .bp3-icon-cut:before{content:\"\ue6ef\"}.jupyter-wrapper .bp3-icon-dashboard:before{content:\"\ue751\"}.jupyter-wrapper .bp3-icon-data-lineage:before{content:\"\ue908\"}.jupyter-wrapper .bp3-icon-database:before{content:\"\ue683\"}.jupyter-wrapper .bp3-icon-delete:before{content:\"\ue644\"}.jupyter-wrapper .bp3-icon-delta:before{content:\"\u0394\"}.jupyter-wrapper .bp3-icon-derive-column:before{content:\"\ue739\"}.jupyter-wrapper .bp3-icon-desktop:before{content:\"\ue6af\"}.jupyter-wrapper .bp3-icon-diagnosis:before{content:\"\ue90d\"}.jupyter-wrapper .bp3-icon-diagram-tree:before{content:\"\ue7b3\"}.jupyter-wrapper .bp3-icon-direction-left:before{content:\"\ue681\"}.jupyter-wrapper .bp3-icon-direction-right:before{content:\"\ue682\"}.jupyter-wrapper .bp3-icon-disable:before{content:\"\ue600\"}.jupyter-wrapper .bp3-icon-document:before{content:\"\ue630\"}.jupyter-wrapper .bp3-icon-document-open:before{content:\"\ue71e\"}.jupyter-wrapper .bp3-icon-document-share:before{content:\"\ue71f\"}.jupyter-wrapper .bp3-icon-dollar:before{content:\"$\"}.jupyter-wrapper .bp3-icon-dot:before{content:\"\u2022\"}.jupyter-wrapper .bp3-icon-double-caret-horizontal:before{content:\"\ue6c7\"}.jupyter-wrapper .bp3-icon-double-caret-vertical:before{content:\"\ue6c6\"}.jupyter-wrapper .bp3-icon-double-chevron-down:before{content:\"\ue703\"}.jupyter-wrapper .bp3-icon-double-chevron-left:before{content:\"\ue6ff\"}.jupyter-wrapper .bp3-icon-double-chevron-right:before{content:\"\ue701\"}.jupyter-wrapper .bp3-icon-double-chevron-up:before{content:\"\ue702\"}.jupyter-wrapper .bp3-icon-doughnut-chart:before{content:\"\ue6ce\"}.jupyter-wrapper .bp3-icon-download:before{content:\"\ue62f\"}.jupyter-wrapper .bp3-icon-drag-handle-horizontal:before{content:\"\ue716\"}.jupyter-wrapper .bp3-icon-drag-handle-vertical:before{content:\"\ue715\"}.jupyter-wrapper .bp3-icon-draw:before{content:\"\ue66b\"}.jupyter-wrapper .bp3-icon-drive-time:before{content:\"\ue615\"}.jupyter-wrapper .bp3-icon-duplicate:before{content:\"\ue69c\"}.jupyter-wrapper .bp3-icon-edit:before{content:\"\u270e\"}.jupyter-wrapper .bp3-icon-eject:before{content:\"\u23cf\"}.jupyter-wrapper .bp3-icon-endorsed:before{content:\"\ue75f\"}.jupyter-wrapper .bp3-icon-envelope:before{content:\"\u2709\"}.jupyter-wrapper .bp3-icon-equals:before{content:\"\ue7d9\"}.jupyter-wrapper .bp3-icon-eraser:before{content:\"\ue773\"}.jupyter-wrapper .bp3-icon-error:before{content:\"\ue648\"}.jupyter-wrapper .bp3-icon-euro:before{content:\"\u20ac\"}.jupyter-wrapper .bp3-icon-exchange:before{content:\"\ue636\"}.jupyter-wrapper .bp3-icon-exclude-row:before{content:\"\ue6ea\"}.jupyter-wrapper .bp3-icon-expand-all:before{content:\"\ue764\"}.jupyter-wrapper .bp3-icon-export:before{content:\"\ue633\"}.jupyter-wrapper .bp3-icon-eye-off:before{content:\"\ue6cc\"}.jupyter-wrapper .bp3-icon-eye-on:before{content:\"\ue75a\"}.jupyter-wrapper .bp3-icon-eye-open:before{content:\"\ue66f\"}.jupyter-wrapper .bp3-icon-fast-backward:before{content:\"\ue6a8\"}.jupyter-wrapper .bp3-icon-fast-forward:before{content:\"\ue6ac\"}.jupyter-wrapper .bp3-icon-feed:before{content:\"\ue656\"}.jupyter-wrapper .bp3-icon-feed-subscribed:before{content:\"\ue78f\"}.jupyter-wrapper .bp3-icon-film:before{content:\"\ue6a1\"}.jupyter-wrapper .bp3-icon-filter:before{content:\"\ue638\"}.jupyter-wrapper .bp3-icon-filter-keep:before{content:\"\ue78c\"}.jupyter-wrapper 
.bp3-icon-filter-list:before{content:\"\ue6ee\"}.jupyter-wrapper .bp3-icon-filter-open:before{content:\"\ue7d7\"}.jupyter-wrapper .bp3-icon-filter-remove:before{content:\"\ue78d\"}.jupyter-wrapper .bp3-icon-flag:before{content:\"\u2691\"}.jupyter-wrapper .bp3-icon-flame:before{content:\"\ue7a9\"}.jupyter-wrapper .bp3-icon-flash:before{content:\"\ue6b3\"}.jupyter-wrapper .bp3-icon-floppy-disk:before{content:\"\ue6b7\"}.jupyter-wrapper .bp3-icon-flow-branch:before{content:\"\ue7c1\"}.jupyter-wrapper .bp3-icon-flow-end:before{content:\"\ue7c4\"}.jupyter-wrapper .bp3-icon-flow-linear:before{content:\"\ue7c0\"}.jupyter-wrapper .bp3-icon-flow-review:before{content:\"\ue7c2\"}.jupyter-wrapper .bp3-icon-flow-review-branch:before{content:\"\ue7c3\"}.jupyter-wrapper .bp3-icon-flows:before{content:\"\ue659\"}.jupyter-wrapper .bp3-icon-folder-close:before{content:\"\ue652\"}.jupyter-wrapper .bp3-icon-folder-new:before{content:\"\ue7b0\"}.jupyter-wrapper .bp3-icon-folder-open:before{content:\"\ue651\"}.jupyter-wrapper .bp3-icon-folder-shared:before{content:\"\ue653\"}.jupyter-wrapper .bp3-icon-folder-shared-open:before{content:\"\ue670\"}.jupyter-wrapper .bp3-icon-follower:before{content:\"\ue760\"}.jupyter-wrapper .bp3-icon-following:before{content:\"\ue761\"}.jupyter-wrapper .bp3-icon-font:before{content:\"\ue6b4\"}.jupyter-wrapper .bp3-icon-fork:before{content:\"\ue63a\"}.jupyter-wrapper .bp3-icon-form:before{content:\"\ue795\"}.jupyter-wrapper .bp3-icon-full-circle:before{content:\"\ue685\"}.jupyter-wrapper .bp3-icon-full-stacked-chart:before{content:\"\ue75e\"}.jupyter-wrapper .bp3-icon-fullscreen:before{content:\"\ue699\"}.jupyter-wrapper .bp3-icon-function:before{content:\"\ue6e5\"}.jupyter-wrapper .bp3-icon-gantt-chart:before{content:\"\ue6f4\"}.jupyter-wrapper .bp3-icon-geolocation:before{content:\"\ue640\"}.jupyter-wrapper .bp3-icon-geosearch:before{content:\"\ue613\"}.jupyter-wrapper .bp3-icon-git-branch:before{content:\"\ue72a\"}.jupyter-wrapper .bp3-icon-git-commit:before{content:\"\ue72b\"}.jupyter-wrapper .bp3-icon-git-merge:before{content:\"\ue729\"}.jupyter-wrapper .bp3-icon-git-new-branch:before{content:\"\ue749\"}.jupyter-wrapper .bp3-icon-git-pull:before{content:\"\ue728\"}.jupyter-wrapper .bp3-icon-git-push:before{content:\"\ue72c\"}.jupyter-wrapper .bp3-icon-git-repo:before{content:\"\ue748\"}.jupyter-wrapper .bp3-icon-glass:before{content:\"\ue6b1\"}.jupyter-wrapper .bp3-icon-globe:before{content:\"\ue666\"}.jupyter-wrapper .bp3-icon-globe-network:before{content:\"\ue7b5\"}.jupyter-wrapper .bp3-icon-graph:before{content:\"\ue673\"}.jupyter-wrapper .bp3-icon-graph-remove:before{content:\"\ue609\"}.jupyter-wrapper .bp3-icon-greater-than:before{content:\"\ue7e1\"}.jupyter-wrapper .bp3-icon-greater-than-or-equal-to:before{content:\"\ue7e2\"}.jupyter-wrapper .bp3-icon-grid:before{content:\"\ue6d0\"}.jupyter-wrapper .bp3-icon-grid-view:before{content:\"\ue6e4\"}.jupyter-wrapper .bp3-icon-group-objects:before{content:\"\ue60a\"}.jupyter-wrapper .bp3-icon-grouped-bar-chart:before{content:\"\ue75d\"}.jupyter-wrapper .bp3-icon-hand:before{content:\"\ue6de\"}.jupyter-wrapper .bp3-icon-hand-down:before{content:\"\ue6bb\"}.jupyter-wrapper .bp3-icon-hand-left:before{content:\"\ue6bc\"}.jupyter-wrapper .bp3-icon-hand-right:before{content:\"\ue6b9\"}.jupyter-wrapper .bp3-icon-hand-up:before{content:\"\ue6ba\"}.jupyter-wrapper .bp3-icon-header:before{content:\"\ue6b5\"}.jupyter-wrapper .bp3-icon-header-one:before{content:\"\ue793\"}.jupyter-wrapper 
.bp3-icon-header-two:before{content:\"\ue794\"}.jupyter-wrapper .bp3-icon-headset:before{content:\"\ue6dc\"}.jupyter-wrapper .bp3-icon-heart:before{content:\"\u2665\"}.jupyter-wrapper .bp3-icon-heart-broken:before{content:\"\ue7a2\"}.jupyter-wrapper .bp3-icon-heat-grid:before{content:\"\ue6f3\"}.jupyter-wrapper .bp3-icon-heatmap:before{content:\"\ue614\"}.jupyter-wrapper .bp3-icon-help:before{content:\"?\"}.jupyter-wrapper .bp3-icon-helper-management:before{content:\"\ue66d\"}.jupyter-wrapper .bp3-icon-highlight:before{content:\"\ue6ed\"}.jupyter-wrapper .bp3-icon-history:before{content:\"\ue64a\"}.jupyter-wrapper .bp3-icon-home:before{content:\"\u2302\"}.jupyter-wrapper .bp3-icon-horizontal-bar-chart:before{content:\"\ue70c\"}.jupyter-wrapper .bp3-icon-horizontal-bar-chart-asc:before{content:\"\ue75c\"}.jupyter-wrapper .bp3-icon-horizontal-bar-chart-desc:before{content:\"\ue71d\"}.jupyter-wrapper .bp3-icon-horizontal-distribution:before{content:\"\ue720\"}.jupyter-wrapper .bp3-icon-id-number:before{content:\"\ue771\"}.jupyter-wrapper .bp3-icon-image-rotate-left:before{content:\"\ue73a\"}.jupyter-wrapper .bp3-icon-image-rotate-right:before{content:\"\ue73b\"}.jupyter-wrapper .bp3-icon-import:before{content:\"\ue632\"}.jupyter-wrapper .bp3-icon-inbox:before{content:\"\ue629\"}.jupyter-wrapper .bp3-icon-inbox-filtered:before{content:\"\ue7d1\"}.jupyter-wrapper .bp3-icon-inbox-geo:before{content:\"\ue7d2\"}.jupyter-wrapper .bp3-icon-inbox-search:before{content:\"\ue7d3\"}.jupyter-wrapper .bp3-icon-inbox-update:before{content:\"\ue7d4\"}.jupyter-wrapper .bp3-icon-info-sign:before{content:\"\u2139\"}.jupyter-wrapper .bp3-icon-inheritance:before{content:\"\ue7d5\"}.jupyter-wrapper .bp3-icon-inner-join:before{content:\"\ue7a3\"}.jupyter-wrapper .bp3-icon-insert:before{content:\"\ue66c\"}.jupyter-wrapper .bp3-icon-intersection:before{content:\"\ue765\"}.jupyter-wrapper .bp3-icon-ip-address:before{content:\"\ue772\"}.jupyter-wrapper .bp3-icon-issue:before{content:\"\ue774\"}.jupyter-wrapper .bp3-icon-issue-closed:before{content:\"\ue776\"}.jupyter-wrapper .bp3-icon-issue-new:before{content:\"\ue775\"}.jupyter-wrapper .bp3-icon-italic:before{content:\"\ue607\"}.jupyter-wrapper .bp3-icon-join-table:before{content:\"\ue738\"}.jupyter-wrapper .bp3-icon-key:before{content:\"\ue78e\"}.jupyter-wrapper .bp3-icon-key-backspace:before{content:\"\ue707\"}.jupyter-wrapper .bp3-icon-key-command:before{content:\"\ue705\"}.jupyter-wrapper .bp3-icon-key-control:before{content:\"\ue704\"}.jupyter-wrapper .bp3-icon-key-delete:before{content:\"\ue708\"}.jupyter-wrapper .bp3-icon-key-enter:before{content:\"\ue70a\"}.jupyter-wrapper .bp3-icon-key-escape:before{content:\"\ue709\"}.jupyter-wrapper .bp3-icon-key-option:before{content:\"\ue742\"}.jupyter-wrapper .bp3-icon-key-shift:before{content:\"\ue706\"}.jupyter-wrapper .bp3-icon-key-tab:before{content:\"\ue757\"}.jupyter-wrapper .bp3-icon-known-vehicle:before{content:\"\ue73c\"}.jupyter-wrapper .bp3-icon-lab-test:before{content:\"\ue90e\"}.jupyter-wrapper .bp3-icon-label:before{content:\"\ue665\"}.jupyter-wrapper .bp3-icon-layer:before{content:\"\ue6cf\"}.jupyter-wrapper .bp3-icon-layers:before{content:\"\ue618\"}.jupyter-wrapper .bp3-icon-layout:before{content:\"\ue60c\"}.jupyter-wrapper .bp3-icon-layout-auto:before{content:\"\ue60d\"}.jupyter-wrapper .bp3-icon-layout-balloon:before{content:\"\ue6d3\"}.jupyter-wrapper .bp3-icon-layout-circle:before{content:\"\ue60e\"}.jupyter-wrapper .bp3-icon-layout-grid:before{content:\"\ue610\"}.jupyter-wrapper 
.bp3-icon-layout-group-by:before{content:\"\ue611\"}.jupyter-wrapper .bp3-icon-layout-hierarchy:before{content:\"\ue60f\"}.jupyter-wrapper .bp3-icon-layout-linear:before{content:\"\ue6c3\"}.jupyter-wrapper .bp3-icon-layout-skew-grid:before{content:\"\ue612\"}.jupyter-wrapper .bp3-icon-layout-sorted-clusters:before{content:\"\ue6d4\"}.jupyter-wrapper .bp3-icon-learning:before{content:\"\ue904\"}.jupyter-wrapper .bp3-icon-left-join:before{content:\"\ue7a4\"}.jupyter-wrapper .bp3-icon-less-than:before{content:\"\ue7e3\"}.jupyter-wrapper .bp3-icon-less-than-or-equal-to:before{content:\"\ue7e4\"}.jupyter-wrapper .bp3-icon-lifesaver:before{content:\"\ue7c7\"}.jupyter-wrapper .bp3-icon-lightbulb:before{content:\"\ue6b0\"}.jupyter-wrapper .bp3-icon-link:before{content:\"\ue62d\"}.jupyter-wrapper .bp3-icon-list:before{content:\"\u2630\"}.jupyter-wrapper .bp3-icon-list-columns:before{content:\"\ue7b9\"}.jupyter-wrapper .bp3-icon-list-detail-view:before{content:\"\ue743\"}.jupyter-wrapper .bp3-icon-locate:before{content:\"\ue619\"}.jupyter-wrapper .bp3-icon-lock:before{content:\"\ue625\"}.jupyter-wrapper .bp3-icon-log-in:before{content:\"\ue69a\"}.jupyter-wrapper .bp3-icon-log-out:before{content:\"\ue64c\"}.jupyter-wrapper .bp3-icon-manual:before{content:\"\ue6f6\"}.jupyter-wrapper .bp3-icon-manually-entered-data:before{content:\"\ue74a\"}.jupyter-wrapper .bp3-icon-map:before{content:\"\ue662\"}.jupyter-wrapper .bp3-icon-map-create:before{content:\"\ue741\"}.jupyter-wrapper .bp3-icon-map-marker:before{content:\"\ue67d\"}.jupyter-wrapper .bp3-icon-maximize:before{content:\"\ue635\"}.jupyter-wrapper .bp3-icon-media:before{content:\"\ue62c\"}.jupyter-wrapper .bp3-icon-menu:before{content:\"\ue762\"}.jupyter-wrapper .bp3-icon-menu-closed:before{content:\"\ue655\"}.jupyter-wrapper .bp3-icon-menu-open:before{content:\"\ue654\"}.jupyter-wrapper .bp3-icon-merge-columns:before{content:\"\ue74f\"}.jupyter-wrapper .bp3-icon-merge-links:before{content:\"\ue60b\"}.jupyter-wrapper .bp3-icon-minimize:before{content:\"\ue634\"}.jupyter-wrapper .bp3-icon-minus:before{content:\"\u2212\"}.jupyter-wrapper .bp3-icon-mobile-phone:before{content:\"\ue717\"}.jupyter-wrapper .bp3-icon-mobile-video:before{content:\"\ue69f\"}.jupyter-wrapper .bp3-icon-moon:before{content:\"\ue754\"}.jupyter-wrapper .bp3-icon-more:before{content:\"\ue62a\"}.jupyter-wrapper .bp3-icon-mountain:before{content:\"\ue7b1\"}.jupyter-wrapper .bp3-icon-move:before{content:\"\ue693\"}.jupyter-wrapper .bp3-icon-mugshot:before{content:\"\ue6db\"}.jupyter-wrapper .bp3-icon-multi-select:before{content:\"\ue680\"}.jupyter-wrapper .bp3-icon-music:before{content:\"\ue6a6\"}.jupyter-wrapper .bp3-icon-new-drawing:before{content:\"\ue905\"}.jupyter-wrapper .bp3-icon-new-grid-item:before{content:\"\ue747\"}.jupyter-wrapper .bp3-icon-new-layer:before{content:\"\ue902\"}.jupyter-wrapper .bp3-icon-new-layers:before{content:\"\ue903\"}.jupyter-wrapper .bp3-icon-new-link:before{content:\"\ue65c\"}.jupyter-wrapper .bp3-icon-new-object:before{content:\"\ue65d\"}.jupyter-wrapper .bp3-icon-new-person:before{content:\"\ue6e9\"}.jupyter-wrapper .bp3-icon-new-prescription:before{content:\"\ue78b\"}.jupyter-wrapper .bp3-icon-new-text-box:before{content:\"\ue65b\"}.jupyter-wrapper .bp3-icon-ninja:before{content:\"\ue675\"}.jupyter-wrapper .bp3-icon-not-equal-to:before{content:\"\ue7e0\"}.jupyter-wrapper .bp3-icon-notifications:before{content:\"\ue624\"}.jupyter-wrapper .bp3-icon-notifications-updated:before{content:\"\ue7b8\"}.jupyter-wrapper 
.bp3-icon-numbered-list:before{content:\"\ue746\"}.jupyter-wrapper .bp3-icon-numerical:before{content:\"\ue756\"}.jupyter-wrapper .bp3-icon-office:before{content:\"\ue69b\"}.jupyter-wrapper .bp3-icon-offline:before{content:\"\ue67a\"}.jupyter-wrapper .bp3-icon-oil-field:before{content:\"\ue73f\"}.jupyter-wrapper .bp3-icon-one-column:before{content:\"\ue658\"}.jupyter-wrapper .bp3-icon-outdated:before{content:\"\ue7a8\"}.jupyter-wrapper .bp3-icon-page-layout:before{content:\"\ue660\"}.jupyter-wrapper .bp3-icon-panel-stats:before{content:\"\ue777\"}.jupyter-wrapper .bp3-icon-panel-table:before{content:\"\ue778\"}.jupyter-wrapper .bp3-icon-paperclip:before{content:\"\ue664\"}.jupyter-wrapper .bp3-icon-paragraph:before{content:\"\ue76c\"}.jupyter-wrapper .bp3-icon-path:before{content:\"\ue753\"}.jupyter-wrapper .bp3-icon-path-search:before{content:\"\ue65e\"}.jupyter-wrapper .bp3-icon-pause:before{content:\"\ue6a9\"}.jupyter-wrapper .bp3-icon-people:before{content:\"\ue63d\"}.jupyter-wrapper .bp3-icon-percentage:before{content:\"\ue76a\"}.jupyter-wrapper .bp3-icon-person:before{content:\"\ue63c\"}.jupyter-wrapper .bp3-icon-phone:before{content:\"\u260e\"}.jupyter-wrapper .bp3-icon-pie-chart:before{content:\"\ue684\"}.jupyter-wrapper .bp3-icon-pin:before{content:\"\ue646\"}.jupyter-wrapper .bp3-icon-pivot:before{content:\"\ue6f1\"}.jupyter-wrapper .bp3-icon-pivot-table:before{content:\"\ue6eb\"}.jupyter-wrapper .bp3-icon-play:before{content:\"\ue6ab\"}.jupyter-wrapper .bp3-icon-plus:before{content:\"+\"}.jupyter-wrapper .bp3-icon-polygon-filter:before{content:\"\ue6d1\"}.jupyter-wrapper .bp3-icon-power:before{content:\"\ue6d9\"}.jupyter-wrapper .bp3-icon-predictive-analysis:before{content:\"\ue617\"}.jupyter-wrapper .bp3-icon-prescription:before{content:\"\ue78a\"}.jupyter-wrapper .bp3-icon-presentation:before{content:\"\ue687\"}.jupyter-wrapper .bp3-icon-print:before{content:\"\u2399\"}.jupyter-wrapper .bp3-icon-projects:before{content:\"\ue622\"}.jupyter-wrapper .bp3-icon-properties:before{content:\"\ue631\"}.jupyter-wrapper .bp3-icon-property:before{content:\"\ue65a\"}.jupyter-wrapper .bp3-icon-publish-function:before{content:\"\ue752\"}.jupyter-wrapper .bp3-icon-pulse:before{content:\"\ue6e8\"}.jupyter-wrapper .bp3-icon-random:before{content:\"\ue698\"}.jupyter-wrapper .bp3-icon-record:before{content:\"\ue6ae\"}.jupyter-wrapper .bp3-icon-redo:before{content:\"\ue6c4\"}.jupyter-wrapper .bp3-icon-refresh:before{content:\"\ue643\"}.jupyter-wrapper .bp3-icon-regression-chart:before{content:\"\ue758\"}.jupyter-wrapper .bp3-icon-remove:before{content:\"\ue63f\"}.jupyter-wrapper .bp3-icon-remove-column:before{content:\"\ue755\"}.jupyter-wrapper .bp3-icon-remove-column-left:before{content:\"\ue6fd\"}.jupyter-wrapper .bp3-icon-remove-column-right:before{content:\"\ue6fe\"}.jupyter-wrapper .bp3-icon-remove-row-bottom:before{content:\"\ue6fc\"}.jupyter-wrapper .bp3-icon-remove-row-top:before{content:\"\ue6fb\"}.jupyter-wrapper .bp3-icon-repeat:before{content:\"\ue692\"}.jupyter-wrapper .bp3-icon-reset:before{content:\"\ue7d6\"}.jupyter-wrapper .bp3-icon-resolve:before{content:\"\ue672\"}.jupyter-wrapper .bp3-icon-rig:before{content:\"\ue740\"}.jupyter-wrapper .bp3-icon-right-join:before{content:\"\ue7a5\"}.jupyter-wrapper .bp3-icon-ring:before{content:\"\ue6f2\"}.jupyter-wrapper .bp3-icon-rotate-document:before{content:\"\ue6e1\"}.jupyter-wrapper .bp3-icon-rotate-page:before{content:\"\ue6e2\"}.jupyter-wrapper .bp3-icon-satellite:before{content:\"\ue76b\"}.jupyter-wrapper 
.bp3-icon-saved:before{content:\"\ue6b6\"}.jupyter-wrapper .bp3-icon-scatter-plot:before{content:\"\ue73e\"}.jupyter-wrapper .bp3-icon-search:before{content:\"\ue64b\"}.jupyter-wrapper .bp3-icon-search-around:before{content:\"\ue608\"}.jupyter-wrapper .bp3-icon-search-template:before{content:\"\ue628\"}.jupyter-wrapper .bp3-icon-search-text:before{content:\"\ue663\"}.jupyter-wrapper .bp3-icon-segmented-control:before{content:\"\ue6ec\"}.jupyter-wrapper .bp3-icon-select:before{content:\"\ue616\"}.jupyter-wrapper .bp3-icon-selection:before{content:\"\u29bf\"}.jupyter-wrapper .bp3-icon-send-to:before{content:\"\ue66e\"}.jupyter-wrapper .bp3-icon-send-to-graph:before{content:\"\ue736\"}.jupyter-wrapper .bp3-icon-send-to-map:before{content:\"\ue737\"}.jupyter-wrapper .bp3-icon-series-add:before{content:\"\ue796\"}.jupyter-wrapper .bp3-icon-series-configuration:before{content:\"\ue79a\"}.jupyter-wrapper .bp3-icon-series-derived:before{content:\"\ue799\"}.jupyter-wrapper .bp3-icon-series-filtered:before{content:\"\ue798\"}.jupyter-wrapper .bp3-icon-series-search:before{content:\"\ue797\"}.jupyter-wrapper .bp3-icon-settings:before{content:\"\ue6a2\"}.jupyter-wrapper .bp3-icon-share:before{content:\"\ue62e\"}.jupyter-wrapper .bp3-icon-shield:before{content:\"\ue7b2\"}.jupyter-wrapper .bp3-icon-shop:before{content:\"\ue6c2\"}.jupyter-wrapper .bp3-icon-shopping-cart:before{content:\"\ue6c1\"}.jupyter-wrapper .bp3-icon-signal-search:before{content:\"\ue909\"}.jupyter-wrapper .bp3-icon-sim-card:before{content:\"\ue718\"}.jupyter-wrapper .bp3-icon-slash:before{content:\"\ue769\"}.jupyter-wrapper .bp3-icon-small-cross:before{content:\"\ue6d7\"}.jupyter-wrapper .bp3-icon-small-minus:before{content:\"\ue70e\"}.jupyter-wrapper .bp3-icon-small-plus:before{content:\"\ue70d\"}.jupyter-wrapper .bp3-icon-small-tick:before{content:\"\ue6d8\"}.jupyter-wrapper .bp3-icon-snowflake:before{content:\"\ue7b6\"}.jupyter-wrapper .bp3-icon-social-media:before{content:\"\ue671\"}.jupyter-wrapper .bp3-icon-sort:before{content:\"\ue64f\"}.jupyter-wrapper .bp3-icon-sort-alphabetical:before{content:\"\ue64d\"}.jupyter-wrapper .bp3-icon-sort-alphabetical-desc:before{content:\"\ue6c8\"}.jupyter-wrapper .bp3-icon-sort-asc:before{content:\"\ue6d5\"}.jupyter-wrapper .bp3-icon-sort-desc:before{content:\"\ue6d6\"}.jupyter-wrapper .bp3-icon-sort-numerical:before{content:\"\ue64e\"}.jupyter-wrapper .bp3-icon-sort-numerical-desc:before{content:\"\ue6c9\"}.jupyter-wrapper .bp3-icon-split-columns:before{content:\"\ue750\"}.jupyter-wrapper .bp3-icon-square:before{content:\"\ue686\"}.jupyter-wrapper .bp3-icon-stacked-chart:before{content:\"\ue6e7\"}.jupyter-wrapper .bp3-icon-star:before{content:\"\u2605\"}.jupyter-wrapper .bp3-icon-star-empty:before{content:\"\u2606\"}.jupyter-wrapper .bp3-icon-step-backward:before{content:\"\ue6a7\"}.jupyter-wrapper .bp3-icon-step-chart:before{content:\"\ue70f\"}.jupyter-wrapper .bp3-icon-step-forward:before{content:\"\ue6ad\"}.jupyter-wrapper .bp3-icon-stop:before{content:\"\ue6aa\"}.jupyter-wrapper .bp3-icon-stopwatch:before{content:\"\ue901\"}.jupyter-wrapper .bp3-icon-strikethrough:before{content:\"\ue7a6\"}.jupyter-wrapper .bp3-icon-style:before{content:\"\ue601\"}.jupyter-wrapper .bp3-icon-swap-horizontal:before{content:\"\ue745\"}.jupyter-wrapper .bp3-icon-swap-vertical:before{content:\"\ue744\"}.jupyter-wrapper .bp3-icon-symbol-circle:before{content:\"\ue72e\"}.jupyter-wrapper .bp3-icon-symbol-cross:before{content:\"\ue731\"}.jupyter-wrapper 
.bp3-icon-symbol-diamond:before{content:\"\ue730\"}.jupyter-wrapper .bp3-icon-symbol-square:before{content:\"\ue72f\"}.jupyter-wrapper .bp3-icon-symbol-triangle-down:before{content:\"\ue733\"}.jupyter-wrapper .bp3-icon-symbol-triangle-up:before{content:\"\ue732\"}.jupyter-wrapper .bp3-icon-tag:before{content:\"\ue61c\"}.jupyter-wrapper .bp3-icon-take-action:before{content:\"\ue6ca\"}.jupyter-wrapper .bp3-icon-taxi:before{content:\"\ue79e\"}.jupyter-wrapper .bp3-icon-text-highlight:before{content:\"\ue6dd\"}.jupyter-wrapper .bp3-icon-th:before{content:\"\ue667\"}.jupyter-wrapper .bp3-icon-th-derived:before{content:\"\ue669\"}.jupyter-wrapper .bp3-icon-th-disconnect:before{content:\"\ue7d8\"}.jupyter-wrapper .bp3-icon-th-filtered:before{content:\"\ue7c6\"}.jupyter-wrapper .bp3-icon-th-list:before{content:\"\ue668\"}.jupyter-wrapper .bp3-icon-thumbs-down:before{content:\"\ue6be\"}.jupyter-wrapper .bp3-icon-thumbs-up:before{content:\"\ue6bd\"}.jupyter-wrapper .bp3-icon-tick:before{content:\"\u2713\"}.jupyter-wrapper .bp3-icon-tick-circle:before{content:\"\ue779\"}.jupyter-wrapper .bp3-icon-time:before{content:\"\u23f2\"}.jupyter-wrapper .bp3-icon-timeline-area-chart:before{content:\"\ue6cd\"}.jupyter-wrapper .bp3-icon-timeline-bar-chart:before{content:\"\ue620\"}.jupyter-wrapper .bp3-icon-timeline-events:before{content:\"\ue61e\"}.jupyter-wrapper .bp3-icon-timeline-line-chart:before{content:\"\ue61f\"}.jupyter-wrapper .bp3-icon-tint:before{content:\"\ue6b2\"}.jupyter-wrapper .bp3-icon-torch:before{content:\"\ue677\"}.jupyter-wrapper .bp3-icon-tractor:before{content:\"\ue90c\"}.jupyter-wrapper .bp3-icon-train:before{content:\"\ue79f\"}.jupyter-wrapper .bp3-icon-translate:before{content:\"\ue759\"}.jupyter-wrapper .bp3-icon-trash:before{content:\"\ue63b\"}.jupyter-wrapper .bp3-icon-tree:before{content:\"\ue7b7\"}.jupyter-wrapper .bp3-icon-trending-down:before{content:\"\ue71a\"}.jupyter-wrapper .bp3-icon-trending-up:before{content:\"\ue719\"}.jupyter-wrapper .bp3-icon-truck:before{content:\"\ue90b\"}.jupyter-wrapper .bp3-icon-two-columns:before{content:\"\ue657\"}.jupyter-wrapper .bp3-icon-unarchive:before{content:\"\ue906\"}.jupyter-wrapper .bp3-icon-underline:before{content:\"\u2381\"}.jupyter-wrapper .bp3-icon-undo:before{content:\"\u238c\"}.jupyter-wrapper .bp3-icon-ungroup-objects:before{content:\"\ue688\"}.jupyter-wrapper .bp3-icon-unknown-vehicle:before{content:\"\ue73d\"}.jupyter-wrapper .bp3-icon-unlock:before{content:\"\ue626\"}.jupyter-wrapper .bp3-icon-unpin:before{content:\"\ue650\"}.jupyter-wrapper .bp3-icon-unresolve:before{content:\"\ue679\"}.jupyter-wrapper .bp3-icon-updated:before{content:\"\ue7a7\"}.jupyter-wrapper .bp3-icon-upload:before{content:\"\ue68f\"}.jupyter-wrapper .bp3-icon-user:before{content:\"\ue627\"}.jupyter-wrapper .bp3-icon-variable:before{content:\"\ue6f5\"}.jupyter-wrapper .bp3-icon-vertical-bar-chart-asc:before{content:\"\ue75b\"}.jupyter-wrapper .bp3-icon-vertical-bar-chart-desc:before{content:\"\ue71c\"}.jupyter-wrapper .bp3-icon-vertical-distribution:before{content:\"\ue721\"}.jupyter-wrapper .bp3-icon-video:before{content:\"\ue6a0\"}.jupyter-wrapper .bp3-icon-volume-down:before{content:\"\ue6a4\"}.jupyter-wrapper .bp3-icon-volume-off:before{content:\"\ue6a3\"}.jupyter-wrapper .bp3-icon-volume-up:before{content:\"\ue6a5\"}.jupyter-wrapper .bp3-icon-walk:before{content:\"\ue79d\"}.jupyter-wrapper .bp3-icon-warning-sign:before{content:\"\ue647\"}.jupyter-wrapper .bp3-icon-waterfall-chart:before{content:\"\ue6e6\"}.jupyter-wrapper 
.bp3-icon-widget:before{content:\"\ue678\"}.jupyter-wrapper .bp3-icon-widget-button:before{content:\"\ue790\"}.jupyter-wrapper .bp3-icon-widget-footer:before{content:\"\ue792\"}.jupyter-wrapper .bp3-icon-widget-header:before{content:\"\ue791\"}.jupyter-wrapper .bp3-icon-wrench:before{content:\"\ue734\"}.jupyter-wrapper .bp3-icon-zoom-in:before{content:\"\ue641\"}.jupyter-wrapper .bp3-icon-zoom-out:before{content:\"\ue642\"}.jupyter-wrapper .bp3-icon-zoom-to-fit:before{content:\"\ue67b\"}.jupyter-wrapper .bp3-submenu>.bp3-popover-wrapper{display:block}.jupyter-wrapper .bp3-submenu .bp3-popover-target{display:block}.jupyter-wrapper .bp3-submenu.bp3-popover{-webkit-box-shadow:none;box-shadow:none;padding:0 5px}.jupyter-wrapper .bp3-submenu.bp3-popover>.bp3-popover-content{-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.1),0 2px 4px rgba(16,22,26,.2),0 8px 24px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a1a,0 2px 4px #10161a33,0 8px 24px #10161a33}.jupyter-wrapper .bp3-dark .bp3-submenu.bp3-popover,.jupyter-wrapper .bp3-submenu.bp3-popover.bp3-dark{-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-dark .bp3-submenu.bp3-popover>.bp3-popover-content,.jupyter-wrapper .bp3-submenu.bp3-popover.bp3-dark>.bp3-popover-content{-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.2),0 2px 4px rgba(16,22,26,.4),0 8px 24px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a33,0 2px 4px #10161a66,0 8px 24px #10161a66}.jupyter-wrapper .bp3-menu{background:#ffffff;border-radius:3px;color:#182026;list-style:none;margin:0;min-width:180px;padding:5px;text-align:left}.jupyter-wrapper .bp3-menu-divider{border-top:1px solid rgba(16,22,26,.15);display:block;margin:5px}.jupyter-wrapper .bp3-dark .bp3-menu-divider{border-top-color:#ffffff26}.jupyter-wrapper .bp3-menu-item{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;border-radius:2px;color:inherit;line-height:20px;padding:5px 7px;text-decoration:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .bp3-menu-item>*{-webkit-box-flex:0;-ms-flex-positive:0;flex-grow:0;-ms-flex-negative:0;flex-shrink:0}.jupyter-wrapper .bp3-menu-item>.bp3-fill{-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;-ms-flex-negative:1;flex-shrink:1}.jupyter-wrapper .bp3-menu-item:before,.jupyter-wrapper .bp3-menu-item>*{margin-right:7px}.jupyter-wrapper .bp3-menu-item:empty:before,.jupyter-wrapper .bp3-menu-item>:last-child{margin-right:0}.jupyter-wrapper .bp3-menu-item>.bp3-fill{word-break:break-word}.jupyter-wrapper .bp3-menu-item:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-menu-item{background-color:#a7b6c24d;cursor:pointer;text-decoration:none}.jupyter-wrapper .bp3-menu-item.bp3-disabled{background-color:inherit;color:#5c708099;cursor:not-allowed}.jupyter-wrapper .bp3-dark .bp3-menu-item{color:inherit}.jupyter-wrapper .bp3-dark .bp3-menu-item:hover,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-menu-item,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-menu-item{background-color:#8a9ba826;color:inherit}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-disabled{background-color:inherit;color:#a7b6c299}.jupyter-wrapper .bp3-menu-item.bp3-intent-primary{color:#106ba3}.jupyter-wrapper .bp3-menu-item.bp3-intent-primary .bp3-icon{color:inherit}.jupyter-wrapper 
.bp3-menu-item.bp3-intent-primary:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary .bp3-menu-item-label{color:#106ba3}.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary.bp3-active{background-color:#137cbd}.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:active{background-color:#106ba3}.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:hover:before,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:hover:after,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:hover .bp3-menu-item-label,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:active,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:active:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary:active .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary.bp3-active,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary.bp3-active:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary.bp3-active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-primary.bp3-active .bp3-menu-item-label{color:#fff}.jupyter-wrapper .bp3-menu-item.bp3-intent-success{color:#0d8050}.jupyter-wrapper .bp3-menu-item.bp3-intent-success .bp3-icon{color:inherit}.jupyter-wrapper .bp3-menu-item.bp3-intent-success:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-success .bp3-menu-item-label{color:#0d8050}.jupyter-wrapper .bp3-menu-item.bp3-intent-success:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-success.bp3-active{background-color:#0f9960}.jupyter-wrapper .bp3-menu-item.bp3-intent-success:active{background-color:#0d8050}.jupyter-wrapper .bp3-menu-item.bp3-intent-success:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:hover:before,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:hover:after,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:hover .bp3-menu-item-label,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:active,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:active:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-success:active .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-success.bp3-active,.jupyter-wrapper .bp3-menu-item.bp3-intent-success.bp3-active:before,.jupyter-wrapper 
.bp3-menu-item.bp3-intent-success.bp3-active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-success.bp3-active .bp3-menu-item-label{color:#fff}.jupyter-wrapper .bp3-menu-item.bp3-intent-warning{color:#bf7326}.jupyter-wrapper .bp3-menu-item.bp3-intent-warning .bp3-icon{color:inherit}.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning .bp3-menu-item-label{color:#bf7326}.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-warning.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning.bp3-active{background-color:#d9822b}.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:active{background-color:#bf7326}.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-warning.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:hover:before,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-warning.bp3-menu-item:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:hover:after,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-warning.bp3-menu-item:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:hover .bp3-menu-item-label,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-warning.bp3-menu-item .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:active,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:active:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning:active .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning.bp3-active,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning.bp3-active:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning.bp3-active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-warning.bp3-active .bp3-menu-item-label{color:#fff}.jupyter-wrapper .bp3-menu-item.bp3-intent-danger{color:#c23030}.jupyter-wrapper .bp3-menu-item.bp3-intent-danger .bp3-icon{color:inherit}.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger .bp3-menu-item-label{color:#c23030}.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-danger.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger.bp3-active{background-color:#db3737}.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:active{background-color:#c23030}.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-danger.bp3-menu-item,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:hover:before,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-danger.bp3-menu-item:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:hover:after,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-danger.bp3-menu-item:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:hover .bp3-menu-item-label,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-danger.bp3-menu-item .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:active,.jupyter-wrapper 
.bp3-menu-item.bp3-intent-danger:active:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger:active .bp3-menu-item-label,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger.bp3-active,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger.bp3-active:before,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger.bp3-active:after,.jupyter-wrapper .bp3-menu-item.bp3-intent-danger.bp3-active .bp3-menu-item-label{color:#fff}.jupyter-wrapper .bp3-menu-item:before{font-family:Icons16,sans-serif;font-size:16px;font-style:normal;font-weight:400;line-height:1;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;margin-right:7px}.jupyter-wrapper .bp3-menu-item:before,.jupyter-wrapper .bp3-menu-item>.bp3-icon{color:#5c7080;margin-top:2px}.jupyter-wrapper .bp3-menu-item .bp3-menu-item-label{color:#5c7080}.jupyter-wrapper .bp3-menu-item:hover,.jupyter-wrapper .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-menu-item{color:inherit}.jupyter-wrapper .bp3-menu-item.bp3-active,.jupyter-wrapper .bp3-menu-item:active{background-color:#7386944d}.jupyter-wrapper .bp3-menu-item.bp3-disabled{background-color:inherit!important;color:#5c708099!important;cursor:not-allowed!important;outline:none!important}.jupyter-wrapper .bp3-menu-item.bp3-disabled:before,.jupyter-wrapper .bp3-menu-item.bp3-disabled>.bp3-icon,.jupyter-wrapper .bp3-menu-item.bp3-disabled .bp3-menu-item-label{color:#5c708099!important}.jupyter-wrapper .bp3-large .bp3-menu-item{font-size:16px;line-height:22px;padding:9px 7px}.jupyter-wrapper .bp3-large .bp3-menu-item .bp3-icon{margin-top:3px}.jupyter-wrapper .bp3-large .bp3-menu-item:before{font-family:Icons20,sans-serif;font-size:20px;font-style:normal;font-weight:400;line-height:1;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;margin-right:10px;margin-top:1px}.jupyter-wrapper button.bp3-menu-item{background:none;border:none;text-align:left;width:100%}.jupyter-wrapper .bp3-menu-header{border-top:1px solid rgba(16,22,26,.15);display:block;margin:5px;cursor:default;padding-left:2px}.jupyter-wrapper .bp3-dark .bp3-menu-header{border-top-color:#ffffff26}.jupyter-wrapper .bp3-menu-header:first-of-type{border-top:none}.jupyter-wrapper .bp3-menu-header>h6{color:#182026;font-weight:600;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;word-wrap:normal;line-height:17px;margin:0;padding:10px 7px 0 1px}.jupyter-wrapper .bp3-menu-header:first-of-type>h6{padding-top:0}.jupyter-wrapper .bp3-large .bp3-menu-header>h6{font-size:18px;padding-bottom:5px;padding-top:15px}.jupyter-wrapper .bp3-large .bp3-menu-header:first-of-type>h6{padding-top:0}.jupyter-wrapper .bp3-dark .bp3-menu{background:#30404d;color:#f5f8fa}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary{color:#48aff0}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary .bp3-icon{color:inherit}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:before,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:after,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary .bp3-menu-item-label{color:#48aff0}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:hover,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary.bp3-active{background-color:#137cbd}.jupyter-wrapper .bp3-dark 
.bp3-menu-item.bp3-intent-primary:active{background-color:#106ba3}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:hover,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:hover:before,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item:before,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item:before,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:hover:after,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item:after,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item:after,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:hover .bp3-menu-item-label,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item .bp3-menu-item-label,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-primary.bp3-menu-item .bp3-menu-item-label,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:active,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:active:before,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:active:after,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary:active .bp3-menu-item-label,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary.bp3-active,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary.bp3-active:before,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary.bp3-active:after,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-primary.bp3-active .bp3-menu-item-label{color:#fff}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success{color:#3dcc91}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success .bp3-icon{color:inherit}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success:before,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success:after,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success .bp3-menu-item-label{color:#3dcc91}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success:hover,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success.bp3-active{background-color:#0f9960}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success:active{background-color:#0d8050}.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success:hover,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success:hover:before,.jupyter-wrapper .bp3-dark .bp3-submenu .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item:before,.jupyter-wrapper .bp3-submenu .bp3-dark .bp3-popover-target.bp3-popover-open>.bp3-intent-success.bp3-menu-item:before,.jupyter-wrapper .bp3-dark .bp3-menu-item.bp3-intent-success:hover:after,.jupyter-wrapper .bp3-dark .bp3-submenu 
.bp3-toast-container-bottom .bp3-toast.bp3-toast-enter:not(.bp3-toast-enter-active),.jupyter-wrapper .bp3-toast-container-bottom .bp3-toast.bp3-toast-enter:not(.bp3-toast-enter-active)~.bp3-toast,.jupyter-wrapper .bp3-toast-container-bottom .bp3-toast.bp3-toast-appear:not(.bp3-toast-appear-active),.jupyter-wrapper .bp3-toast-container-bottom .bp3-toast.bp3-toast-appear:not(.bp3-toast-appear-active)~.bp3-toast,.jupyter-wrapper .bp3-toast-container-bottom .bp3-toast.bp3-toast-exit-active~.bp3-toast,.jupyter-wrapper .bp3-toast-container-bottom .bp3-toast.bp3-toast-leave-active~.bp3-toast{-webkit-transform:translateY(60px);transform:translateY(60px)}.jupyter-wrapper .bp3-tooltip{-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.1),0 2px 4px rgba(16,22,26,.2),0 8px 24px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a1a,0 2px 4px #10161a33,0 8px 24px #10161a33;-webkit-transform:scale(1);transform:scale(1)}.jupyter-wrapper .bp3-tooltip .bp3-popover-arrow{height:22px;position:absolute;width:22px}.jupyter-wrapper .bp3-tooltip .bp3-popover-arrow:before{height:14px;margin:4px;width:14px}.jupyter-wrapper .bp3-tether-element-attached-bottom.bp3-tether-target-attached-top>.bp3-tooltip{margin-bottom:11px;margin-top:-11px}.jupyter-wrapper .bp3-tether-element-attached-bottom.bp3-tether-target-attached-top>.bp3-tooltip>.bp3-popover-arrow{bottom:-8px}.jupyter-wrapper .bp3-tether-element-attached-bottom.bp3-tether-target-attached-top>.bp3-tooltip>.bp3-popover-arrow svg{-webkit-transform:rotate(-90deg);transform:rotate(-90deg)}.jupyter-wrapper .bp3-tether-element-attached-left.bp3-tether-target-attached-right>.bp3-tooltip{margin-left:11px}.jupyter-wrapper .bp3-tether-element-attached-left.bp3-tether-target-attached-right>.bp3-tooltip>.bp3-popover-arrow{left:-8px}.jupyter-wrapper .bp3-tether-element-attached-left.bp3-tether-target-attached-right>.bp3-tooltip>.bp3-popover-arrow svg{-webkit-transform:rotate(0);transform:rotate(0)}.jupyter-wrapper .bp3-tether-element-attached-top.bp3-tether-target-attached-bottom>.bp3-tooltip{margin-top:11px}.jupyter-wrapper .bp3-tether-element-attached-top.bp3-tether-target-attached-bottom>.bp3-tooltip>.bp3-popover-arrow{top:-8px}.jupyter-wrapper .bp3-tether-element-attached-top.bp3-tether-target-attached-bottom>.bp3-tooltip>.bp3-popover-arrow svg{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.jupyter-wrapper .bp3-tether-element-attached-right.bp3-tether-target-attached-left>.bp3-tooltip{margin-left:-11px;margin-right:11px}.jupyter-wrapper .bp3-tether-element-attached-right.bp3-tether-target-attached-left>.bp3-tooltip>.bp3-popover-arrow{right:-8px}.jupyter-wrapper .bp3-tether-element-attached-right.bp3-tether-target-attached-left>.bp3-tooltip>.bp3-popover-arrow svg{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.jupyter-wrapper .bp3-tether-element-attached-middle>.bp3-tooltip>.bp3-popover-arrow{top:50%;-webkit-transform:translateY(-50%);transform:translateY(-50%)}.jupyter-wrapper .bp3-tether-element-attached-center>.bp3-tooltip>.bp3-popover-arrow{right:50%;-webkit-transform:translateX(50%);transform:translate(50%)}.jupyter-wrapper .bp3-tether-element-attached-top.bp3-tether-target-attached-top>.bp3-tooltip>.bp3-popover-arrow{top:-.22183px}.jupyter-wrapper .bp3-tether-element-attached-right.bp3-tether-target-attached-right>.bp3-tooltip>.bp3-popover-arrow{right:-.22183px}.jupyter-wrapper .bp3-tether-element-attached-left.bp3-tether-target-attached-left>.bp3-tooltip>.bp3-popover-arrow{left:-.22183px}.jupyter-wrapper 
.bp3-tether-element-attached-bottom.bp3-tether-target-attached-bottom>.bp3-tooltip>.bp3-popover-arrow{bottom:-.22183px}.jupyter-wrapper .bp3-tether-element-attached-top.bp3-tether-element-attached-left>.bp3-tooltip{-webkit-transform-origin:top left;transform-origin:top left}.jupyter-wrapper .bp3-tether-element-attached-top.bp3-tether-element-attached-center>.bp3-tooltip{-webkit-transform-origin:top center;transform-origin:top center}.jupyter-wrapper .bp3-tether-element-attached-top.bp3-tether-element-attached-right>.bp3-tooltip{-webkit-transform-origin:top right;transform-origin:top right}.jupyter-wrapper .bp3-tether-element-attached-middle.bp3-tether-element-attached-left>.bp3-tooltip{-webkit-transform-origin:center left;transform-origin:center left}.jupyter-wrapper .bp3-tether-element-attached-middle.bp3-tether-element-attached-center>.bp3-tooltip{-webkit-transform-origin:center center;transform-origin:center center}.jupyter-wrapper .bp3-tether-element-attached-middle.bp3-tether-element-attached-right>.bp3-tooltip{-webkit-transform-origin:center right;transform-origin:center right}.jupyter-wrapper .bp3-tether-element-attached-bottom.bp3-tether-element-attached-left>.bp3-tooltip{-webkit-transform-origin:bottom left;transform-origin:bottom left}.jupyter-wrapper .bp3-tether-element-attached-bottom.bp3-tether-element-attached-center>.bp3-tooltip{-webkit-transform-origin:bottom center;transform-origin:bottom center}.jupyter-wrapper .bp3-tether-element-attached-bottom.bp3-tether-element-attached-right>.bp3-tooltip{-webkit-transform-origin:bottom right;transform-origin:bottom right}.jupyter-wrapper .bp3-tooltip .bp3-popover-content{background:#394b59;color:#f5f8fa}.jupyter-wrapper .bp3-tooltip .bp3-popover-arrow:before{-webkit-box-shadow:1px 1px 6px rgba(16,22,26,.2);box-shadow:1px 1px 6px #10161a33}.jupyter-wrapper .bp3-tooltip .bp3-popover-arrow-border{fill:#10161a;fill-opacity:.1}.jupyter-wrapper .bp3-tooltip .bp3-popover-arrow-fill{fill:#394b59}.jupyter-wrapper .bp3-popover-enter>.bp3-tooltip,.jupyter-wrapper .bp3-popover-appear>.bp3-tooltip{-webkit-transform:scale(.8);transform:scale(.8)}.jupyter-wrapper .bp3-popover-enter-active>.bp3-tooltip,.jupyter-wrapper .bp3-popover-appear-active>.bp3-tooltip{-webkit-transform:scale(1);transform:scale(1);-webkit-transition-delay:0;transition-delay:0;-webkit-transition-duration:.1s;transition-duration:.1s;-webkit-transition-property:-webkit-transform;transition-property:-webkit-transform;transition-property:transform;transition-property:transform,-webkit-transform;-webkit-transition-timing-function:cubic-bezier(.4,1,.75,.9);transition-timing-function:cubic-bezier(.4,1,.75,.9)}.jupyter-wrapper .bp3-popover-exit>.bp3-tooltip{-webkit-transform:scale(1);transform:scale(1)}.jupyter-wrapper .bp3-popover-exit-active>.bp3-tooltip{-webkit-transform:scale(.8);transform:scale(.8);-webkit-transition-delay:0;transition-delay:0;-webkit-transition-duration:.1s;transition-duration:.1s;-webkit-transition-property:-webkit-transform;transition-property:-webkit-transform;transition-property:transform;transition-property:transform,-webkit-transform;-webkit-transition-timing-function:cubic-bezier(.4,1,.75,.9);transition-timing-function:cubic-bezier(.4,1,.75,.9)}.jupyter-wrapper .bp3-tooltip .bp3-popover-content{padding:10px 12px}.jupyter-wrapper .bp3-tooltip.bp3-dark,.jupyter-wrapper .bp3-dark .bp3-tooltip{-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.2),0 2px 4px rgba(16,22,26,.4),0 8px 24px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a33,0 2px 4px #10161a66,0 8px 24px 
#10161a66}.jupyter-wrapper .bp3-tooltip.bp3-dark .bp3-popover-content,.jupyter-wrapper .bp3-dark .bp3-tooltip .bp3-popover-content{background:#e1e8ed;color:#394b59}.jupyter-wrapper .bp3-tooltip.bp3-dark .bp3-popover-arrow:before,.jupyter-wrapper .bp3-dark .bp3-tooltip .bp3-popover-arrow:before{-webkit-box-shadow:1px 1px 6px rgba(16,22,26,.4);box-shadow:1px 1px 6px #10161a66}.jupyter-wrapper .bp3-tooltip.bp3-dark .bp3-popover-arrow-border,.jupyter-wrapper .bp3-dark .bp3-tooltip .bp3-popover-arrow-border{fill:#10161a;fill-opacity:.2}.jupyter-wrapper .bp3-tooltip.bp3-dark .bp3-popover-arrow-fill,.jupyter-wrapper .bp3-dark .bp3-tooltip .bp3-popover-arrow-fill{fill:#e1e8ed}.jupyter-wrapper .bp3-tooltip.bp3-intent-primary .bp3-popover-content{background:#137cbd;color:#fff}.jupyter-wrapper .bp3-tooltip.bp3-intent-primary .bp3-popover-arrow-fill{fill:#137cbd}.jupyter-wrapper .bp3-tooltip.bp3-intent-success .bp3-popover-content{background:#0f9960;color:#fff}.jupyter-wrapper .bp3-tooltip.bp3-intent-success .bp3-popover-arrow-fill{fill:#0f9960}.jupyter-wrapper .bp3-tooltip.bp3-intent-warning .bp3-popover-content{background:#d9822b;color:#fff}.jupyter-wrapper .bp3-tooltip.bp3-intent-warning .bp3-popover-arrow-fill{fill:#d9822b}.jupyter-wrapper .bp3-tooltip.bp3-intent-danger .bp3-popover-content{background:#db3737;color:#fff}.jupyter-wrapper .bp3-tooltip.bp3-intent-danger .bp3-popover-arrow-fill{fill:#db3737}.jupyter-wrapper .bp3-tooltip-indicator{border-bottom:dotted 1px;cursor:help}.jupyter-wrapper .bp3-tree .bp3-icon,.jupyter-wrapper .bp3-tree .bp3-icon-standard,.jupyter-wrapper .bp3-tree .bp3-icon-large{color:#5c7080}.jupyter-wrapper .bp3-tree .bp3-icon.bp3-intent-primary,.jupyter-wrapper .bp3-tree .bp3-icon-standard.bp3-intent-primary,.jupyter-wrapper .bp3-tree .bp3-icon-large.bp3-intent-primary{color:#137cbd}.jupyter-wrapper .bp3-tree .bp3-icon.bp3-intent-success,.jupyter-wrapper .bp3-tree .bp3-icon-standard.bp3-intent-success,.jupyter-wrapper .bp3-tree .bp3-icon-large.bp3-intent-success{color:#0f9960}.jupyter-wrapper .bp3-tree .bp3-icon.bp3-intent-warning,.jupyter-wrapper .bp3-tree .bp3-icon-standard.bp3-intent-warning,.jupyter-wrapper .bp3-tree .bp3-icon-large.bp3-intent-warning{color:#d9822b}.jupyter-wrapper .bp3-tree .bp3-icon.bp3-intent-danger,.jupyter-wrapper .bp3-tree .bp3-icon-standard.bp3-intent-danger,.jupyter-wrapper .bp3-tree .bp3-icon-large.bp3-intent-danger{color:#db3737}.jupyter-wrapper .bp3-tree-node-list{list-style:none;margin:0;padding-left:0}.jupyter-wrapper .bp3-tree-root{background-color:transparent;cursor:default;padding-left:0;position:relative}.jupyter-wrapper .bp3-tree-node-content-0{padding-left:0}.jupyter-wrapper .bp3-tree-node-content-1{padding-left:23px}.jupyter-wrapper .bp3-tree-node-content-2{padding-left:46px}.jupyter-wrapper .bp3-tree-node-content-3{padding-left:69px}.jupyter-wrapper .bp3-tree-node-content-4{padding-left:92px}.jupyter-wrapper .bp3-tree-node-content-5{padding-left:115px}.jupyter-wrapper .bp3-tree-node-content-6{padding-left:138px}.jupyter-wrapper .bp3-tree-node-content-7{padding-left:161px}.jupyter-wrapper .bp3-tree-node-content-8{padding-left:184px}.jupyter-wrapper .bp3-tree-node-content-9{padding-left:207px}.jupyter-wrapper .bp3-tree-node-content-10{padding-left:230px}.jupyter-wrapper .bp3-tree-node-content-11{padding-left:253px}.jupyter-wrapper .bp3-tree-node-content-12{padding-left:276px}.jupyter-wrapper .bp3-tree-node-content-13{padding-left:299px}.jupyter-wrapper .bp3-tree-node-content-14{padding-left:322px}.jupyter-wrapper 
.bp3-tree-node-content-15{padding-left:345px}.jupyter-wrapper .bp3-tree-node-content-16{padding-left:368px}.jupyter-wrapper .bp3-tree-node-content-17{padding-left:391px}.jupyter-wrapper .bp3-tree-node-content-18{padding-left:414px}.jupyter-wrapper .bp3-tree-node-content-19{padding-left:437px}.jupyter-wrapper .bp3-tree-node-content-20{padding-left:460px}.jupyter-wrapper .bp3-tree-node-content{-webkit-box-align:center;-ms-flex-align:center;align-items:center;display:-webkit-box;display:-ms-flexbox;display:flex;height:30px;padding-right:5px;width:100%}.jupyter-wrapper .bp3-tree-node-content:hover{background-color:#bfccd666}.jupyter-wrapper .bp3-tree-node-caret,.jupyter-wrapper .bp3-tree-node-caret-none{min-width:30px}.jupyter-wrapper .bp3-tree-node-caret{color:#5c7080;cursor:pointer;padding:7px;-webkit-transform:rotate(0deg);transform:rotate(0);-webkit-transition:-webkit-transform .2s cubic-bezier(.4,1,.75,.9);transition:-webkit-transform .2s cubic-bezier(.4,1,.75,.9);transition:transform .2s cubic-bezier(.4,1,.75,.9);transition:transform .2s cubic-bezier(.4,1,.75,.9),-webkit-transform .2s cubic-bezier(.4,1,.75,.9)}.jupyter-wrapper .bp3-tree-node-caret:hover{color:#182026}.jupyter-wrapper .bp3-dark .bp3-tree-node-caret{color:#a7b6c2}.jupyter-wrapper .bp3-dark .bp3-tree-node-caret:hover{color:#f5f8fa}.jupyter-wrapper .bp3-tree-node-caret.bp3-tree-node-caret-open{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.jupyter-wrapper .bp3-tree-node-caret.bp3-icon-standard:before{content:\"\ue695\"}.jupyter-wrapper .bp3-tree-node-icon{margin-right:7px;position:relative}.jupyter-wrapper .bp3-tree-node-label{overflow:hidden;text-overflow:ellipsis;white-space:nowrap;word-wrap:normal;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;position:relative;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .bp3-tree-node-label span{display:inline}.jupyter-wrapper .bp3-tree-node-secondary-label{padding:0 5px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jupyter-wrapper .bp3-tree-node-secondary-label .bp3-popover-wrapper,.jupyter-wrapper .bp3-tree-node-secondary-label .bp3-popover-target{-webkit-box-align:center;-ms-flex-align:center;align-items:center;display:-webkit-box;display:-ms-flexbox;display:flex}.jupyter-wrapper .bp3-tree-node.bp3-disabled .bp3-tree-node-content{background-color:inherit;color:#5c708099;cursor:not-allowed}.jupyter-wrapper .bp3-tree-node.bp3-disabled .bp3-tree-node-caret,.jupyter-wrapper .bp3-tree-node.bp3-disabled .bp3-tree-node-icon{color:#5c708099;cursor:not-allowed}.jupyter-wrapper .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content{background-color:#137cbd}.jupyter-wrapper .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content,.jupyter-wrapper .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content .bp3-icon,.jupyter-wrapper .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content .bp3-icon-standard,.jupyter-wrapper .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content .bp3-icon-large{color:#fff}.jupyter-wrapper .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content .bp3-tree-node-caret:before{color:#ffffffb3}.jupyter-wrapper .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content .bp3-tree-node-caret:hover:before{color:#fff}.jupyter-wrapper .bp3-dark .bp3-tree-node-content:hover{background-color:#5c70804d}.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-standard,.jupyter-wrapper .bp3-dark .bp3-tree 
.bp3-icon-large{color:#a7b6c2}.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon.bp3-intent-primary,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-standard.bp3-intent-primary,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-large.bp3-intent-primary{color:#137cbd}.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon.bp3-intent-success,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-standard.bp3-intent-success,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-large.bp3-intent-success{color:#0f9960}.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon.bp3-intent-warning,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-standard.bp3-intent-warning,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-large.bp3-intent-warning{color:#d9822b}.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon.bp3-intent-danger,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-standard.bp3-intent-danger,.jupyter-wrapper .bp3-dark .bp3-tree .bp3-icon-large.bp3-intent-danger{color:#db3737}.jupyter-wrapper .bp3-dark .bp3-tree-node.bp3-tree-node-selected>.bp3-tree-node-content{background-color:#137cbd}.jupyter-wrapper .bp3-omnibar{-webkit-filter:blur(0);filter:blur(0);opacity:1;background-color:#fff;border-radius:3px;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.1),0 4px 8px rgba(16,22,26,.2),0 18px 46px 6px rgba(16,22,26,.2);box-shadow:0 0 0 1px #10161a1a,0 4px 8px #10161a33,0 18px 46px 6px #10161a33;left:calc(50% - 250px);top:20vh;width:500px;z-index:21}.jupyter-wrapper .bp3-omnibar.bp3-overlay-enter,.jupyter-wrapper .bp3-omnibar.bp3-overlay-appear{-webkit-filter:blur(20px);filter:blur(20px);opacity:.2}.jupyter-wrapper .bp3-omnibar.bp3-overlay-enter-active,.jupyter-wrapper .bp3-omnibar.bp3-overlay-appear-active{-webkit-filter:blur(0);filter:blur(0);opacity:1;-webkit-transition-delay:0;transition-delay:0;-webkit-transition-duration:.2s;transition-duration:.2s;-webkit-transition-property:opacity,-webkit-filter;transition-property:opacity,-webkit-filter;transition-property:filter,opacity;transition-property:filter,opacity,-webkit-filter;-webkit-transition-timing-function:cubic-bezier(.4,1,.75,.9);transition-timing-function:cubic-bezier(.4,1,.75,.9)}.jupyter-wrapper .bp3-omnibar.bp3-overlay-exit{-webkit-filter:blur(0);filter:blur(0);opacity:1}.jupyter-wrapper .bp3-omnibar.bp3-overlay-exit-active{-webkit-filter:blur(20px);filter:blur(20px);opacity:.2;-webkit-transition-delay:0;transition-delay:0;-webkit-transition-duration:.2s;transition-duration:.2s;-webkit-transition-property:opacity,-webkit-filter;transition-property:opacity,-webkit-filter;transition-property:filter,opacity;transition-property:filter,opacity,-webkit-filter;-webkit-transition-timing-function:cubic-bezier(.4,1,.75,.9);transition-timing-function:cubic-bezier(.4,1,.75,.9)}.jupyter-wrapper .bp3-omnibar .bp3-input{background-color:transparent;border-radius:0}.jupyter-wrapper .bp3-omnibar .bp3-input,.jupyter-wrapper .bp3-omnibar .bp3-input:focus{-webkit-box-shadow:none;box-shadow:none}.jupyter-wrapper .bp3-omnibar .bp3-menu{background-color:transparent;border-radius:0;-webkit-box-shadow:inset 0 1px 0 rgba(16,22,26,.15);box-shadow:inset 0 1px #10161a26;max-height:calc(60vh - 40px);overflow:auto}.jupyter-wrapper .bp3-omnibar .bp3-menu:empty{display:none}.jupyter-wrapper .bp3-dark .bp3-omnibar,.jupyter-wrapper .bp3-omnibar.bp3-dark{background-color:#30404d;-webkit-box-shadow:0 0 0 1px rgba(16,22,26,.2),0 4px 8px rgba(16,22,26,.4),0 18px 46px 6px rgba(16,22,26,.4);box-shadow:0 0 0 1px #10161a33,0 4px 8px #10161a66,0 18px 46px 6px #10161a66}.jupyter-wrapper .bp3-omnibar-overlay 
.bp3-overlay-backdrop{background-color:#10161a33}.jupyter-wrapper .bp3-multi-select{min-width:150px}.jupyter-wrapper .bp3-multi-select-popover .bp3-menu{max-height:300px;max-width:400px;overflow:auto}.jupyter-wrapper .bp3-select-popover .bp3-popover-content{padding:5px}.jupyter-wrapper .bp3-select-popover .bp3-input-group{margin-bottom:0}.jupyter-wrapper .bp3-select-popover .bp3-menu{max-height:300px;max-width:400px;overflow:auto;padding:0}.jupyter-wrapper .bp3-select-popover .bp3-menu:not(:first-child){padding-top:5px}.jupyter-wrapper :root{--jp-icon-add-above: url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTQiIGhlaWdodD0iMTQiIHZpZXdCb3g9IjAgMCAxNCAxNCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPGcgY2xpcC1wYXRoPSJ1cmwoI2NsaXAwXzEzN18xOTQ5MikiPgo8cGF0aCBjbGFzcz0ianAtaWNvbjMiIGQ9Ik00Ljc1IDQuOTMwNjZINi42MjVWNi44MDU2NkM2LjYyNSA3LjAxMTkxIDYuNzkzNzUgNy4xODA2NiA3IDcuMTgwNjZDNy4yMDYyNSA3LjE4MDY2IDcuMzc1IDcuMDExOTEgNy4zNzUgNi44MDU2NlY0LjkzMDY2SDkuMjVDOS40NTYyNSA0LjkzMDY2IDkuNjI1IDQuNzYxOTEgOS42MjUgNC41NTU2NkM5LjYyNSA0LjM0OTQxIDkuNDU2MjUgNC4xODA2NiA5LjI1IDQuMTgwNjZINy4zNzVWMi4zMDU2NkM3LjM3NSAyLjA5OTQxIDcuMjA2MjUgMS45MzA2NiA3IDEuOTMwNjZDNi43OTM3NSAxLjkzMDY2IDYuNjI1IDIuMDk5NDEgNi42MjUgMi4zMDU2NlY0LjE4MDY2SDQuNzVDNC41NDM3NSA0LjE4MDY2IDQuMzc1IDQuMzQ5NDEgNC4zNzUgNC41NTU2NkM0LjM3NSA0Ljc2MTkxIDQuNTQzNzUgNC45MzA2NiA0Ljc1IDQuOTMwNjZaIiBmaWxsPSIjNjE2MTYxIiBzdHJva2U9IiM2MTYxNjEiIHN0cm9rZS13aWR0aD0iMC43Ii8+CjwvZz4KPHBhdGggY2xhc3M9ImpwLWljb24zIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTExLjUgOS41VjExLjVMMi41IDExLjVWOS41TDExLjUgOS41Wk0xMiA4QzEyLjU1MjMgOCAxMyA4LjQ0NzcyIDEzIDlWMTJDMTMgMTIuNTUyMyAxMi41NTIzIDEzIDEyIDEzTDIgMTNDMS40NDc3MiAxMyAxIDEyLjU1MjMgMSAxMlY5QzEgOC40NDc3MiAxLjQ0NzcxIDggMiA4TDEyIDhaIiBmaWxsPSIjNjE2MTYxIi8+CjxkZWZzPgo8Y2xpcFBhdGggaWQ9ImNsaXAwXzEzN18xOTQ5MiI+CjxyZWN0IGNsYXNzPSJqcC1pY29uMyIgd2lkdGg9IjYiIGhlaWdodD0iNiIgZmlsbD0id2hpdGUiIHRyYW5zZm9ybT0ibWF0cml4KC0xIDAgMCAxIDEwIDEuNTU1NjYpIi8+CjwvY2xpcFBhdGg+CjwvZGVmcz4KPC9zdmc+Cg==);--jp-icon-add-below: 
url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTQiIGhlaWdodD0iMTQiIHZpZXdCb3g9IjAgMCAxNCAxNCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPGcgY2xpcC1wYXRoPSJ1cmwoI2NsaXAwXzEzN18xOTQ5OCkiPgo8cGF0aCBjbGFzcz0ianAtaWNvbjMiIGQ9Ik05LjI1IDEwLjA2OTNMNy4zNzUgMTAuMDY5M0w3LjM3NSA4LjE5NDM0QzcuMzc1IDcuOTg4MDkgNy4yMDYyNSA3LjgxOTM0IDcgNy44MTkzNEM2Ljc5Mzc1IDcuODE5MzQgNi42MjUgNy45ODgwOSA2LjYyNSA4LjE5NDM0TDYuNjI1IDEwLjA2OTNMNC43NSAxMC4wNjkzQzQuNTQzNzUgMTAuMDY5MyA0LjM3NSAxMC4yMzgxIDQuMzc1IDEwLjQ0NDNDNC4zNzUgMTAuNjUwNiA0LjU0Mzc1IDEwLjgxOTMgNC43NSAxMC44MTkzTDYuNjI1IDEwLjgxOTNMNi42MjUgMTIuNjk0M0M2LjYyNSAxMi45MDA2IDYuNzkzNzUgMTMuMDY5MyA3IDEzLjA2OTNDNy4yMDYyNSAxMy4wNjkzIDcuMzc1IDEyLjkwMDYgNy4zNzUgMTIuNjk0M0w3LjM3NSAxMC44MTkzTDkuMjUgMTAuODE5M0M5LjQ1NjI1IDEwLjgxOTMgOS42MjUgMTAuNjUwNiA5LjYyNSAxMC40NDQzQzkuNjI1IDEwLjIzODEgOS40NTYyNSAxMC4wNjkzIDkuMjUgMTAuMDY5M1oiIGZpbGw9IiM2MTYxNjEiIHN0cm9rZT0iIzYxNjE2MSIgc3Ryb2tlLXdpZHRoPSIwLjciLz4KPC9nPgo8cGF0aCBjbGFzcz0ianAtaWNvbjMiIGZpbGwtcnVsZT0iZXZlbm9kZCIgY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNMi41IDUuNUwyLjUgMy41TDExLjUgMy41TDExLjUgNS41TDIuNSA1LjVaTTIgN0MxLjQ0NzcyIDcgMSA2LjU1MjI4IDEgNkwxIDNDMSAyLjQ0NzcyIDEuNDQ3NzIgMiAyIDJMMTIgMkMxMi41NTIzIDIgMTMgMi40NDc3MiAxMyAzTDEzIDZDMTMgNi41NTIyOSAxMi41NTIzIDcgMTIgN0wyIDdaIiBmaWxsPSIjNjE2MTYxIi8+CjxkZWZzPgo8Y2xpcFBhdGggaWQ9ImNsaXAwXzEzN18xOTQ5OCI+CjxyZWN0IGNsYXNzPSJqcC1pY29uMyIgd2lkdGg9IjYiIGhlaWdodD0iNiIgZmlsbD0id2hpdGUiIHRyYW5zZm9ybT0ibWF0cml4KDEgMS43NDg0NmUtMDcgMS43NDg0NmUtMDcgLTEgNCAxMy40NDQzKSIvPgo8L2NsaXBQYXRoPgo8L2RlZnM+Cjwvc3ZnPgo=);--jp-icon-add: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTE5IDEzaC02djZoLTJ2LTZINXYtMmg2VjVoMnY2aDZ2MnoiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-bell: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDE2IDE2IiB2ZXJzaW9uPSIxLjEiPgogICA8cGF0aCBjbGFzcz0ianAtaWNvbjIganAtaWNvbi1zZWxlY3RhYmxlIiBmaWxsPSIjMzMzMzMzIgogICAgICBkPSJtOCAwLjI5Yy0xLjQgMC0yLjcgMC43My0zLjYgMS44LTEuMiAxLjUtMS40IDMuNC0xLjUgNS4yLTAuMTggMi4yLTAuNDQgNC0yLjMgNS4zbDAuMjggMS4zaDVjMC4wMjYgMC42NiAwLjMyIDEuMSAwLjcxIDEuNSAwLjg0IDAuNjEgMiAwLjYxIDIuOCAwIDAuNTItMC40IDAuNi0xIDAuNzEtMS41aDVsMC4yOC0xLjNjLTEuOS0wLjk3LTIuMi0zLjMtMi4zLTUuMy0wLjEzLTEuOC0wLjI2LTMuNy0xLjUtNS4yLTAuODUtMS0yLjItMS44LTMuNi0xLjh6bTAgMS40YzAuODggMCAxLjkgMC41NSAyLjUgMS4zIDAuODggMS4xIDEuMSAyLjcgMS4yIDQuNCAwLjEzIDEuNyAwLjIzIDMuNiAxLjMgNS4yaC0xMGMxLjEtMS42IDEuMi0zLjQgMS4zLTUuMiAwLjEzLTEuNyAwLjMtMy4zIDEuMi00LjQgMC41OS0wLjcyIDEuNi0xLjMgMi41LTEuM3ptLTAuNzQgMTJoMS41Yy0wLjAwMTUgMC4yOCAwLjAxNSAwLjc5LTAuNzQgMC43OS0wLjczIDAuMDAxNi0wLjcyLTAuNTMtMC43NC0wLjc5eiIgLz4KPC9zdmc+Cg==);--jp-icon-bug-dot: 
url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjQiIGhlaWdodD0iMjQiIHZpZXdCb3g9IjAgMCAyNCAyNCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICAgIDxnIGNsYXNzPSJqcC1pY29uMyBqcC1pY29uLXNlbGVjdGFibGUiIGZpbGw9IiM2MTYxNjEiPgogICAgICAgIDxwYXRoIGZpbGwtcnVsZT0iZXZlbm9kZCIgY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNMTcuMTkgOEgyMFYxMEgxNy45MUMxNy45NiAxMC4zMyAxOCAxMC42NiAxOCAxMVYxMkgyMFYxNEgxOC41SDE4VjE0LjAyNzVDMTUuNzUgMTQuMjc2MiAxNCAxNi4xODM3IDE0IDE4LjVDMTQgMTkuMjA4IDE0LjE2MzUgMTkuODc3OSAxNC40NTQ5IDIwLjQ3MzlDMTMuNzA2MyAyMC44MTE3IDEyLjg3NTcgMjEgMTIgMjFDOS43OCAyMSA3Ljg1IDE5Ljc5IDYuODEgMThINFYxNkg2LjA5QzYuMDQgMTUuNjcgNiAxNS4zNCA2IDE1VjE0SDRWMTJINlYxMUM2IDEwLjY2IDYuMDQgMTAuMzMgNi4wOSAxMEg0VjhINi44MUM3LjI2IDcuMjIgNy44OCA2LjU1IDguNjIgNi4wNEw3IDQuNDFMOC40MSAzTDEwLjU5IDUuMTdDMTEuMDQgNS4wNiAxMS41MSA1IDEyIDVDMTIuNDkgNSAxMi45NiA1LjA2IDEzLjQyIDUuMTdMMTUuNTkgM0wxNyA0LjQxTDE1LjM3IDYuMDRDMTYuMTIgNi41NSAxNi43NCA3LjIyIDE3LjE5IDhaTTEwIDE2SDE0VjE0SDEwVjE2Wk0xMCAxMkgxNFYxMEgxMFYxMloiIGZpbGw9IiM2MTYxNjEiLz4KICAgICAgICA8cGF0aCBkPSJNMjIgMTguNUMyMiAyMC40MzMgMjAuNDMzIDIyIDE4LjUgMjJDMTYuNTY3IDIyIDE1IDIwLjQzMyAxNSAxOC41QzE1IDE2LjU2NyAxNi41NjcgMTUgMTguNSAxNUMyMC40MzMgMTUgMjIgMTYuNTY3IDIyIDE4LjVaIiBmaWxsPSIjNjE2MTYxIi8+CiAgICA8L2c+Cjwvc3ZnPgo=);--jp-icon-bug: url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMganAtaWNvbi1zZWxlY3RhYmxlIiBmaWxsPSIjNjE2MTYxIj4KICAgIDxwYXRoIGQ9Ik0yMCA4aC0yLjgxYy0uNDUtLjc4LTEuMDctMS40NS0xLjgyLTEuOTZMMTcgNC40MSAxNS41OSAzbC0yLjE3IDIuMTdDMTIuOTYgNS4wNiAxMi40OSA1IDEyIDVjLS40OSAwLS45Ni4wNi0xLjQxLjE3TDguNDEgMyA3IDQuNDFsMS42MiAxLjYzQzcuODggNi41NSA3LjI2IDcuMjIgNi44MSA4SDR2MmgyLjA5Yy0uMDUuMzMtLjA5LjY2LS4wOSAxdjFINHYyaDJ2MWMwIC4zNC4wNC42Ny4wOSAxSDR2MmgyLjgxYzEuMDQgMS43OSAyLjk3IDMgNS4xOSAzczQuMTUtMS4yMSA1LjE5LTNIMjB2LTJoLTIuMDljLjA1LS4zMy4wOS0uNjYuMDktMXYtMWgydi0yaC0ydi0xYzAtLjM0LS4wNC0uNjctLjA5LTFIMjBWOHptLTYgOGgtNHYtMmg0djJ6bTAtNGgtNHYtMmg0djJ6Ii8+CiAgPC9nPgo8L3N2Zz4K);--jp-icon-build: 
url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTYiIHZpZXdCb3g9IjAgMCAyNCAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTE0LjkgMTcuNDVDMTYuMjUgMTcuNDUgMTcuMzUgMTYuMzUgMTcuMzUgMTVDMTcuMzUgMTMuNjUgMTYuMjUgMTIuNTUgMTQuOSAxMi41NUMxMy41NCAxMi41NSAxMi40NSAxMy42NSAxMi40NSAxNUMxMi40NSAxNi4zNSAxMy41NCAxNy40NSAxNC45IDE3LjQ1Wk0yMC4xIDE1LjY4TDIxLjU4IDE2Ljg0QzIxLjcxIDE2Ljk1IDIxLjc1IDE3LjEzIDIxLjY2IDE3LjI5TDIwLjI2IDE5LjcxQzIwLjE3IDE5Ljg2IDIwIDE5LjkyIDE5LjgzIDE5Ljg2TDE4LjA5IDE5LjE2QzE3LjczIDE5LjQ0IDE3LjMzIDE5LjY3IDE2LjkxIDE5Ljg1TDE2LjY0IDIxLjdDMTYuNjIgMjEuODcgMTYuNDcgMjIgMTYuMyAyMkgxMy41QzEzLjMyIDIyIDEzLjE4IDIxLjg3IDEzLjE1IDIxLjdMMTIuODkgMTkuODVDMTIuNDYgMTkuNjcgMTIuMDcgMTkuNDQgMTEuNzEgMTkuMTZMOS45NjAwMiAxOS44NkM5LjgxMDAyIDE5LjkyIDkuNjIwMDIgMTkuODYgOS41NDAwMiAxOS43MUw4LjE0MDAyIDE3LjI5QzguMDUwMDIgMTcuMTMgOC4wOTAwMiAxNi45NSA4LjIyMDAyIDE2Ljg0TDkuNzAwMDIgMTUuNjhMOS42NTAwMSAxNUw5LjcwMDAyIDE0LjMxTDguMjIwMDIgMTMuMTZDOC4wOTAwMiAxMy4wNSA4LjA1MDAyIDEyLjg2IDguMTQwMDIgMTIuNzFMOS41NDAwMiAxMC4yOUM5LjYyMDAyIDEwLjEzIDkuODEwMDIgMTAuMDcgOS45NjAwMiAxMC4xM0wxMS43MSAxMC44NEMxMi4wNyAxMC41NiAxMi40NiAxMC4zMiAxMi44OSAxMC4xNUwxMy4xNSA4LjI4OTk4QzEzLjE4IDguMTI5OTggMTMuMzIgNy45OTk5OCAxMy41IDcuOTk5OThIMTYuM0MxNi40NyA3Ljk5OTk4IDE2LjYyIDguMTI5OTggMTYuNjQgOC4yODk5OEwxNi45MSAxMC4xNUMxNy4zMyAxMC4zMiAxNy43MyAxMC41NiAxOC4wOSAxMC44NEwxOS44MyAxMC4xM0MyMCAxMC4wNyAyMC4xNyAxMC4xMyAyMC4yNiAxMC4yOUwyMS42NiAxMi43MUMyMS43NSAxMi44NiAyMS43MSAxMy4wNSAyMS41OCAxMy4xNkwyMC4xIDE0LjMxTDIwLjE1IDE1TDIwLjEgMTUuNjhaIi8+CiAgICA8cGF0aCBkPSJNNy4zMjk2NiA3LjQ0NDU0QzguMDgzMSA3LjAwOTU0IDguMzM5MzIgNi4wNTMzMiA3LjkwNDMyIDUuMjk5ODhDNy40NjkzMiA0LjU0NjQzIDYuNTA4MSA0LjI4MTU2IDUuNzU0NjYgNC43MTY1NkM1LjM5MTc2IDQuOTI2MDggNS4xMjY5NSA1LjI3MTE4IDUuMDE4NDkgNS42NzU5NEM0LjkxMDA0IDYuMDgwNzEgNC45NjY4MiA2LjUxMTk4IDUuMTc2MzQgNi44NzQ4OEM1LjYxMTM0IDcuNjI4MzIgNi41NzYyMiA3Ljg3OTU0IDcuMzI5NjYgNy40NDQ1NFpNOS42NTcxOCA0Ljc5NTkzTDEwLjg2NzIgNC45NTE3OUMxMC45NjI4IDQuOTc3NDEgMTEuMDQwMiA1LjA3MTMzIDExLjAzODIgNS4xODc5M0wxMS4wMzg4IDYuOTg4OTNDMTEuMDQ1NSA3LjEwMDU0IDEwLjk2MTYgNy4xOTUxOCAxMC44NTUgNy4yMTA1NEw5LjY2MDAxIDcuMzgwODNMOS4yMzkxNSA4LjEzMTg4TDkuNjY5NjEgOS4yNTc0NUM5LjcwNzI5IDkuMzYyNzEgOS42NjkzNCA5LjQ3Njk5IDkuNTc0MDggOS41MzE5OUw4LjAxNTIzIDEwLjQzMkM3LjkxMTMxIDEwLjQ5MiA3Ljc5MzM3IDEwLjQ2NzcgNy43MjEwNSAxMC4zODI0TDYuOTg3NDggOS40MzE4OEw2LjEwOTMxIDkuNDMwODNMNS4zNDcwNCAxMC4zOTA1QzUuMjg5MDkgMTAuNDcwMiA1LjE3MzgzIDEwLjQ5MDUgNS4wNzE4NyAxMC40MzM5TDMuNTEyNDUgOS41MzI5M0MzLjQxMDQ5IDkuNDc2MzMgMy4zNzY0NyA5LjM1NzQxIDMuNDEwNzUgOS4yNTY3OUwzLjg2MzQ3IDguMTQwOTNMMy42MTc0OSA3Ljc3NDg4TDMuNDIzNDcgNy4zNzg4M0wyLjIzMDc1IDcuMjEyOTdDMi4xMjY0NyA3LjE5MjM1IDIuMDQwNDkgNy4xMDM0MiAyLjA0MjQ1IDYuOTg2ODJMMi4wNDE4NyA1LjE4NTgyQzIuMDQzODMgNS4wNjkyMiAyLjExOTA5IDQuOTc5NTggMi4yMTcwNCA0Ljk2OTIyTDMuNDIwNjUgNC43OTM5M0wzLjg2NzQ5IDQuMDI3ODhMMy40MTEwNSAyLjkxNzMxQzMuMzczMzcgMi44MTIwNCAzLjQxMTMxIDIuNjk3NzYgMy41MTUyMyAyLjYzNzc2TDUuMDc0MDggMS43Mzc3NkM1LjE2OTM0IDEuNjgyNzYgNS4yODcyOSAxLjcwNzA0IDUuMzU5NjEgMS43OTIzMUw2LjExOTE1IDIuNzI3ODhMNi45ODAwMSAyLjczODkzTDcuNzI0OTYgMS43ODkyMkM3Ljc5MTU2IDEuNzA0NTggNy45MTU0OCAxLjY3OTIyIDguMDA4NzkgMS43NDA4Mkw5LjU2ODIxIDIuNjQxODJDOS42NzAxNyAyLjY5ODQyIDkuNzEyODUgMi44MTIzNCA5LjY4NzIzIDIuOTA3OTdMOS4yMTcxOCA0LjAzMzgzTDkuNDYzMTYgNC4zOTk4OEw5LjY1NzE4IDQuNzk1OTNaIi8+CiAgPC9nPgo8L3N2Zz4K);--jp-icon-caret-down-empty-thin: 
url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDIwIDIwIj4KCTxnIGNsYXNzPSJqcC1pY29uMyIgZmlsbD0iIzYxNjE2MSIgc2hhcGUtcmVuZGVyaW5nPSJnZW9tZXRyaWNQcmVjaXNpb24iPgoJCTxwb2x5Z29uIGNsYXNzPSJzdDEiIHBvaW50cz0iOS45LDEzLjYgMy42LDcuNCA0LjQsNi42IDkuOSwxMi4yIDE1LjQsNi43IDE2LjEsNy40ICIvPgoJPC9nPgo8L3N2Zz4K);--jp-icon-caret-down-empty: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDE4IDE4Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiIHNoYXBlLXJlbmRlcmluZz0iZ2VvbWV0cmljUHJlY2lzaW9uIj4KICAgIDxwYXRoIGQ9Ik01LjIsNS45TDksOS43bDMuOC0zLjhsMS4yLDEuMmwtNC45LDVsLTQuOS01TDUuMiw1Ljl6Ii8+CiAgPC9nPgo8L3N2Zz4K);--jp-icon-caret-down: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDE4IDE4Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiIHNoYXBlLXJlbmRlcmluZz0iZ2VvbWV0cmljUHJlY2lzaW9uIj4KICAgIDxwYXRoIGQ9Ik01LjIsNy41TDksMTEuMmwzLjgtMy44SDUuMnoiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-caret-left: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDE4IDE4Ij4KCTxnIGNsYXNzPSJqcC1pY29uMyIgZmlsbD0iIzYxNjE2MSIgc2hhcGUtcmVuZGVyaW5nPSJnZW9tZXRyaWNQcmVjaXNpb24iPgoJCTxwYXRoIGQ9Ik0xMC44LDEyLjhMNy4xLDlsMy44LTMuOGwwLDcuNkgxMC44eiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-caret-right: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDE4IDE4Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiIHNoYXBlLXJlbmRlcmluZz0iZ2VvbWV0cmljUHJlY2lzaW9uIj4KICAgIDxwYXRoIGQ9Ik03LjIsNS4yTDEwLjksOWwtMy44LDMuOFY1LjJINy4yeiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-caret-up-empty-thin: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDIwIDIwIj4KCTxnIGNsYXNzPSJqcC1pY29uMyIgZmlsbD0iIzYxNjE2MSIgc2hhcGUtcmVuZGVyaW5nPSJnZW9tZXRyaWNQcmVjaXNpb24iPgoJCTxwb2x5Z29uIGNsYXNzPSJzdDEiIHBvaW50cz0iMTUuNCwxMy4zIDkuOSw3LjcgNC40LDEzLjIgMy42LDEyLjUgOS45LDYuMyAxNi4xLDEyLjYgIi8+Cgk8L2c+Cjwvc3ZnPgo=);--jp-icon-caret-up: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDE4IDE4Ij4KCTxnIGNsYXNzPSJqcC1pY29uMyIgZmlsbD0iIzYxNjE2MSIgc2hhcGUtcmVuZGVyaW5nPSJnZW9tZXRyaWNQcmVjaXNpb24iPgoJCTxwYXRoIGQ9Ik01LjIsMTAuNUw5LDYuOGwzLjgsMy44SDUuMnoiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-case-sensitive: 
url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDIwIDIwIj4KICA8ZyBjbGFzcz0ianAtaWNvbjIiIGZpbGw9IiM0MTQxNDEiPgogICAgPHJlY3QgeD0iMiIgeT0iMiIgd2lkdGg9IjE2IiBoZWlnaHQ9IjE2Ii8+CiAgPC9nPgogIDxnIGNsYXNzPSJqcC1pY29uLWFjY2VudDIiIGZpbGw9IiNGRkYiPgogICAgPHBhdGggZD0iTTcuNiw4aDAuOWwzLjUsOGgtMS4xTDEwLDE0SDZsLTAuOSwySDRMNy42LDh6IE04LDkuMUw2LjQsMTNoMy4yTDgsOS4xeiIvPgogICAgPHBhdGggZD0iTTE2LjYsOS44Yy0wLjIsMC4xLTAuNCwwLjEtMC43LDAuMWMtMC4yLDAtMC40LTAuMS0wLjYtMC4yYy0wLjEtMC4xLTAuMi0wLjQtMC4yLTAuNyBjLTAuMywwLjMtMC42LDAuNS0wLjksMC43Yy0wLjMsMC4xLTAuNywwLjItMS4xLDAuMmMtMC4zLDAtMC41LDAtMC43LTAuMWMtMC4yLTAuMS0wLjQtMC4yLTAuNi0wLjNjLTAuMi0wLjEtMC4zLTAuMy0wLjQtMC41IGMtMC4xLTAuMi0wLjEtMC40LTAuMS0wLjdjMC0wLjMsMC4xLTAuNiwwLjItMC44YzAuMS0wLjIsMC4zLTAuNCwwLjQtMC41QzEyLDcsMTIuMiw2LjksMTIuNSw2LjhjMC4yLTAuMSwwLjUtMC4xLDAuNy0wLjIgYzAuMy0wLjEsMC41LTAuMSwwLjctMC4xYzAuMiwwLDAuNC0wLjEsMC42LTAuMWMwLjIsMCwwLjMtMC4xLDAuNC0wLjJjMC4xLTAuMSwwLjItMC4yLDAuMi0wLjRjMC0xLTEuMS0xLTEuMy0xIGMtMC40LDAtMS40LDAtMS40LDEuMmgtMC45YzAtMC40LDAuMS0wLjcsMC4yLTFjMC4xLTAuMiwwLjMtMC40LDAuNS0wLjZjMC4yLTAuMiwwLjUtMC4zLDAuOC0wLjNDMTMuMyw0LDEzLjYsNCwxMy45LDQgYzAuMywwLDAuNSwwLDAuOCwwLjFjMC4zLDAsMC41LDAuMSwwLjcsMC4yYzAuMiwwLjEsMC40LDAuMywwLjUsMC41QzE2LDUsMTYsNS4yLDE2LDUuNnYyLjljMCwwLjIsMCwwLjQsMCwwLjUgYzAsMC4xLDAuMSwwLjIsMC4zLDAuMmMwLjEsMCwwLjIsMCwwLjMsMFY5Ljh6IE0xNS4yLDYuOWMtMS4yLDAuNi0zLjEsMC4yLTMuMSwxLjRjMCwxLjQsMy4xLDEsMy4xLTAuNVY2Ljl6Ii8+CiAgPC9nPgo8L3N2Zz4K);--jp-icon-check: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMganAtaWNvbi1zZWxlY3RhYmxlIiBmaWxsPSIjNjE2MTYxIj4KICAgIDxwYXRoIGQ9Ik05IDE2LjE3TDQuODMgMTJsLTEuNDIgMS40MUw5IDE5IDIxIDdsLTEuNDEtMS40MXoiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-circle-empty: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTEyIDJDNi40NyAyIDIgNi40NyAyIDEyczQuNDcgMTAgMTAgMTAgMTAtNC40NyAxMC0xMFMxNy41MyAyIDEyIDJ6bTAgMThjLTQuNDEgMC04LTMuNTktOC04czMuNTktOCA4LTggOCAzLjU5IDggOC0zLjU5IDgtOCA4eiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-circle: url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMTggMTgiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPGNpcmNsZSBjeD0iOSIgY3k9IjkiIHI9IjgiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-clear: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8bWFzayBpZD0iZG9udXRIb2xlIj4KICAgIDxyZWN0IHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgZmlsbD0id2hpdGUiIC8+CiAgICA8Y2lyY2xlIGN4PSIxMiIgY3k9IjEyIiByPSI4IiBmaWxsPSJibGFjayIvPgogIDwvbWFzaz4KCiAgPGcgY2xhc3M9ImpwLWljb24zIiBmaWxsPSIjNjE2MTYxIj4KICAgIDxyZWN0IGhlaWdodD0iMTgiIHdpZHRoPSIyIiB4PSIxMSIgeT0iMyIgdHJhbnNmb3JtPSJyb3RhdGUoMzE1LCAxMiwgMTIpIi8+CiAgICA8Y2lyY2xlIGN4PSIxMiIgY3k9IjEyIiByPSIxMCIgbWFzaz0idXJsKCNkb251dEhvbGUpIi8+CiAgPC9nPgo8L3N2Zz4K);--jp-icon-close: 
url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbi1ub25lIGpwLWljb24tc2VsZWN0YWJsZS1pbnZlcnNlIGpwLWljb24zLWhvdmVyIiBmaWxsPSJub25lIj4KICAgIDxjaXJjbGUgY3g9IjEyIiBjeT0iMTIiIHI9IjExIi8+CiAgPC9nPgoKICA8ZyBjbGFzcz0ianAtaWNvbjMganAtaWNvbi1zZWxlY3RhYmxlIGpwLWljb24tYWNjZW50Mi1ob3ZlciIgZmlsbD0iIzYxNjE2MSI+CiAgICA8cGF0aCBkPSJNMTkgNi40MUwxNy41OSA1IDEyIDEwLjU5IDYuNDEgNSA1IDYuNDEgMTAuNTkgMTIgNSAxNy41OSA2LjQxIDE5IDEyIDEzLjQxIDE3LjU5IDE5IDE5IDE3LjU5IDEzLjQxIDEyeiIvPgogIDwvZz4KCiAgPGcgY2xhc3M9ImpwLWljb24tbm9uZSBqcC1pY29uLWJ1c3kiIGZpbGw9Im5vbmUiPgogICAgPGNpcmNsZSBjeD0iMTIiIGN5PSIxMiIgcj0iNyIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-code: url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjIiIGhlaWdodD0iMjIiIHZpZXdCb3g9IjAgMCAyOCAyOCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KCTxnIGNsYXNzPSJqcC1pY29uMyIgZmlsbD0iIzYxNjE2MSI+CgkJPHBhdGggZD0iTTExLjQgMTguNkw2LjggMTRMMTEuNCA5LjRMMTAgOEw0IDE0TDEwIDIwTDExLjQgMTguNlpNMTYuNiAxOC42TDIxLjIgMTRMMTYuNiA5LjRMMTggOEwyNCAxNEwxOCAyMEwxNi42IDE4LjZWMTguNloiLz4KCTwvZz4KPC9zdmc+Cg==);--jp-icon-console: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDIwMCAyMDAiPgogIDxnIGNsYXNzPSJqcC1jb25zb2xlLWljb24tYmFja2dyb3VuZC1jb2xvciBqcC1pY29uLXNlbGVjdGFibGUiIGZpbGw9IiMwMjg4RDEiPgogICAgPHBhdGggZD0iTTIwIDE5LjhoMTYwdjE1OS45SDIweiIvPgogIDwvZz4KICA8ZyBjbGFzcz0ianAtY29uc29sZS1pY29uLWNvbG9yIGpwLWljb24tc2VsZWN0YWJsZS1pbnZlcnNlIiBmaWxsPSIjZmZmIj4KICAgIDxwYXRoIGQ9Ik0xMDUgMTI3LjNoNDB2MTIuOGgtNDB6TTUxLjEgNzdMNzQgOTkuOWwtMjMuMyAyMy4zIDEwLjUgMTAuNSAyMy4zLTIzLjNMOTUgOTkuOSA4NC41IDg5LjQgNjEuNiA2Ni41eiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-copy: url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMTggMTgiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTExLjksMUgzLjJDMi40LDEsMS43LDEuNywxLjcsMi41djEwLjJoMS41VjIuNWg4LjdWMXogTTE0LjEsMy45aC04Yy0wLjgsMC0xLjUsMC43LTEuNSwxLjV2MTAuMmMwLDAuOCwwLjcsMS41LDEuNSwxLjVoOCBjMC44LDAsMS41LTAuNywxLjUtMS41VjUuNEMxNS41LDQuNiwxNC45LDMuOSwxNC4xLDMuOXogTTE0LjEsMTUuNWgtOFY1LjRoOFYxNS41eiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-copyright: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIGVuYWJsZS1iYWNrZ3JvdW5kPSJuZXcgMCAwIDI0IDI0IiBoZWlnaHQ9IjI0IiB2aWV3Qm94PSIwIDAgMjQgMjQiIHdpZHRoPSIyNCI+CiAgPGcgY2xhc3M9ImpwLWljb24zIiBmaWxsPSIjNjE2MTYxIj4KICAgIDxwYXRoIGQ9Ik0xMS44OCw5LjE0YzEuMjgsMC4wNiwxLjYxLDEuMTUsMS42MywxLjY2aDEuNzljLTAuMDgtMS45OC0xLjQ5LTMuMTktMy40NS0zLjE5QzkuNjQsNy42MSw4LDksOCwxMi4xNCBjMCwxLjk0LDAuOTMsNC4yNCwzLjg0LDQuMjRjMi4yMiwwLDMuNDEtMS42NSwzLjQ0LTIuOTVoLTEuNzljLTAuMDMsMC41OS0wLjQ1LDEuMzgtMS42MywxLjQ0QzEwLjU1LDE0LjgzLDEwLDEzLjgxLDEwLDEyLjE0IEMxMCw5LjI1LDExLjI4LDkuMTYsMTEuODgsOS4xNHogTTEyLDJDNi40OCwyLDIsNi40OCwyLDEyczQuNDgsMTAsMTAsMTBzMTAtNC40OCwxMC0xMFMxNy41MiwyLDEyLDJ6IE0xMiwyMGMtNC40MSwwLTgtMy41OS04LTggczMuNTktOCw4LThzOCwzLjU5LDgsOFMxNi40MSwyMCwxMiwyMHoiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-cut: 
url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTkuNjQgNy42NGMuMjMtLjUuMzYtMS4wNS4zNi0xLjY0IDAtMi4yMS0xLjc5LTQtNC00UzIgMy43OSAyIDZzMS43OSA0IDQgNGMuNTkgMCAxLjE0LS4xMyAxLjY0LS4zNkwxMCAxMmwtMi4zNiAyLjM2QzcuMTQgMTQuMTMgNi41OSAxNCA2IDE0Yy0yLjIxIDAtNCAxLjc5LTQgNHMxLjc5IDQgNCA0IDQtMS43OSA0LTRjMC0uNTktLjEzLTEuMTQtLjM2LTEuNjRMMTIgMTRsNyA3aDN2LTFMOS42NCA3LjY0ek02IDhjLTEuMSAwLTItLjg5LTItMnMuOS0yIDItMiAyIC44OSAyIDItLjkgMi0yIDJ6bTAgMTJjLTEuMSAwLTItLjg5LTItMnMuOS0yIDItMiAyIC44OSAyIDItLjkgMi0yIDJ6bTYtNy41Yy0uMjggMC0uNS0uMjItLjUtLjVzLjIyLS41LjUtLjUuNS4yMi41LjUtLjIyLjUtLjUuNXpNMTkgM2wtNiA2IDIgMiA3LTdWM3oiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-delete: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyNCAyNCIgd2lkdGg9IjE2cHgiIGhlaWdodD0iMTZweCI+CiAgICA8cGF0aCBkPSJNMCAwaDI0djI0SDB6IiBmaWxsPSJub25lIiAvPgogICAgPHBhdGggY2xhc3M9ImpwLWljb24zIiBmaWxsPSIjNjI2MjYyIiBkPSJNNiAxOWMwIDEuMS45IDIgMiAyaDhjMS4xIDAgMi0uOSAyLTJWN0g2djEyek0xOSA0aC0zLjVsLTEtMWgtNWwtMSAxSDV2MmgxNFY0eiIgLz4KPC9zdmc+Cg==);--jp-icon-download: url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTE5IDloLTRWM0g5djZINWw3IDcgNy03ek01IDE4djJoMTR2LTJINXoiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-duplicate: url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTQiIGhlaWdodD0iMTQiIHZpZXdCb3g9IjAgMCAxNCAxNCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPHBhdGggY2xhc3M9ImpwLWljb24zIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTIuNzk5OTggMC44NzVIOC44OTU4MkM5LjIwMDYxIDAuODc1IDkuNDQ5OTggMS4xMzkxNCA5LjQ0OTk4IDEuNDYxOThDOS40NDk5OCAxLjc4NDgyIDkuMjAwNjEgMi4wNDg5NiA4Ljg5NTgyIDIuMDQ4OTZIMy4zNTQxNUMzLjA0OTM2IDIuMDQ4OTYgMi43OTk5OCAyLjMxMzEgMi43OTk5OCAyLjYzNTk0VjkuNjc5NjlDMi43OTk5OCAxMC4wMDI1IDIuNTUwNjEgMTAuMjY2NyAyLjI0NTgyIDEwLjI2NjdDMS45NDEwMyAxMC4yNjY3IDEuNjkxNjUgMTAuMDAyNSAxLjY5MTY1IDkuNjc5NjlWMi4wNDg5NkMxLjY5MTY1IDEuNDAzMjggMi4xOTA0IDAuODc1IDIuNzk5OTggMC44NzVaTTUuMzY2NjUgMTEuOVY0LjU1SDExLjA4MzNWMTEuOUg1LjM2NjY1Wk00LjE0MTY1IDQuMTQxNjdDNC4xNDE2NSAzLjY5MDYzIDQuNTA3MjggMy4zMjUgNC45NTgzMiAzLjMyNUgxMS40OTE3QzExLjk0MjcgMy4zMjUgMTIuMzA4MyAzLjY5MDYzIDEyLjMwODMgNC4xNDE2N1YxMi4zMDgzQzEyLjMwODMgMTIuNzU5NCAxMS45NDI3IDEzLjEyNSAxMS40OTE3IDEzLjEyNUg0Ljk1ODMyQzQuNTA3MjggMTMuMTI1IDQuMTQxNjUgMTIuNzU5NCA0LjE0MTY1IDEyLjMwODNWNC4xNDE2N1oiIGZpbGw9IiM2MTYxNjEiLz4KPHBhdGggY2xhc3M9ImpwLWljb24zIiBkPSJNOS40MzU3NCA4LjI2NTA3SDguMzY0MzFWOS4zMzY1QzguMzY0MzEgOS40NTQzNSA4LjI2Nzg4IDkuNTUwNzggOC4xNTAwMiA5LjU1MDc4QzguMDMyMTcgOS41NTA3OCA3LjkzNTc0IDkuNDU0MzUgNy45MzU3NCA5LjMzNjVWOC4yNjUwN0g2Ljg2NDMxQzYuNzQ2NDUgOC4yNjUwNyA2LjY1MDAyIDguMTY4NjQgNi42NTAwMiA4LjA1MDc4QzYuNjUwMDIgNy45MzI5MiA2Ljc0NjQ1IDcuODM2NSA2Ljg2NDMxIDcuODM2NUg3LjkzNTc0VjYuNzY1MDdDNy45MzU3NCA2LjY0NzIxIDguMDMyMTcgNi41NTA3OCA4LjE1MDAyIDYuNTUwNzhDOC4yNjc4OCA2LjU1MDc4IDguMzY0MzEgNi42NDcyMSA4LjM2NDMxIDYuNzY1MDdWNy44MzY1SDkuNDM1NzRDOS41NTM2IDcuODM2NSA5LjY1MDAyIDcuOTMyOTIgOS42NTAwMiA4LjA1MDc4QzkuNjUwMDIgOC4xNjg2NCA5LjU1MzYgOC4yNjUwNyA5LjQzNTc0IDguMjY1MDdaIiBmaWxsPSIjNjE2MTYxIiBzdHJva2U9IiM2MTYxNjEiIHN0cm9rZS13aWR0aD0iMC41Ii8+Cjwvc3ZnPgo=);--jp-icon-edit: 
url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTMgMTcuMjVWMjFoMy43NUwxNy44MSA5Ljk0bC0zLjc1LTMuNzVMMyAxNy4yNXpNMjAuNzEgNy4wNGMuMzktLjM5LjM5LTEuMDIgMC0xLjQxbC0yLjM0LTIuMzRjLS4zOS0uMzktMS4wMi0uMzktMS40MSAwbC0xLjgzIDEuODMgMy43NSAzLjc1IDEuODMtMS44M3oiLz4KICA8L2c+Cjwvc3ZnPgo=);--jp-icon-ellipses: url(data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPGNpcmNsZSBjeD0iNSIgY3k9IjEyIiByPSIyIi8+CiAgICA8Y2lyY2xlIGN4PSIxMiIgY3k9IjEyIiByPSIyIi8+CiAgICA8Y2lyY2xlIGN4PSIxOSIgY3k9IjEyIiByPSIyIi8+CiAgPC9nPgo8L3N2Zz4K);--jp-icon-extension: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTIwLjUgMTFIMTlWN2MwLTEuMS0uOS0yLTItMmgtNFYzLjVDMTMgMi4xMiAxMS44OCAxIDEwLjUgMVM4IDIuMTIgOCAzLjVWNUg0Yy0xLjEgMC0xLjk5LjktMS45OSAydjMuOEgzLjVjMS40OSAwIDIuNyAxLjIxIDIuNyAyLjdzLTEuMjEgMi43LTIuNyAyLjdIMlYyMGMwIDEuMS45IDIgMiAyaDMuOHYtMS41YzAtMS40OSAxLjIxLTIuNyAyLjctMi43IDEuNDkgMCAyLjcgMS4yMSAyLjcgMi43VjIySDE3YzEuMSAwIDItLjkgMi0ydi00aDEuNWMxLjM4IDAgMi41LTEuMTIgMi41LTIuNVMyMS44OCAxMSAyMC41IDExeiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-fast-forward: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICAgIDxnIGNsYXNzPSJqcC1pY29uMyIgZmlsbD0iIzYxNjE2MSI+CiAgICAgICAgPHBhdGggZD0iTTQgMThsOC41LTZMNCA2djEyem05LTEydjEybDguNS02TDEzIDZ6Ii8+CiAgICA8L2c+Cjwvc3ZnPgo=);--jp-icon-file-upload: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTkgMTZoNnYtNmg0bC03LTctNyA3aDR6bS00IDJoMTR2Mkg1eiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-file: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDIyIDIyIj4KICA8cGF0aCBjbGFzcz0ianAtaWNvbjMganAtaWNvbi1zZWxlY3RhYmxlIiBmaWxsPSIjNjE2MTYxIiBkPSJNMTkuMyA4LjJsLTUuNS01LjVjLS4zLS4zLS43LS41LTEuMi0uNUgzLjljLS44LjEtMS42LjktMS42IDEuOHYxNC4xYzAgLjkuNyAxLjYgMS42IDEuNmgxNC4yYy45IDAgMS42LS43IDEuNi0xLjZWOS40Yy4xLS41LS4xLS45LS40LTEuMnptLTUuOC0zLjNsMy40IDMuNmgtMy40VjQuOXptMy45IDEyLjdINC43Yy0uMSAwLS4yIDAtLjItLjJWNC43YzAtLjIuMS0uMy4yLS4zaDcuMnY0LjRzMCAuOC4zIDEuMWMuMy4zIDEuMS4zIDEuMS4zaDQuM3Y3LjJzLS4xLjItLjIuMnoiLz4KPC9zdmc+Cg==);--jp-icon-filter-list: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8ZyBjbGFzcz0ianAtaWNvbjMiIGZpbGw9IiM2MTYxNjEiPgogICAgPHBhdGggZD0iTTEwIDE4aDR2LTJoLTR2MnpNMyA2djJoMThWNkgzem0zIDdoMTJ2LTJINnYyeiIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-folder-favorite: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIGhlaWdodD0iMjRweCIgdmlld0JveD0iMCAwIDI0IDI0IiB3aWR0aD0iMjRweCIgZmlsbD0iIzAwMDAwMCI+CiAgPHBhdGggZD0iTTAgMGgyNHYyNEgwVjB6IiBmaWxsPSJub25lIi8+PHBhdGggY2xhc3M9ImpwLWljb24zIGpwLWljb24tc2VsZWN0YWJsZSIgZmlsbD0iIzYxNjE2MSIgZD0iTTIwIDZoLThsLTItMkg0Yy0xLjEgMC0yIC45LTIgMnYxMmMwIDEuMS45IDIgMiAyaDE2YzEuMSAwIDItLjkgMi0yVjhjMC0xLjEtLjktMi0yLTJ6bS0yLjA2IDExTDE1IDE1LjI4IDEyLjA2IDE3bC43OC0zLjMzLTIuNTktMi4yNCAzLjQxLS4yOUwxNSA4bDEuMzQgMy4xNCAzLjQxLjI5LTIuNTkgMi4yNC43OCAzLjMzeiIvPgo8L3N2Zz4K);--jp-icon-folder: 
url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8cGF0aCBjbGFzcz0ianAtaWNvbjMganAtaWNvbi1zZWxlY3RhYmxlIiBmaWxsPSIjNjE2MTYxIiBkPSJNMTAgNEg0Yy0xLjEgMC0xLjk5LjktMS45OSAyTDIgMThjMCAxLjEuOSAyIDIgMmgxNmMxLjEgMCAyLS45IDItMlY4YzAtMS4xLS45LTItMi0yaC04bC0yLTJ6Ii8+Cjwvc3ZnPgo=);--jp-icon-home: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIGhlaWdodD0iMjRweCIgdmlld0JveD0iMCAwIDI0IDI0IiB3aWR0aD0iMjRweCIgZmlsbD0iIzAwMDAwMCI+CiAgPHBhdGggZD0iTTAgMGgyNHYyNEgweiIgZmlsbD0ibm9uZSIvPjxwYXRoIGNsYXNzPSJqcC1pY29uMyBqcC1pY29uLXNlbGVjdGFibGUiIGZpbGw9IiM2MTYxNjEiIGQ9Ik0xMCAyMHYtNmg0djZoNXYtOGgzTDEyIDMgMiAxMmgzdjh6Ii8+Cjwvc3ZnPgo=);--jp-icon-html5: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDUxMiA1MTIiPgogIDxwYXRoIGNsYXNzPSJqcC1pY29uMCBqcC1pY29uLXNlbGVjdGFibGUiIGZpbGw9IiMwMDAiIGQ9Ik0xMDguNCAwaDIzdjIyLjhoMjEuMlYwaDIzdjY5aC0yM1Y0NmgtMjF2MjNoLTIzLjJNMjA2IDIzaC0yMC4zVjBoNjMuN3YyM0gyMjl2NDZoLTIzbTUzLjUtNjloMjQuMWwxNC44IDI0LjNMMzEzLjIgMGgyNC4xdjY5aC0yM1YzNC44bC0xNi4xIDI0LjgtMTYuMS0yNC44VjY5aC0yMi42bTg5LjItNjloMjN2NDYuMmgzMi42VjY5aC01NS42Ii8+CiAgPHBhdGggY2xhc3M9ImpwLWljb24tc2VsZWN0YWJsZSIgZmlsbD0iI2U0NGQyNiIgZD0iTTEwNy42IDQ3MWwtMzMtMzcwLjRoMzYyLjhsLTMzIDM3MC4yTDI1NS43IDUxMiIvPgogIDxwYXRoIGNsYXNzPSJqcC1pY29uLXNlbGVjdGFibGUiIGZpbGw9IiNmMTY1MjkiIGQ9Ik0yNTYgNDgwLjVWMTMxaDE0OC4zTDM3NiA0NDciLz4KICA8cGF0aCBjbGFzcz0ianAtaWNvbi1zZWxlY3RhYmxlLWludmVyc2UiIGZpbGw9IiNlYmViZWIiIGQ9Ik0xNDIgMTc2LjNoMTE0djQ1LjRoLTY0LjJsNC4yIDQ2LjVoNjB2NDUuM0gxNTQuNG0yIDIyLjhIMjAybDMuMiAzNi4zIDUwLjggMTMuNnY0Ny40bC05My4yLTI2Ii8+CiAgPHBhdGggY2xhc3M9ImpwLWljb24tc2VsZWN0YWJsZS1pbnZlcnNlIiBmaWxsPSIjZmZmIiBkPSJNMzY5LjYgMTc2LjNIMjU1Ljh2NDUuNGgxMDkuNm0tNC4xIDQ2LjVIMjU1Ljh2NDUuNGg1NmwtNS4zIDU5LTUwLjcgMTMuNnY0Ny4ybDkzLTI1LjgiLz4KPC9zdmc+Cg==);--jp-icon-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDIyIDIyIj4KICA8cGF0aCBjbGFzcz0ianAtaWNvbi1icmFuZDQganAtaWNvbi1zZWxlY3RhYmxlLWludmVyc2UiIGZpbGw9IiNGRkYiIGQ9Ik0yLjIgMi4yaDE3LjV2MTcuNUgyLjJ6Ii8+CiAgPHBhdGggY2xhc3M9ImpwLWljb24tYnJhbmQwIGpwLWljb24tc2VsZWN0YWJsZSIgZmlsbD0iIzNGNTFCNSIgZD0iTTIuMiAyLjJ2MTcuNWgxNy41bC4xLTE3LjVIMi4yem0xMi4xIDIuMmMxLjIgMCAyLjIgMSAyLjIgMi4ycy0xIDIuMi0yLjIgMi4yLTIuMi0xLTIuMi0yLjIgMS0yLjIgMi4yLTIuMnpNNC40IDE3LjZsMy4zLTguOCAzLjMgNi42IDIuMi0zLjIgNC40IDUuNEg0LjR6Ii8+Cjwvc3ZnPgo=);--jp-icon-inspector: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8cGF0aCBjbGFzcz0ianAtaW5zcGVjdG9yLWljb24tY29sb3IganAtaWNvbi1zZWxlY3RhYmxlIiBmaWxsPSIjNjE2MTYxIiBkPSJNMjAgNEg0Yy0xLjEgMC0xLjk5LjktMS45OSAyTDIgMThjMCAxLjEuOSAyIDIgMmgxNmMxLjEgMCAyLS45IDItMlY2YzAtMS4xLS45LTItMi0yem0tNSAxNEg0di00aDExdjR6bTAtNUg0VjloMTF2NHptNSA1aC00VjloNHY5eiIvPgo8L3N2Zz4K);--jp-icon-json: 
url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDIyIDIyIj4KICA8ZyBjbGFzcz0ianAtanNvbi1pY29uLWNvbG9yIGpwLWljb24tc2VsZWN0YWJsZSIgZmlsbD0iI0Y5QTgyNSI+CiAgICA8cGF0aCBkPSJNMjAuMiAxMS44Yy0xLjYgMC0xLjcuNS0xLjcgMSAwIC40LjEuOS4xIDEuMy4xLjUuMS45LjEgMS4zIDAgMS43LTEuNCAyLjMtMy41IDIuM2gtLjl2LTEuOWguNWMxLjEgMCAxLjQgMCAxLjQtLjggMC0uMyAwLS42LS4xLTEgMC0uNC0uMS0uOC0uMS0xLjIgMC0xLjMgMC0xLjggMS4zLTItMS4zLS4yLTEuMy0uNy0xLjMtMiAwLS40LjEtLjguMS0xLjIuMS0uNC4xLS43LjEtMSAwLS44LS40LS43LTEuNC0uOGgtLjVWNC4xaC45YzIuMiAwIDMuNS43IDMuNSAyLjMgMCAuNC0uMS45LS4xIDEuMy0uMS41LS4xLjktLjEgMS4zIDAgLjUuMiAxIDEuNyAxdjEuOHpNMS44IDEwLjFjMS42IDAgMS43LS41IDEuNy0xIDAtLjQtLjEtLjktLjEtMS4zLS4xLS41LS4xLS45LS4xLTEuMyAwLTEuNiAxLjQtMi4zIDMuNS0yLjNoLjl2MS45aC0uNWMtMSAwLTEuNCAwLTEuNC44IDAgLjMgMCAuNi4xIDEgMCAuMi4xLjYuMSAxIDAgMS4zIDAgMS44LTEuMyAyQzYgMTEuMiA2IDExLjcgNiAxM2MwIC40LS4xLjgtLjEgMS4yLS4xLjMtLjEuNy0uMSAxIDAgLjguMy44IDEuNC44aC41djEuOWgtLjljLTIuMSAwLTMuNS0uNi0zLjUtMi4zIDAtLjQuMS0uOS4xLTEuMy4xLS41LjEtLjkuMS0xLjMgMC0uNS0uMi0xLTEuNy0xdi0xLjl6Ii8+CiAgICA8Y2lyY2xlIGN4PSIxMSIgY3k9IjEzLjgiIHI9IjIuMSIvPgogICAgPGNpcmNsZSBjeD0iMTEiIGN5PSI4LjIiIHI9IjIuMSIvPgogIDwvZz4KPC9zdmc+Cg==);--jp-icon-julia: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxNiIgdmlld0JveD0iMCAwIDMyNSAzMDAiPgogIDxnIGNsYXNzPSJqcC1icmFuZDAganAtaWNvbi1zZWxlY3RhYmxlIiBmaWxsPSIjY2IzYzMzIj4KICAgIDxwYXRoIGQ9Ik0gMTUwLjg5ODQzOCAyMjUgQyAxNTAuODk4NDM4IDI2Ni40MjE4NzUgMTE3LjMyMDMxMiAzMDAgNzUuODk4NDM4IDMwMCBDIDM0LjQ3NjU2MiAzMDAgMC44OTg0MzggMjY2LjQyMTg3NSAwLjg5ODQzOCAyMjUgQyAwLjg5ODQzOCAxODMuNTc4MTI1IDM0LjQ3NjU2MiAxNTAgNzUuODk4NDM4IDE1MCBDIDExNy4zMjAzMTIgMTUwIDE1MC44OTg0MzggMTgzLjU3ODEyNSAxNTAuODk4NDM4IDIyNSIvPgogIDwvZz4KICA8ZyBjbGFzcz0ianAtYnJhbmQwIGpwLWljb24tc2VsZWN0YWJsZSIgZmlsbD0iIzM4OTgyNiI+CiAgICA8cGF0aCBkPSJNIDIzNy41IDc1IEMgMjM3LjUgMTE2LjQyMTg3NSAyMDMuOTIxODc1IDE1MCAxNjIuNSAxNTAgQyAxMjEuMDc4MTI1IDE1MCA4Ny41IDExNi40MjE4NzUgODcuNSA3NSBDIDg3LjUgMzMuNTc4MTI1IDEyMS4wNzgxMjUgMCAxNjIuNSAwIEMgMjAzLjkyMTg3NSAwIDIzNy41IDMzLjU3ODEyNSAyMzcuNSA3NSIvPgogIDwvZz4KICA8ZyBjbGFzcz0ianAtYnJhbmQwIGpwLWljb24tc2VsZWN0YWJsZSIgZmlsbD0iIzk1NThiMiI+CiAgICA8cGF0aCBkPSJNIDMyNC4xMDE1NjIgMjI1IEMgMzI0LjEwMTU2MiAyNjYuNDIxODc1IDI5MC41MjM0MzggMzAwIDI0OS4xMDE1NjIgMzAwIEMgMjA3LjY3OTY4OCAzMDAgMTc0LjEwMTU2MiAyNjYuNDIxODc1IDE3NC4xMDE1NjIgMjI1IEMgMTc0LjEwMTU2MiAxODMuNTc4MTI1IDIwNy42Nzk2ODggMTUwIDI0OS4xMDE1NjIgMTUwIEMgMjkwLjUyMzQzOCAxNTAgMzI0LjEwMTU2MiAxODMuNTc4MTI1IDMyNC4xMDE1NjIgMjI1Ii8+CiAgPC9nPgo8L3N2Zz4K);--jp-icon-jupyter-favicon: 
.jp-icon2[stroke]{stroke:var(--jp-layout-color2)}.jupyter-wrapper .jp-icon-alt .jp-icon3[stroke]{stroke:var(--jp-layout-color3)}.jupyter-wrapper .jp-icon-alt .jp-icon4[stroke]{stroke:var(--jp-layout-color4)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent0[fill]{fill:var(--jp-inverse-layout-color0)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent1[fill]{fill:var(--jp-inverse-layout-color1)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent2[fill]{fill:var(--jp-inverse-layout-color2)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent3[fill]{fill:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent4[fill]{fill:var(--jp-inverse-layout-color4)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent0[stroke]{stroke:var(--jp-inverse-layout-color0)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent1[stroke]{stroke:var(--jp-inverse-layout-color1)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent2[stroke]{stroke:var(--jp-inverse-layout-color2)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent3[stroke]{stroke:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-icon-alt .jp-icon-accent4[stroke]{stroke:var(--jp-inverse-layout-color4)}.jupyter-wrapper .jp-icon-hoverShow:not(:hover) .jp-icon-hoverShow-content{display:none!important}.jupyter-wrapper .jp-icon-hover :hover .jp-icon0-hover[fill]{fill:var(--jp-inverse-layout-color0)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon1-hover[fill]{fill:var(--jp-inverse-layout-color1)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon2-hover[fill]{fill:var(--jp-inverse-layout-color2)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon3-hover[fill]{fill:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon4-hover[fill]{fill:var(--jp-inverse-layout-color4)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon0-hover[stroke]{stroke:var(--jp-inverse-layout-color0)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon1-hover[stroke]{stroke:var(--jp-inverse-layout-color1)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon2-hover[stroke]{stroke:var(--jp-inverse-layout-color2)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon3-hover[stroke]{stroke:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon4-hover[stroke]{stroke:var(--jp-inverse-layout-color4)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent0-hover[fill]{fill:var(--jp-layout-color0)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent1-hover[fill]{fill:var(--jp-layout-color1)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent2-hover[fill]{fill:var(--jp-layout-color2)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent3-hover[fill]{fill:var(--jp-layout-color3)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent4-hover[fill]{fill:var(--jp-layout-color4)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent0-hover[stroke]{stroke:var(--jp-layout-color0)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent1-hover[stroke]{stroke:var(--jp-layout-color1)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent2-hover[stroke]{stroke:var(--jp-layout-color2)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent3-hover[stroke]{stroke:var(--jp-layout-color3)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-accent4-hover[stroke]{stroke:var(--jp-layout-color4)}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-none-hover[fill]{fill:none}.jupyter-wrapper .jp-icon-hover :hover .jp-icon-none-hover[stroke]{stroke:none}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon0-hover[fill]{fill:var(--jp-layout-color0)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover 
.jp-icon1-hover[fill]{fill:var(--jp-layout-color1)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon2-hover[fill]{fill:var(--jp-layout-color2)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon3-hover[fill]{fill:var(--jp-layout-color3)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon4-hover[fill]{fill:var(--jp-layout-color4)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon0-hover[stroke]{stroke:var(--jp-layout-color0)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon1-hover[stroke]{stroke:var(--jp-layout-color1)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon2-hover[stroke]{stroke:var(--jp-layout-color2)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon3-hover[stroke]{stroke:var(--jp-layout-color3)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon4-hover[stroke]{stroke:var(--jp-layout-color4)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent0-hover[fill]{fill:var(--jp-inverse-layout-color0)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent1-hover[fill]{fill:var(--jp-inverse-layout-color1)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent2-hover[fill]{fill:var(--jp-inverse-layout-color2)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent3-hover[fill]{fill:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent4-hover[fill]{fill:var(--jp-inverse-layout-color4)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent0-hover[stroke]{stroke:var(--jp-inverse-layout-color0)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent1-hover[stroke]{stroke:var(--jp-inverse-layout-color1)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent2-hover[stroke]{stroke:var(--jp-inverse-layout-color2)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent3-hover[stroke]{stroke:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-icon-hover.jp-icon-alt :hover .jp-icon-accent4-hover[stroke]{stroke:var(--jp-inverse-layout-color4)}.jupyter-wrapper .jp-switch{display:flex;align-items:center;padding-left:4px;padding-right:4px;font-size:var(--jp-ui-font-size1);background-color:transparent;color:var(--jp-ui-font-color1);border:none;height:20px}.jupyter-wrapper .jp-switch:hover{background-color:var(--jp-layout-color2)}.jupyter-wrapper .jp-switch-label{margin-right:5px}.jupyter-wrapper .jp-switch-track{cursor:pointer;background-color:var(--jp-switch-color, var(--jp-border-color1));-webkit-transition:.4s;transition:.4s;border-radius:34px;height:16px;width:35px;position:relative}.jupyter-wrapper .jp-switch-track:before{content:\"\";position:absolute;height:10px;width:10px;margin:3px;left:0;background-color:var(--jp-ui-inverse-font-color1);-webkit-transition:.4s;transition:.4s;border-radius:50%}.jupyter-wrapper .jp-switch[aria-checked=true] .jp-switch-track{background-color:var(--jp-switch-true-position-color, var(--jp-warn-color0))}.jupyter-wrapper .jp-switch[aria-checked=true] .jp-switch-track:before{left:19px}.jupyter-wrapper html{box-sizing:unset}.jupyter-wrapper *,.jupyter-wrapper *:before,.jupyter-wrapper *:after{box-sizing:unset}.jupyter-wrapper body{color:unset;font-family:var(--jp-ui-font-family)}.jupyter-wrapper :focus{outline:unset;outline-offset:unset;-moz-outline-radius:unset}.jupyter-wrapper .jp-Button{border-radius:var(--jp-border-radius);padding:0 12px;font-size:var(--jp-ui-font-size1)}.jupyter-wrapper 
button.jp-Button.bp3-button.bp3-minimal:hover{background-color:var(--jp-layout-color2)}.jupyter-wrapper .jp-Button.minimal{color:unset!important}.jupyter-wrapper .jp-Button.jp-ToolbarButtonComponent{text-transform:none}.jupyter-wrapper .jp-InputGroup input{box-sizing:border-box;border-radius:0;background-color:transparent;color:var(--jp-ui-font-color0);box-shadow:inset 0 0 0 var(--jp-border-width) var(--jp-input-border-color)}.jupyter-wrapper .jp-InputGroup input:focus{box-shadow:inset 0 0 0 var(--jp-border-width) var(--jp-input-active-box-shadow-color),inset 0 0 0 3px var(--jp-input-active-box-shadow-color)}.jupyter-wrapper .jp-InputGroup input::placeholder,.jupyter-wrapper input::placeholder{color:var(--jp-ui-font-color3)}.jupyter-wrapper .jp-BPIcon{display:inline-block;vertical-align:middle;margin:auto}.jupyter-wrapper .bp3-icon.jp-BPIcon>svg:not([fill]){fill:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-InputGroupAction{padding:6px}.jupyter-wrapper .jp-HTMLSelect.jp-DefaultStyle select{background-color:initial;border:none;border-radius:0;box-shadow:none;color:var(--jp-ui-font-color0);display:block;font-size:var(--jp-ui-font-size1);height:24px;line-height:14px;padding:0 25px 0 10px;text-align:left;-moz-appearance:none;-webkit-appearance:none}.jupyter-wrapper .jp-HTMLSelect.jp-DefaultStyle select:hover,.jupyter-wrapper .jp-HTMLSelect.jp-DefaultStyle select>option{background-color:var(--jp-layout-color2);color:var(--jp-ui-font-color0)}.jupyter-wrapper select{box-sizing:border-box}.jupyter-wrapper .jp-Collapse{display:flex;flex-direction:column;align-items:stretch;border-top:1px solid var(--jp-border-color2);border-bottom:1px solid var(--jp-border-color2)}.jupyter-wrapper .jp-Collapse-header{padding:1px 12px;color:var(--jp-ui-font-color1);background-color:var(--jp-layout-color1);font-size:var(--jp-ui-font-size2)}.jupyter-wrapper .jp-Collapse-header:hover{background-color:var(--jp-layout-color2)}.jupyter-wrapper .jp-Collapse-contents{padding:0 12px;background-color:var(--jp-layout-color1);color:var(--jp-ui-font-color1);overflow:auto}.jupyter-wrapper :root{--jp-private-commandpalette-search-height: 28px}.jupyter-wrapper .lm-CommandPalette{padding-bottom:0;color:var(--jp-ui-font-color1);background:var(--jp-layout-color1);font-size:var(--jp-ui-font-size1)}.jupyter-wrapper .jp-ModalCommandPalette{position:absolute;z-index:10000;top:38px;left:30%;margin:0;padding:4px;width:40%;box-shadow:var(--jp-elevation-z4);border-radius:4px;background:var(--jp-layout-color0)}.jupyter-wrapper .jp-ModalCommandPalette .lm-CommandPalette{max-height:40vh}.jupyter-wrapper .jp-ModalCommandPalette .lm-CommandPalette .lm-close-icon:after{display:none}.jupyter-wrapper .jp-ModalCommandPalette .lm-CommandPalette .lm-CommandPalette-header{display:none}.jupyter-wrapper .jp-ModalCommandPalette .lm-CommandPalette .lm-CommandPalette-item{margin-left:4px;margin-right:4px}.jupyter-wrapper .jp-ModalCommandPalette .lm-CommandPalette .lm-CommandPalette-item.lm-mod-disabled{display:none}.jupyter-wrapper .lm-CommandPalette-search{padding:4px;background-color:var(--jp-layout-color1);z-index:2}.jupyter-wrapper .lm-CommandPalette-wrapper{overflow:overlay;padding:0 9px;background-color:var(--jp-input-active-background);height:30px;box-shadow:inset 0 0 0 var(--jp-border-width) var(--jp-input-border-color)}.jupyter-wrapper .lm-CommandPalette.lm-mod-focused .lm-CommandPalette-wrapper{box-shadow:inset 0 0 0 1px var(--jp-input-active-box-shadow-color),inset 0 0 0 3px var(--jp-input-active-box-shadow-color)}.jupyter-wrapper 
.jp-SearchIconGroup{color:#fff;background-color:var(--jp-brand-color1);position:absolute;top:4px;right:4px;padding:5px 5px 1px}.jupyter-wrapper .jp-SearchIconGroup svg{height:20px;width:20px}.jupyter-wrapper .jp-SearchIconGroup .jp-icon3[fill]{fill:var(--jp-layout-color0)}.jupyter-wrapper .lm-CommandPalette-input{background:transparent;width:calc(100% - 18px);float:left;border:none;outline:none;font-size:var(--jp-ui-font-size1);color:var(--jp-ui-font-color0);line-height:var(--jp-private-commandpalette-search-height)}.jupyter-wrapper .lm-CommandPalette-input::-webkit-input-placeholder,.jupyter-wrapper .lm-CommandPalette-input::-moz-placeholder,.jupyter-wrapper .lm-CommandPalette-input:-ms-input-placeholder{color:var(--jp-ui-font-color2);font-size:var(--jp-ui-font-size1)}.jupyter-wrapper .lm-CommandPalette-header:first-child{margin-top:0}.jupyter-wrapper .lm-CommandPalette-header{border-bottom:solid var(--jp-border-width) var(--jp-border-color2);color:var(--jp-ui-font-color1);cursor:pointer;display:flex;font-size:var(--jp-ui-font-size0);font-weight:600;letter-spacing:1px;margin-top:8px;padding:8px 0 8px 12px;text-transform:uppercase}.jupyter-wrapper .lm-CommandPalette-header.lm-mod-active{background:var(--jp-layout-color2)}.jupyter-wrapper .lm-CommandPalette-header>mark{background-color:transparent;font-weight:700;color:var(--jp-ui-font-color1)}.jupyter-wrapper .lm-CommandPalette-item{padding:4px 12px 4px 4px;color:var(--jp-ui-font-color1);font-size:var(--jp-ui-font-size1);font-weight:400;display:flex}.jupyter-wrapper .lm-CommandPalette-item.lm-mod-disabled{color:var(--jp-ui-font-color2)}.jupyter-wrapper .lm-CommandPalette-item.lm-mod-active{color:var(--jp-ui-inverse-font-color1);background:var(--jp-brand-color1)}.jupyter-wrapper .lm-CommandPalette-item.lm-mod-active .jp-icon-selectable[fill]{fill:var(--jp-layout-color0)}.jupyter-wrapper .lm-CommandPalette-item.lm-mod-active .lm-CommandPalette-itemLabel>mark{color:var(--jp-ui-inverse-font-color0)}.jupyter-wrapper .lm-CommandPalette-item.lm-mod-active:hover:not(.lm-mod-disabled){color:var(--jp-ui-inverse-font-color1);background:var(--jp-brand-color1)}.jupyter-wrapper .lm-CommandPalette-item:hover:not(.lm-mod-active):not(.lm-mod-disabled){background:var(--jp-layout-color2)}.jupyter-wrapper .lm-CommandPalette-itemContent{overflow:hidden}.jupyter-wrapper .lm-CommandPalette-itemLabel>mark{color:var(--jp-ui-font-color0);background-color:transparent;font-weight:700}.jupyter-wrapper .lm-CommandPalette-item.lm-mod-disabled mark{color:var(--jp-ui-font-color2)}.jupyter-wrapper .lm-CommandPalette-item .lm-CommandPalette-itemIcon{margin:0 4px 0 0;position:relative;width:16px;top:2px;flex:0 0 auto}.jupyter-wrapper .lm-CommandPalette-item.lm-mod-disabled .lm-CommandPalette-itemIcon{opacity:.6}.jupyter-wrapper .lm-CommandPalette-item .lm-CommandPalette-itemShortcut{flex:0 0 auto}.jupyter-wrapper .lm-CommandPalette-itemCaption{display:none}.jupyter-wrapper .lm-CommandPalette-content{background-color:var(--jp-layout-color1)}.jupyter-wrapper .lm-CommandPalette-content:empty:after{content:\"No results\";margin:20px auto auto;width:100px;display:block;font-size:var(--jp-ui-font-size2);font-family:var(--jp-ui-font-family);font-weight:lighter}.jupyter-wrapper .lm-CommandPalette-emptyMessage{text-align:center;margin-top:24px;line-height:1.32;padding:0 8px;color:var(--jp-content-font-color3)}.jupyter-wrapper 
.jp-Dialog{position:absolute;z-index:10000;display:flex;flex-direction:column;align-items:center;justify-content:center;top:0;left:0;margin:0;padding:0;width:100%;height:100%;background:var(--jp-dialog-background)}.jupyter-wrapper .jp-Dialog-content{display:flex;flex-direction:column;margin-left:auto;margin-right:auto;background:var(--jp-layout-color1);padding:24px 24px 12px;min-width:300px;min-height:150px;max-width:1000px;max-height:500px;box-sizing:border-box;box-shadow:var(--jp-elevation-z20);word-wrap:break-word;border-radius:var(--jp-border-radius);font-size:var(--jp-ui-font-size1);color:var(--jp-ui-font-color1);resize:both}.jupyter-wrapper .jp-Dialog-content.jp-Dialog-content-small{max-width:500px}.jupyter-wrapper .jp-Dialog-button{overflow:visible}.jupyter-wrapper button.jp-Dialog-button:focus{outline:1px solid var(--jp-brand-color1);outline-offset:4px;-moz-outline-radius:0px}.jupyter-wrapper button.jp-Dialog-button:focus::-moz-focus-inner{border:0}.jupyter-wrapper button.jp-Dialog-button.jp-mod-styled.jp-mod-accept:focus,.jupyter-wrapper button.jp-Dialog-button.jp-mod-styled.jp-mod-warn:focus,.jupyter-wrapper button.jp-Dialog-button.jp-mod-styled.jp-mod-reject:focus{outline-offset:4px;-moz-outline-radius:0px}.jupyter-wrapper button.jp-Dialog-button.jp-mod-styled.jp-mod-accept:focus{outline:1px solid var(--md-blue-700)}.jupyter-wrapper button.jp-Dialog-button.jp-mod-styled.jp-mod-warn:focus{outline:1px solid var(--md-red-600)}.jupyter-wrapper button.jp-Dialog-button.jp-mod-styled.jp-mod-reject:focus{outline:1px solid var(--md-grey-700)}.jupyter-wrapper button.jp-Dialog-close-button{padding:0;height:100%;min-width:unset;min-height:unset}.jupyter-wrapper .jp-Dialog-header{display:flex;justify-content:space-between;flex:0 0 auto;padding-bottom:12px;font-size:var(--jp-ui-font-size3);font-weight:400;color:var(--jp-ui-font-color0)}.jupyter-wrapper .jp-Dialog-body{display:flex;flex-direction:column;flex:1 1 auto;font-size:var(--jp-ui-font-size1);background:var(--jp-layout-color1);overflow:auto}.jupyter-wrapper .jp-Dialog-footer{display:flex;flex-direction:row;justify-content:flex-end;align-items:center;flex:0 0 auto;margin-left:-12px;margin-right:-12px;padding:12px}.jupyter-wrapper .jp-Dialog-checkbox{padding-right:5px}.jupyter-wrapper .jp-Dialog-checkbox>input:focus-visible{outline:1px solid var(--jp-input-active-border-color);outline-offset:1px}.jupyter-wrapper .jp-Dialog-spacer{flex:1 1 auto}.jupyter-wrapper .jp-Dialog-title{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.jupyter-wrapper .jp-Dialog-body>.jp-select-wrapper{width:100%}.jupyter-wrapper .jp-Dialog-body>button{padding:0 16px}.jupyter-wrapper .jp-Dialog-body>label{line-height:1.4;color:var(--jp-ui-font-color0)}.jupyter-wrapper .jp-Dialog-button.jp-mod-styled:not(:last-child){margin-right:12px}.jupyter-wrapper .jp-HoverBox{position:fixed}.jupyter-wrapper .jp-HoverBox.jp-mod-outofview{display:none}.jupyter-wrapper .jp-IFrame{width:100%;height:100%}.jupyter-wrapper .jp-IFrame>iframe{border:none}.jupyter-wrapper body.lm-mod-override-cursor .jp-IFrame{position:relative}.jupyter-wrapper body.lm-mod-override-cursor .jp-IFrame:before{content:\"\";position:absolute;top:0;left:0;right:0;bottom:0;background:transparent}.jupyter-wrapper .jp-Input-Boolean-Dialog{flex-direction:row-reverse;align-items:end;width:100%}.jupyter-wrapper .jp-Input-Boolean-Dialog>label{flex:1 1 auto}.jupyter-wrapper .jp-MainAreaWidget>:focus{outline:none}.jupyter-wrapper .jp-MainAreaWidget .jp-MainAreaWidget-error{padding:6px}.jupyter-wrapper 
.jp-MainAreaWidget .jp-MainAreaWidget-error>pre{width:auto;padding:10px;background:var(--jp-error-color3);border:var(--jp-border-width) solid var(--jp-error-color1);border-radius:var(--jp-border-radius);color:var(--jp-ui-font-color1);font-size:var(--jp-ui-font-size1);white-space:pre-wrap;word-wrap:break-word}.jupyter-wrapper .jp-MainAreaWidget{contain:strict}.jupyter-wrapper :root{--md-red-50: #ffebee;--md-red-100: #ffcdd2;--md-red-200: #ef9a9a;--md-red-300: #e57373;--md-red-400: #ef5350;--md-red-500: #f44336;--md-red-600: #e53935;--md-red-700: #d32f2f;--md-red-800: #c62828;--md-red-900: #b71c1c;--md-red-A100: #ff8a80;--md-red-A200: #ff5252;--md-red-A400: #ff1744;--md-red-A700: #d50000;--md-pink-50: #fce4ec;--md-pink-100: #f8bbd0;--md-pink-200: #f48fb1;--md-pink-300: #f06292;--md-pink-400: #ec407a;--md-pink-500: #e91e63;--md-pink-600: #d81b60;--md-pink-700: #c2185b;--md-pink-800: #ad1457;--md-pink-900: #880e4f;--md-pink-A100: #ff80ab;--md-pink-A200: #ff4081;--md-pink-A400: #f50057;--md-pink-A700: #c51162;--md-purple-50: #f3e5f5;--md-purple-100: #e1bee7;--md-purple-200: #ce93d8;--md-purple-300: #ba68c8;--md-purple-400: #ab47bc;--md-purple-500: #9c27b0;--md-purple-600: #8e24aa;--md-purple-700: #7b1fa2;--md-purple-800: #6a1b9a;--md-purple-900: #4a148c;--md-purple-A100: #ea80fc;--md-purple-A200: #e040fb;--md-purple-A400: #d500f9;--md-purple-A700: #aa00ff;--md-deep-purple-50: #ede7f6;--md-deep-purple-100: #d1c4e9;--md-deep-purple-200: #b39ddb;--md-deep-purple-300: #9575cd;--md-deep-purple-400: #7e57c2;--md-deep-purple-500: #673ab7;--md-deep-purple-600: #5e35b1;--md-deep-purple-700: #512da8;--md-deep-purple-800: #4527a0;--md-deep-purple-900: #311b92;--md-deep-purple-A100: #b388ff;--md-deep-purple-A200: #7c4dff;--md-deep-purple-A400: #651fff;--md-deep-purple-A700: #6200ea;--md-indigo-50: #e8eaf6;--md-indigo-100: #c5cae9;--md-indigo-200: #9fa8da;--md-indigo-300: #7986cb;--md-indigo-400: #5c6bc0;--md-indigo-500: #3f51b5;--md-indigo-600: #3949ab;--md-indigo-700: #303f9f;--md-indigo-800: #283593;--md-indigo-900: #1a237e;--md-indigo-A100: #8c9eff;--md-indigo-A200: #536dfe;--md-indigo-A400: #3d5afe;--md-indigo-A700: #304ffe;--md-blue-50: #e3f2fd;--md-blue-100: #bbdefb;--md-blue-200: #90caf9;--md-blue-300: #64b5f6;--md-blue-400: #42a5f5;--md-blue-500: #2196f3;--md-blue-600: #1e88e5;--md-blue-700: #1976d2;--md-blue-800: #1565c0;--md-blue-900: #0d47a1;--md-blue-A100: #82b1ff;--md-blue-A200: #448aff;--md-blue-A400: #2979ff;--md-blue-A700: #2962ff;--md-light-blue-50: #e1f5fe;--md-light-blue-100: #b3e5fc;--md-light-blue-200: #81d4fa;--md-light-blue-300: #4fc3f7;--md-light-blue-400: #29b6f6;--md-light-blue-500: #03a9f4;--md-light-blue-600: #039be5;--md-light-blue-700: #0288d1;--md-light-blue-800: #0277bd;--md-light-blue-900: #01579b;--md-light-blue-A100: #80d8ff;--md-light-blue-A200: #40c4ff;--md-light-blue-A400: #00b0ff;--md-light-blue-A700: #0091ea;--md-cyan-50: #e0f7fa;--md-cyan-100: #b2ebf2;--md-cyan-200: #80deea;--md-cyan-300: #4dd0e1;--md-cyan-400: #26c6da;--md-cyan-500: #00bcd4;--md-cyan-600: #00acc1;--md-cyan-700: #0097a7;--md-cyan-800: #00838f;--md-cyan-900: #006064;--md-cyan-A100: #84ffff;--md-cyan-A200: #18ffff;--md-cyan-A400: #00e5ff;--md-cyan-A700: #00b8d4;--md-teal-50: #e0f2f1;--md-teal-100: #b2dfdb;--md-teal-200: #80cbc4;--md-teal-300: #4db6ac;--md-teal-400: #26a69a;--md-teal-500: #009688;--md-teal-600: #00897b;--md-teal-700: #00796b;--md-teal-800: #00695c;--md-teal-900: #004d40;--md-teal-A100: #a7ffeb;--md-teal-A200: #64ffda;--md-teal-A400: #1de9b6;--md-teal-A700: #00bfa5;--md-green-50: 
#e8f5e9;--md-green-100: #c8e6c9;--md-green-200: #a5d6a7;--md-green-300: #81c784;--md-green-400: #66bb6a;--md-green-500: #4caf50;--md-green-600: #43a047;--md-green-700: #388e3c;--md-green-800: #2e7d32;--md-green-900: #1b5e20;--md-green-A100: #b9f6ca;--md-green-A200: #69f0ae;--md-green-A400: #00e676;--md-green-A700: #00c853;--md-light-green-50: #f1f8e9;--md-light-green-100: #dcedc8;--md-light-green-200: #c5e1a5;--md-light-green-300: #aed581;--md-light-green-400: #9ccc65;--md-light-green-500: #8bc34a;--md-light-green-600: #7cb342;--md-light-green-700: #689f38;--md-light-green-800: #558b2f;--md-light-green-900: #33691e;--md-light-green-A100: #ccff90;--md-light-green-A200: #b2ff59;--md-light-green-A400: #76ff03;--md-light-green-A700: #64dd17;--md-lime-50: #f9fbe7;--md-lime-100: #f0f4c3;--md-lime-200: #e6ee9c;--md-lime-300: #dce775;--md-lime-400: #d4e157;--md-lime-500: #cddc39;--md-lime-600: #c0ca33;--md-lime-700: #afb42b;--md-lime-800: #9e9d24;--md-lime-900: #827717;--md-lime-A100: #f4ff81;--md-lime-A200: #eeff41;--md-lime-A400: #c6ff00;--md-lime-A700: #aeea00;--md-yellow-50: #fffde7;--md-yellow-100: #fff9c4;--md-yellow-200: #fff59d;--md-yellow-300: #fff176;--md-yellow-400: #ffee58;--md-yellow-500: #ffeb3b;--md-yellow-600: #fdd835;--md-yellow-700: #fbc02d;--md-yellow-800: #f9a825;--md-yellow-900: #f57f17;--md-yellow-A100: #ffff8d;--md-yellow-A200: #ffff00;--md-yellow-A400: #ffea00;--md-yellow-A700: #ffd600;--md-amber-50: #fff8e1;--md-amber-100: #ffecb3;--md-amber-200: #ffe082;--md-amber-300: #ffd54f;--md-amber-400: #ffca28;--md-amber-500: #ffc107;--md-amber-600: #ffb300;--md-amber-700: #ffa000;--md-amber-800: #ff8f00;--md-amber-900: #ff6f00;--md-amber-A100: #ffe57f;--md-amber-A200: #ffd740;--md-amber-A400: #ffc400;--md-amber-A700: #ffab00;--md-orange-50: #fff3e0;--md-orange-100: #ffe0b2;--md-orange-200: #ffcc80;--md-orange-300: #ffb74d;--md-orange-400: #ffa726;--md-orange-500: #ff9800;--md-orange-600: #fb8c00;--md-orange-700: #f57c00;--md-orange-800: #ef6c00;--md-orange-900: #e65100;--md-orange-A100: #ffd180;--md-orange-A200: #ffab40;--md-orange-A400: #ff9100;--md-orange-A700: #ff6d00;--md-deep-orange-50: #fbe9e7;--md-deep-orange-100: #ffccbc;--md-deep-orange-200: #ffab91;--md-deep-orange-300: #ff8a65;--md-deep-orange-400: #ff7043;--md-deep-orange-500: #ff5722;--md-deep-orange-600: #f4511e;--md-deep-orange-700: #e64a19;--md-deep-orange-800: #d84315;--md-deep-orange-900: #bf360c;--md-deep-orange-A100: #ff9e80;--md-deep-orange-A200: #ff6e40;--md-deep-orange-A400: #ff3d00;--md-deep-orange-A700: #dd2c00;--md-brown-50: #efebe9;--md-brown-100: #d7ccc8;--md-brown-200: #bcaaa4;--md-brown-300: #a1887f;--md-brown-400: #8d6e63;--md-brown-500: #795548;--md-brown-600: #6d4c41;--md-brown-700: #5d4037;--md-brown-800: #4e342e;--md-brown-900: #3e2723;--md-grey-50: #fafafa;--md-grey-100: #f5f5f5;--md-grey-200: #eeeeee;--md-grey-300: #e0e0e0;--md-grey-400: #bdbdbd;--md-grey-500: #9e9e9e;--md-grey-600: #757575;--md-grey-700: #616161;--md-grey-800: #424242;--md-grey-900: #212121;--md-blue-grey-50: #eceff1;--md-blue-grey-100: #cfd8dc;--md-blue-grey-200: #b0bec5;--md-blue-grey-300: #90a4ae;--md-blue-grey-400: #78909c;--md-blue-grey-500: #607d8b;--md-blue-grey-600: #546e7a;--md-blue-grey-700: #455a64;--md-blue-grey-800: #37474f;--md-blue-grey-900: #263238}.jupyter-wrapper .jp-Spinner{position:absolute;display:flex;justify-content:center;align-items:center;z-index:10;left:0;top:0;width:100%;height:100%;background:var(--jp-layout-color0);outline:none}.jupyter-wrapper .jp-SpinnerContent{font-size:10px;margin:50px 
auto;text-indent:-9999em;width:3em;height:3em;border-radius:50%;background:var(--jp-brand-color3);background:linear-gradient(to right,#f37626 10%,rgba(255,255,255,0) 42%);position:relative;animation:load3 1s infinite linear,fadeIn 1s}.jupyter-wrapper .jp-SpinnerContent:before{width:50%;height:50%;background:#f37626;border-radius:100% 0 0;position:absolute;top:0;left:0;content:\"\"}.jupyter-wrapper .jp-SpinnerContent:after{background:var(--jp-layout-color0);width:75%;height:75%;border-radius:50%;content:\"\";margin:auto;position:absolute;top:0;left:0;bottom:0;right:0}@keyframes fadeIn{0%{opacity:0}to{opacity:1}}@keyframes load3{0%{transform:rotate(0)}to{transform:rotate(360deg)}}.jupyter-wrapper button.jp-mod-styled{font-size:var(--jp-ui-font-size1);color:var(--jp-ui-font-color0);border:none;box-sizing:border-box;text-align:center;line-height:32px;height:32px;padding:0 12px;letter-spacing:.8px;outline:none;appearance:none;-webkit-appearance:none;-moz-appearance:none}.jupyter-wrapper input.jp-mod-styled{background:var(--jp-input-background);height:28px;box-sizing:border-box;border:var(--jp-border-width) solid var(--jp-border-color1);padding-left:7px;padding-right:7px;font-size:var(--jp-ui-font-size2);color:var(--jp-ui-font-color0);outline:none;appearance:none;-webkit-appearance:none;-moz-appearance:none}.jupyter-wrapper input[type=checkbox].jp-mod-styled{appearance:checkbox;-webkit-appearance:checkbox;-moz-appearance:checkbox;height:auto}.jupyter-wrapper input.jp-mod-styled:focus{border:var(--jp-border-width) solid var(--md-blue-500);box-shadow:inset 0 0 4px var(--md-blue-300)}.jupyter-wrapper .jp-FileDialog-Checkbox{margin-top:35px;display:flex;flex-direction:row;align-items:end;width:100%}.jupyter-wrapper .jp-FileDialog-Checkbox>label{flex:1 1 auto}.jupyter-wrapper .jp-select-wrapper{display:flex;position:relative;flex-direction:column;padding:1px;background-color:var(--jp-layout-color1);height:28px;box-sizing:border-box;margin-bottom:12px}.jupyter-wrapper .jp-select-wrapper.jp-mod-focused select.jp-mod-styled{border:var(--jp-border-width) solid var(--jp-input-active-border-color);box-shadow:var(--jp-input-box-shadow);background-color:var(--jp-input-active-background)}.jupyter-wrapper select.jp-mod-styled:hover{background-color:var(--jp-layout-color1);cursor:pointer;color:var(--jp-ui-font-color0);background-color:var(--jp-input-hover-background);box-shadow:inset 0 0 1px #00000080}.jupyter-wrapper select.jp-mod-styled{flex:1 1 auto;height:32px;width:100%;font-size:var(--jp-ui-font-size2);background:var(--jp-input-background);color:var(--jp-ui-font-color0);padding:0 25px 0 8px;border:var(--jp-border-width) solid var(--jp-input-border-color);border-radius:0;outline:none;appearance:none;-webkit-appearance:none;-moz-appearance:none}.jupyter-wrapper :root{--jp-private-toolbar-height: calc( 28px + var(--jp-border-width) )}.jupyter-wrapper .jp-Toolbar{color:var(--jp-ui-font-color1);flex:0 0 auto;display:flex;flex-direction:row;border-bottom:var(--jp-border-width) solid var(--jp-toolbar-border-color);box-shadow:var(--jp-toolbar-box-shadow);background:var(--jp-toolbar-background);min-height:var(--jp-toolbar-micro-height);padding:2px;z-index:8;overflow-x:hidden}.jupyter-wrapper .jp-Toolbar>.jp-Toolbar-item.jp-Toolbar-spacer{flex-grow:1;flex-shrink:1}.jupyter-wrapper .jp-Toolbar-item.jp-Toolbar-kernelStatus{display:inline-block;width:32px;background-repeat:no-repeat;background-position:center;background-size:16px}.jupyter-wrapper .jp-Toolbar>.jp-Toolbar-item{flex:0 0 
auto;display:flex;padding-left:1px;padding-right:1px;font-size:var(--jp-ui-font-size1);line-height:var(--jp-private-toolbar-height);height:100%}.jupyter-wrapper div.jp-ToolbarButton{color:transparent;border:none;box-sizing:border-box;outline:none;appearance:none;-webkit-appearance:none;-moz-appearance:none;padding:0;margin:0}.jupyter-wrapper button.jp-ToolbarButtonComponent{background:var(--jp-layout-color1);border:none;box-sizing:border-box;outline:none;appearance:none;-webkit-appearance:none;-moz-appearance:none;padding:0 6px;margin:0;height:24px;border-radius:var(--jp-border-radius);display:flex;align-items:center;text-align:center;font-size:14px;min-width:unset;min-height:unset}.jupyter-wrapper button.jp-ToolbarButtonComponent:disabled{opacity:.4}.jupyter-wrapper button.jp-ToolbarButtonComponent span{padding:0;flex:0 0 auto}.jupyter-wrapper button.jp-ToolbarButtonComponent .jp-ToolbarButtonComponent-label{font-size:var(--jp-ui-font-size1);line-height:100%;padding-left:2px;color:var(--jp-ui-font-color1)}.jupyter-wrapper #jp-main-dock-panel[data-mode=single-document] .jp-MainAreaWidget>.jp-Toolbar.jp-Toolbar-micro{padding:0;min-height:0}.jupyter-wrapper #jp-main-dock-panel[data-mode=single-document] .jp-MainAreaWidget>.jp-Toolbar{border:none;box-shadow:none}.jupyter-wrapper body.p-mod-override-cursor *,.jupyter-wrapper body.lm-mod-override-cursor *{cursor:inherit!important}.jupyter-wrapper .jp-JSONEditor{display:flex;flex-direction:column;width:100%}.jupyter-wrapper .jp-JSONEditor-host{flex:1 1 auto;border:var(--jp-border-width) solid var(--jp-input-border-color);border-radius:0;background:var(--jp-layout-color0);min-height:50px;padding:1px}.jupyter-wrapper .jp-JSONEditor.jp-mod-error .jp-JSONEditor-host{border-color:red;outline-color:red}.jupyter-wrapper .jp-JSONEditor-header{display:flex;flex:1 0 auto;padding:0 0 0 12px}.jupyter-wrapper .jp-JSONEditor-header label{flex:0 0 auto}.jupyter-wrapper .jp-JSONEditor-commitButton{height:16px;width:16px;background-size:18px;background-repeat:no-repeat;background-position:center}.jupyter-wrapper .jp-JSONEditor-host.jp-mod-focused{background-color:var(--jp-input-active-background);border:1px solid var(--jp-input-active-border-color);box-shadow:var(--jp-input-box-shadow)}.jupyter-wrapper .jp-Editor.jp-mod-dropTarget{border:var(--jp-border-width) solid var(--jp-input-active-border-color);box-shadow:var(--jp-input-box-shadow)}.jupyter-wrapper .jp-Statusbar-ProgressCircle svg{display:block;margin:0 auto;width:16px;height:24px;align-self:normal}.jupyter-wrapper .jp-Statusbar-ProgressCircle path{fill:var(--jp-inverse-layout-color3)}.jupyter-wrapper .jp-Statusbar-ProgressBar-progress-bar{height:10px;width:100px;border:solid .25px var(--jp-brand-color2);border-radius:3px;overflow:hidden;align-self:center}.jupyter-wrapper .jp-Statusbar-ProgressBar-progress-bar>div{background-color:var(--jp-brand-color2);background-image:linear-gradient(-45deg,rgba(255,255,255,.2) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.2) 50%,rgba(255,255,255,.2) 75%,transparent 75%,transparent);background-size:40px 40px;float:left;width:0%;height:100%;font-size:12px;line-height:14px;color:#fff;text-align:center;animation:jp-Statusbar-ExecutionTime-progress-bar 2s linear infinite}.jupyter-wrapper .jp-Statusbar-ProgressBar-progress-bar p{color:var(--jp-ui-font-color1);font-family:var(--jp-ui-font-family);font-size:var(--jp-ui-font-size1);line-height:10px;width:100px}@keyframes jp-Statusbar-ExecutionTime-progress-bar{0%{background-position:0 0}to{background-position:40px 
40px}}.jupyter-wrapper .CodeMirror{font-family:monospace;height:300px;color:#000;direction:ltr}.jupyter-wrapper .CodeMirror-lines{padding:4px 0}.jupyter-wrapper .CodeMirror pre.CodeMirror-line,.jupyter-wrapper .CodeMirror pre.CodeMirror-line-like{padding:0 4px}.jupyter-wrapper .CodeMirror-scrollbar-filler,.jupyter-wrapper .CodeMirror-gutter-filler{background-color:#fff}.jupyter-wrapper .CodeMirror-gutters{border-right:1px solid #ddd;background-color:#f7f7f7;white-space:nowrap}.jupyter-wrapper .CodeMirror-linenumber{padding:0 3px 0 5px;min-width:20px;text-align:right;color:#999;white-space:nowrap}.jupyter-wrapper .CodeMirror-guttermarker{color:#000}.jupyter-wrapper .CodeMirror-guttermarker-subtle{color:#999}.jupyter-wrapper .CodeMirror-cursor{border-left:1px solid black;border-right:none;width:0}.jupyter-wrapper .CodeMirror div.CodeMirror-secondarycursor{border-left:1px solid silver}.jupyter-wrapper .cm-fat-cursor .CodeMirror-cursor{width:auto;border:0!important;background:#7e7}.jupyter-wrapper .cm-fat-cursor div.CodeMirror-cursors{z-index:1}.jupyter-wrapper .cm-fat-cursor-mark{background-color:#14ff1480;-webkit-animation:blink 1.06s steps(1) infinite;-moz-animation:blink 1.06s steps(1) infinite;animation:blink 1.06s steps(1) infinite}.jupyter-wrapper .cm-animate-fat-cursor{width:auto;border:0;-webkit-animation:blink 1.06s steps(1) infinite;-moz-animation:blink 1.06s steps(1) infinite;animation:blink 1.06s steps(1) infinite;background-color:#7e7}@-moz-keyframes blink{50%{background-color:transparent}}@-webkit-keyframes blink{50%{background-color:transparent}}@keyframes blink{50%{background-color:transparent}}.jupyter-wrapper .cm-tab{display:inline-block;text-decoration:inherit}.jupyter-wrapper .CodeMirror-rulers{position:absolute;left:0;right:0;top:-50px;bottom:0;overflow:hidden}.jupyter-wrapper .CodeMirror-ruler{border-left:1px solid #ccc;top:0;bottom:0;position:absolute}.jupyter-wrapper .cm-s-default .cm-header{color:#00f}.jupyter-wrapper .cm-s-default .cm-quote{color:#090}.jupyter-wrapper .cm-negative{color:#d44}.jupyter-wrapper .cm-positive{color:#292}.jupyter-wrapper .cm-header,.jupyter-wrapper .cm-strong{font-weight:700}.jupyter-wrapper .cm-em{font-style:italic}.jupyter-wrapper .cm-link{text-decoration:underline}.jupyter-wrapper .cm-strikethrough{text-decoration:line-through}.jupyter-wrapper .cm-s-default .cm-keyword{color:#708}.jupyter-wrapper .cm-s-default .cm-atom{color:#219}.jupyter-wrapper .cm-s-default .cm-number{color:#164}.jupyter-wrapper .cm-s-default .cm-def{color:#00f}.jupyter-wrapper .cm-s-default .cm-variable-2{color:#05a}.jupyter-wrapper .cm-s-default .cm-variable-3,.jupyter-wrapper .cm-s-default .cm-type{color:#085}.jupyter-wrapper .cm-s-default .cm-comment{color:#a50}.jupyter-wrapper .cm-s-default .cm-string{color:#a11}.jupyter-wrapper .cm-s-default .cm-string-2{color:#f50}.jupyter-wrapper .cm-s-default .cm-meta,.jupyter-wrapper .cm-s-default .cm-qualifier{color:#555}.jupyter-wrapper .cm-s-default .cm-builtin{color:#30a}.jupyter-wrapper .cm-s-default .cm-bracket{color:#997}.jupyter-wrapper .cm-s-default .cm-tag{color:#170}.jupyter-wrapper .cm-s-default .cm-attribute{color:#00c}.jupyter-wrapper .cm-s-default .cm-hr{color:#999}.jupyter-wrapper .cm-s-default .cm-link{color:#00c}.jupyter-wrapper .cm-s-default .cm-error,.jupyter-wrapper .cm-invalidchar{color:red}.jupyter-wrapper .CodeMirror-composing{border-bottom:2px solid}.jupyter-wrapper div.CodeMirror span.CodeMirror-matchingbracket{color:#0b0}.jupyter-wrapper div.CodeMirror 
span.CodeMirror-nonmatchingbracket{color:#a22}.jupyter-wrapper .CodeMirror-matchingtag{background:rgba(255,150,0,.3)}.jupyter-wrapper .CodeMirror-activeline-background{background:#e8f2ff}.jupyter-wrapper .CodeMirror{position:relative;overflow:hidden;background:white}.jupyter-wrapper .CodeMirror-scroll{overflow:scroll!important;margin-bottom:-50px;margin-right:-50px;padding-bottom:50px;height:100%;outline:none;position:relative}.jupyter-wrapper .CodeMirror-sizer{position:relative;border-right:50px solid transparent}.jupyter-wrapper .CodeMirror-vscrollbar,.jupyter-wrapper .CodeMirror-hscrollbar,.jupyter-wrapper .CodeMirror-scrollbar-filler,.jupyter-wrapper .CodeMirror-gutter-filler{position:absolute;z-index:6;display:none;outline:none}.jupyter-wrapper .CodeMirror-vscrollbar{right:0;top:0;overflow-x:hidden;overflow-y:scroll}.jupyter-wrapper .CodeMirror-hscrollbar{bottom:0;left:0;overflow-y:hidden;overflow-x:scroll}.jupyter-wrapper .CodeMirror-scrollbar-filler{right:0;bottom:0}.jupyter-wrapper .CodeMirror-gutter-filler{left:0;bottom:0}.jupyter-wrapper .CodeMirror-gutters{position:absolute;left:0;top:0;min-height:100%;z-index:3}.jupyter-wrapper .CodeMirror-gutter{white-space:normal;height:100%;display:inline-block;vertical-align:top;margin-bottom:-50px}.jupyter-wrapper .CodeMirror-gutter-wrapper{position:absolute;z-index:4;background:none!important;border:none!important}.jupyter-wrapper .CodeMirror-gutter-background{position:absolute;top:0;bottom:0;z-index:4}.jupyter-wrapper .CodeMirror-gutter-elt{position:absolute;cursor:default;z-index:4}.jupyter-wrapper .CodeMirror-gutter-wrapper ::selection{background-color:transparent}.jupyter-wrapper .CodeMirror-gutter-wrapper ::-moz-selection{background-color:transparent}.jupyter-wrapper .CodeMirror-lines{cursor:text;min-height:1px}.jupyter-wrapper .CodeMirror pre.CodeMirror-line,.jupyter-wrapper .CodeMirror pre.CodeMirror-line-like{-moz-border-radius:0;-webkit-border-radius:0;border-radius:0;border-width:0;background:transparent;font-family:inherit;font-size:inherit;margin:0;white-space:pre;word-wrap:normal;line-height:inherit;color:inherit;z-index:2;position:relative;overflow:visible;-webkit-tap-highlight-color:transparent;-webkit-font-variant-ligatures:contextual;font-variant-ligatures:contextual}.jupyter-wrapper .CodeMirror-wrap pre.CodeMirror-line,.jupyter-wrapper .CodeMirror-wrap pre.CodeMirror-line-like{word-wrap:break-word;white-space:pre-wrap;word-break:normal}.jupyter-wrapper .CodeMirror-linebackground{position:absolute;left:0;right:0;top:0;bottom:0;z-index:0}.jupyter-wrapper .CodeMirror-linewidget{position:relative;z-index:2;padding:.1px}.jupyter-wrapper .CodeMirror-rtl pre{direction:rtl}.jupyter-wrapper .CodeMirror-code{outline:none}.jupyter-wrapper .CodeMirror-scroll,.jupyter-wrapper .CodeMirror-sizer,.jupyter-wrapper .CodeMirror-gutter,.jupyter-wrapper .CodeMirror-gutters,.jupyter-wrapper .CodeMirror-linenumber{-moz-box-sizing:content-box;box-sizing:content-box}.jupyter-wrapper .CodeMirror-measure{position:absolute;width:100%;height:0;overflow:hidden;visibility:hidden}.jupyter-wrapper .CodeMirror-cursor{position:absolute;pointer-events:none}.jupyter-wrapper .CodeMirror-measure pre{position:static}.jupyter-wrapper div.CodeMirror-cursors{visibility:hidden;position:relative;z-index:3}.jupyter-wrapper div.CodeMirror-dragcursors,.jupyter-wrapper .CodeMirror-focused div.CodeMirror-cursors{visibility:visible}.jupyter-wrapper .CodeMirror-selected{background:#d9d9d9}.jupyter-wrapper .CodeMirror-focused 
.CodeMirror-selected{background:#d7d4f0}.jupyter-wrapper .CodeMirror-crosshair{cursor:crosshair}.jupyter-wrapper .CodeMirror-line::selection,.jupyter-wrapper .CodeMirror-line>span::selection,.jupyter-wrapper .CodeMirror-line>span>span::selection{background:#d7d4f0}.jupyter-wrapper .CodeMirror-line::-moz-selection,.jupyter-wrapper .CodeMirror-line>span::-moz-selection,.jupyter-wrapper .CodeMirror-line>span>span::-moz-selection{background:#d7d4f0}.jupyter-wrapper .cm-searching{background-color:#ffa;background-color:#ff06}.jupyter-wrapper .cm-force-border{padding-right:.1px}@media print{.jupyter-wrapper .CodeMirror div.CodeMirror-cursors{visibility:hidden}}.jupyter-wrapper .cm-tab-wrap-hack:after{content:\"\"}.jupyter-wrapper span.CodeMirror-selectedtext{background:none}.jupyter-wrapper .CodeMirror-dialog{position:absolute;left:0;right:0;background:inherit;z-index:15;padding:.1em .8em;overflow:hidden;color:inherit}.jupyter-wrapper .CodeMirror-dialog-top{border-bottom:1px solid #eee;top:0}.jupyter-wrapper .CodeMirror-dialog-bottom{border-top:1px solid #eee;bottom:0}.jupyter-wrapper .CodeMirror-dialog input{border:none;outline:none;background:transparent;width:20em;color:inherit;font-family:monospace}.jupyter-wrapper .CodeMirror-dialog button{font-size:70%}.jupyter-wrapper .CodeMirror-foldmarker{color:#00f;text-shadow:#b9f 1px 1px 2px,#b9f -1px -1px 2px,#b9f 1px -1px 2px,#b9f -1px 1px 2px;font-family:arial;line-height:.3;cursor:pointer}.jupyter-wrapper .CodeMirror-foldgutter{width:.7em}.jupyter-wrapper .CodeMirror-foldgutter-open,.jupyter-wrapper .CodeMirror-foldgutter-folded{cursor:pointer}.jupyter-wrapper .CodeMirror-foldgutter-open:after{content:\"\u25be\"}.jupyter-wrapper .CodeMirror-foldgutter-folded:after{content:\"\u25b8\"}.jupyter-wrapper .CodeMirror{line-height:var(--jp-code-line-height);font-size:var(--jp-code-font-size);font-family:var(--jp-code-font-family);border:0;border-radius:0;height:auto}.jupyter-wrapper .CodeMirror pre{padding:0 var(--jp-code-padding)}.jupyter-wrapper .CodeMirror.cm-fat-cursor .cm-overlay.cm-searching{opacity:.5}.jupyter-wrapper .jp-CodeMirrorEditor[data-type=inline] .CodeMirror-dialog{background-color:var(--jp-layout-color0);color:var(--jp-content-font-color1)}.jupyter-wrapper .CodeMirror-lines{padding:var(--jp-code-padding) 0}.jupyter-wrapper .CodeMirror-linenumber{padding:0 8px}.jupyter-wrapper .jp-CodeMirrorEditor{cursor:text}.jupyter-wrapper .jp-CodeMirrorEditor[data-type=inline] .CodeMirror-cursor{border-left:var(--jp-code-cursor-width0) solid var(--jp-editor-cursor-color)}@media screen and (min-width: 2138px) and (max-width: 4319px){.jupyter-wrapper .jp-CodeMirrorEditor[data-type=inline] .CodeMirror-cursor{border-left:var(--jp-code-cursor-width1) solid var(--jp-editor-cursor-color)}}@media screen and (min-width: 4320px){.jupyter-wrapper .jp-CodeMirrorEditor[data-type=inline] .CodeMirror-cursor{border-left:var(--jp-code-cursor-width2) solid var(--jp-editor-cursor-color)}}.jupyter-wrapper .CodeMirror.jp-mod-readOnly .CodeMirror-cursor{display:none}.jupyter-wrapper .CodeMirror-gutters{border-right:1px solid var(--jp-border-color2);background-color:var(--jp-layout-color0)}.jupyter-wrapper .jp-CollaboratorCursor{border-left:5px solid transparent;border-right:5px solid transparent;border-top:none;border-bottom:3px solid;background-clip:content-box;margin-left:-5px;margin-right:-5px}.jupyter-wrapper 
.CodeMirror-selectedtext.cm-searching{background-color:var(--jp-search-selected-match-background-color)!important;color:var(--jp-search-selected-match-color)!important}.jupyter-wrapper .cm-searching{background-color:var(--jp-search-unselected-match-background-color)!important;color:var(--jp-search-unselected-match-color)!important}.jupyter-wrapper .cm-trailingspace{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAFCAYAAAB4ka1VAAAAsElEQVQIHQGlAFr/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA7+r3zKmT0/+pk9P/7+r3zAAAAAAAAAAABAAAAAAAAAAA6OPzM+/q9wAAAAAA6OPzMwAAAAAAAAAAAgAAAAAAAAAAGR8NiRQaCgAZIA0AGR8NiQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQyoYJ/SY80UAAAAASUVORK5CYII=);background-position:center left;background-repeat:repeat-x}.jupyter-wrapper .CodeMirror-focused .CodeMirror-selected{background-color:var(--jp-editor-selected-focused-background)}.jupyter-wrapper .CodeMirror-selected{background-color:var(--jp-editor-selected-background)}.jupyter-wrapper .jp-CollaboratorCursor-hover{position:absolute;z-index:1;transform:translate(-50%);color:#fff;border-radius:3px;padding:1px 4px;text-align:center;font-size:var(--jp-ui-font-size1);white-space:nowrap}.jupyter-wrapper .jp-CodeMirror-ruler{border-left:1px dashed var(--jp-border-color2)}.jupyter-wrapper .CodeMirror.cm-s-jupyter{background:var(--jp-layout-color0);color:var(--jp-content-font-color1)}.jupyter-wrapper .jp-CodeConsole .CodeMirror.cm-s-jupyter,.jupyter-wrapper .jp-Notebook .CodeMirror.cm-s-jupyter{background:transparent}.jupyter-wrapper .cm-s-jupyter .CodeMirror-cursor{border-left:var(--jp-code-cursor-width0) solid var(--jp-editor-cursor-color)}.jupyter-wrapper .cm-s-jupyter span.cm-keyword{color:var(--jp-mirror-editor-keyword-color);font-weight:700}.jupyter-wrapper .cm-s-jupyter span.cm-atom{color:var(--jp-mirror-editor-atom-color)}.jupyter-wrapper .cm-s-jupyter span.cm-number{color:var(--jp-mirror-editor-number-color)}.jupyter-wrapper .cm-s-jupyter span.cm-def{color:var(--jp-mirror-editor-def-color)}.jupyter-wrapper .cm-s-jupyter span.cm-variable{color:var(--jp-mirror-editor-variable-color)}.jupyter-wrapper .cm-s-jupyter span.cm-variable-2{color:var(--jp-mirror-editor-variable-2-color)}.jupyter-wrapper .cm-s-jupyter span.cm-variable-3{color:var(--jp-mirror-editor-variable-3-color)}.jupyter-wrapper .cm-s-jupyter span.cm-punctuation{color:var(--jp-mirror-editor-punctuation-color)}.jupyter-wrapper .cm-s-jupyter span.cm-property{color:var(--jp-mirror-editor-property-color)}.jupyter-wrapper .cm-s-jupyter span.cm-operator{color:var(--jp-mirror-editor-operator-color);font-weight:700}.jupyter-wrapper .cm-s-jupyter span.cm-comment{color:var(--jp-mirror-editor-comment-color);font-style:italic}.jupyter-wrapper .cm-s-jupyter span.cm-string{color:var(--jp-mirror-editor-string-color)}.jupyter-wrapper .cm-s-jupyter span.cm-string-2{color:var(--jp-mirror-editor-string-2-color)}.jupyter-wrapper .cm-s-jupyter span.cm-meta{color:var(--jp-mirror-editor-meta-color)}.jupyter-wrapper .cm-s-jupyter span.cm-qualifier{color:var(--jp-mirror-editor-qualifier-color)}.jupyter-wrapper .cm-s-jupyter span.cm-builtin{color:var(--jp-mirror-editor-builtin-color)}.jupyter-wrapper .cm-s-jupyter span.cm-bracket{color:var(--jp-mirror-editor-bracket-color)}.jupyter-wrapper .cm-s-jupyter span.cm-tag{color:var(--jp-mirror-editor-tag-color)}.jupyter-wrapper .cm-s-jupyter span.cm-attribute{color:var(--jp-mirror-editor-attribute-color)}.jupyter-wrapper .cm-s-jupyter 
span.cm-header{color:var(--jp-mirror-editor-header-color)}.jupyter-wrapper .cm-s-jupyter span.cm-quote{color:var(--jp-mirror-editor-quote-color)}.jupyter-wrapper .cm-s-jupyter span.cm-link{color:var(--jp-mirror-editor-link-color)}.jupyter-wrapper .cm-s-jupyter span.cm-error{color:var(--jp-mirror-editor-error-color)}.jupyter-wrapper .cm-s-jupyter span.cm-hr{color:#999}.jupyter-wrapper .cm-s-jupyter span.cm-tab{background:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAMCAYAAAAkuj5RAAAAAXNSR0IArs4c6QAAAGFJREFUSMft1LsRQFAQheHPowAKoACx3IgEKtaEHujDjORSgWTH/ZOdnZOcM/sgk/kFFWY0qV8foQwS4MKBCS3qR6ixBJvElOobYAtivseIE120FaowJPN75GMu8j/LfMwNjh4HUpwg4LUAAAAASUVORK5CYII=);background-position:right;background-repeat:no-repeat}.jupyter-wrapper .cm-s-jupyter .CodeMirror-activeline-background,.jupyter-wrapper .cm-s-jupyter .CodeMirror-gutter{background-color:var(--jp-layout-color2)}.jupyter-wrapper .jp-CodeMirrorEditor .remote-caret{position:relative;border-left:2px solid black;margin-left:-1px;margin-right:-1px;box-sizing:border-box}.jupyter-wrapper .jp-CodeMirrorEditor .remote-caret>div{white-space:nowrap;position:absolute;top:-1.15em;padding-bottom:.05em;left:-2px;font-size:.95em;background-color:#fa8100;font-family:var(--jp-ui-font-family);font-weight:700;line-height:normal;-webkit-user-select:none;user-select:none;color:#fff;padding-left:2px;padding-right:2px;z-index:3;transition:opacity .3s ease-in-out}.jupyter-wrapper .jp-CodeMirrorEditor .remote-caret.hide-name>div{transition-delay:.7s;opacity:0}.jupyter-wrapper .jp-CodeMirrorEditor .remote-caret:hover>div[style]{opacity:1;transition-delay:0s}.jupyter-wrapper :root{--jp-private-code-span-padding: calc( (var(--jp-code-line-height) - 1) * var(--jp-code-font-size) / 2 )}.jupyter-wrapper .jp-RenderedText{text-align:left;padding-left:var(--jp-code-padding);line-height:var(--jp-code-line-height);font-family:var(--jp-code-font-family)}.jupyter-wrapper .jp-RenderedText pre,.jupyter-wrapper .jp-RenderedJavaScript pre,.jupyter-wrapper .jp-RenderedHTMLCommon-ignore pre{color:var(--jp-content-font-color1);font-size:var(--jp-code-font-size);border:none;margin:0;padding:0}.jupyter-wrapper .jp-RenderedText pre a:link{text-decoration:none;color:var(--jp-content-link-color)}.jupyter-wrapper .jp-RenderedText pre a:hover{text-decoration:underline;color:var(--jp-content-link-color)}.jupyter-wrapper .jp-RenderedText pre a:visited{text-decoration:none;color:var(--jp-content-link-color)}.jupyter-wrapper .jp-RenderedText pre .ansi-black-fg{color:#3e424d}.jupyter-wrapper .jp-RenderedText pre .ansi-red-fg{color:#e75c58}.jupyter-wrapper .jp-RenderedText pre .ansi-green-fg{color:#00a250}.jupyter-wrapper .jp-RenderedText pre .ansi-yellow-fg{color:#ddb62b}.jupyter-wrapper .jp-RenderedText pre .ansi-blue-fg{color:#208ffb}.jupyter-wrapper .jp-RenderedText pre .ansi-magenta-fg{color:#d160c4}.jupyter-wrapper .jp-RenderedText pre .ansi-cyan-fg{color:#60c6c8}.jupyter-wrapper .jp-RenderedText pre .ansi-white-fg{color:#c5c1b4}.jupyter-wrapper .jp-RenderedText pre .ansi-black-bg{background-color:#3e424d;padding:var(--jp-private-code-span-padding) 0}.jupyter-wrapper .jp-RenderedText pre .ansi-red-bg{background-color:#e75c58;padding:var(--jp-private-code-span-padding) 0}.jupyter-wrapper .jp-RenderedText pre .ansi-green-bg{background-color:#00a250;padding:var(--jp-private-code-span-padding) 0}.jupyter-wrapper .jp-RenderedText pre .ansi-yellow-bg{background-color:#ddb62b;padding:var(--jp-private-code-span-padding) 0}.jupyter-wrapper .jp-RenderedText pre 