Commit 060d2ae1 authored by Shashank Singh

Added sketch of basic fitting experiment.

parent fe79627b
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from tqdm import tqdm
num_replicates = 5 # Number of IID replications of experiment
n_train = 1000 # Training sample size
n_test = 100 # Test sample size
num_iters = 1001 # Number of training iterations
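# True parameters of the data-generating process. Note beta_X = 0: X has no
# direct effect on Y, so any X-Y association comes from the shared noise eps.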
alpha_Z = tf.Variable([2.], name='alpha_Z')
alpha_eps = tf.Variable([3., 1.], name='alpha_eps')
beta_X = tf.Variable([0.], name='beta_X')
beta_eps = tf.Variable([1., 3.], name='beta_eps')
def construct_data_generating_model(alpha_Z, alpha_eps, beta_X, beta_eps):
    """Data-generating model: Z ~ N(0, 1), eps ~ N(0, I_2),
    X | Z, eps ~ N(alpha_Z*Z + alpha_eps.eps, 1),
    Y | X, eps ~ N(beta_X*X + beta_eps.eps, 1)."""
    return tfd.JointDistributionSequential([
        tfd.Normal(loc=[0.], scale=1., name='Z', validate_args=True),
        tfd.Normal(loc=0., scale=[1., 1.], name='eps', validate_args=True),
        lambda eps, Z: tfd.Normal(
            loc=[tf.tensordot(alpha_Z, Z, axes=1) + tf.tensordot(alpha_eps, eps, axes=1)],
            scale=1.,
            name='X', validate_args=True),
        lambda X, eps: tfd.Normal(
            loc=tf.tensordot(beta_X, X, axes=1) + tf.tensordot(beta_eps, eps, axes=1),
            scale=1.,
            name='Y', validate_args=True),
    ], batch_ndims=0, use_vectorized_map=True)
joint = construct_data_generating_model(alpha_Z, alpha_eps, beta_X, beta_eps)
def construct_data_fitting_model(alpha_Z, beta_X):
    """Fitting model (omits eps): Z ~ N(0, 1), X | Z ~ N(alpha_Z*Z, 1), and
    Y's mean is beta_X times the Z-predicted part of X (alpha_Z*Z),
    rather than X itself."""
    return tfd.JointDistributionSequential([
        tfd.Normal(loc=[0.], scale=1., name='Z', validate_args=True),
        lambda Z: tfd.Normal(
            loc=[tf.tensordot(alpha_Z, Z, axes=1)],
            scale=1.,
            name='X', validate_args=True),
        lambda X, Z: tfd.Normal(
            loc=beta_X * tf.tensordot(alpha_Z, Z, axes=1),
            scale=1.,
            name='Y', validate_args=True),
    ], batch_ndims=0, use_vectorized_map=True)
def fit_model(Z, X, Y):
    """Fit alpha_Z and beta_X by maximum likelihood on observed (Z, X, Y)."""
    # Randomly initialized parameter estimates
    trainable_model_args = [
        tf.Variable(np.random.normal(size=[1]), dtype=tf.float32, name='alpha_Z_hat'),
        tf.Variable(np.random.normal(size=[1]), dtype=tf.float32, name='beta_X_hat')
    ]

    # SPECIFY LOG-LIKELIHOOD TRAINING OBJECTIVE AND OPTIMIZER
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)

    def log_prob():
        trainable_model = construct_data_fitting_model(*trainable_model_args)
        return tf.math.reduce_sum(trainable_model.log_prob(Z, X, Y))

    @tf.function(autograph=True)
    def train_op(trainable_model_args):
        """Apply a gradient update."""
        with tf.GradientTape() as tape:
            neg_log_prob = -log_prob()
        grads = tape.gradient(neg_log_prob, trainable_model_args)
        optimizer.apply_gradients(zip(grads, trainable_model_args))
        return neg_log_prob, trainable_model_args

    # loss_history = []
    # alpha_Z_history = []
    # beta_X_history = []
    for step in range(num_iters):
        loss, model_args = train_op(trainable_model_args)
        # loss_history.append(loss)
        # alpha_Z_history.append(model_args[0])
        # beta_X_history.append(model_args[1])
        if step % 100 == 0:  # Periodically report the current alpha_Z_hat estimate
            print(step, model_args[0].numpy())
    return [arg.numpy() for arg in model_args]
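
# Run the experiment num_replicates times and collect the fitted beta_X estimates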
beta_X_hats = []
for replicate in tqdm(range(num_replicates)):
    # Draw training data
    Z_train, _, X_train, Y_train = joint.sample(n_train)
    alpha_Z_hat, beta_X_hat = fit_model(Z_train, X_train, Y_train)
    beta_X_hats.append(beta_X_hat)
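
# Histogram of the fitted beta_X across replicates (data-generating value is beta_X = 0)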
plt.hist(beta_X_hats)
# plt.subplot(3, 1, 1)
# plt.plot(loss_history)
# plt.ylabel('loss')
# plt.subplot(3, 1, 2)
# plt.plot(alpha_Z_history, label=r'$\hat\alpha_Z$')
# plt.plot([1, num_iters], [2., 2.], label=r'$\alpha_Z$')
# plt.legend()
# plt.subplot(3, 1, 3)
# plt.plot(beta_X_history, label=r'$\hat\beta_X$')
# plt.plot([1, num_iters], [0., 0.], label=r'$\beta_X$')
# plt.legend()
# plt.xlabel('Training Iteration')
plt.show()