tfb = tfp.bijectors
tfd = tfp.distributions
####################################
# Prior over the latent `z`: a 2-D Gaussian parameterized by the Cholesky
# factor of the covariance matrix [[20., 5.], [5., 20.]].
prior123 = tfd.MultivariateNormalTriL(
    loc=[110.5, 105.6],
    scale_tril=tf.linalg.cholesky([[20., 5.], [5., 20.]]))
# Say this is the true value of `z`...
ground_truth = prior123.sample()
# ... and you have 20 noisy observations of it, and you want to use VI to
# approximate the posterior distribution of `z` conditioned on the observations.
obs = tfd.MultivariateNormalDiag(ground_truth, [1., 1.]).sample(20)


def likelihood(z):
    """Summed log-probability of the 20 observations, conditioned on `z`.

    Each observation is modeled as N(z, I), so this is log p(obs | z).
    """
    return tf.reduce_sum(
        tfd.MultivariateNormalDiag(z, [1., 1.]).log_prob(obs))
# The target model is log p(z, obs) = log p(z) + log p(obs|z)
def func123(z):
    """Unnormalized log-joint density: log p(z) + log p(obs | z)."""
    log_prior = prior123.log_prob(z)
    log_lik = likelihood(z)
    return log_prior + log_lik
####################################
# Define a trainable location and scale for the MultivariateNormalTriL
# surrogate posterior.
trainable_loc = tf.Variable([100.1, 100.1])
# FillScaleTriL keeps the optimized scale lower-triangular with a positive
# diagonal: gradients flow through the unconstrained pre-image of the variable.
trainable_scale = tfp.util.TransformedVariable(
    4. * tf.eye(2),
    bijector=tfb.FillScaleTriL())
surrogate_posterior = tfd.MultivariateNormalTriL(
    loc=trainable_loc,
    scale_tril=trainable_scale)