# Synthetic data for the sparse-regression demo: N rows of a P-dimensional
# standard-normal design matrix, and responses formed as x @ effects plus
# unit-variance Gaussian noise.
# NOTE(review): `N`, `P`, and `effects` (the true coefficient vector) are
# assumed to be defined earlier in the file — confirm against the full source.
x = np.random.multivariate_normal(mean = np.zeros([P]), cov = np.eye(P), size = N)
y = np.matmul(x, effects) + np.random.normal(loc = 0., scale = 1., size = N)
def model():
    """Edward2 generative model for horseshoe-prior linear regression.

    Priors:
      sigma          ~ HalfNormal(1)         -- hyperprior on global scale
      tau            ~ HalfCauchy(0, sigma)  -- global shrinkage
      beta           ~ Horseshoe(tau * 1_P)  -- P regression coefficients
      residual_sigma ~ HalfCauchy(0, 1)      -- observation-noise scale
    Likelihood:
      y ~ Normal(x @ beta, residual_sigma)

    Reads the module-level design matrix `x` (cast to float32 for TF).
    Returns the observed-data random variable `y`.
    """
    sigma = ed.HalfNormal(scale=1., name="sigma")
    tau = ed.HalfCauchy(loc=0., scale=sigma, name="tau")
    beta = ed.Horseshoe(scale=tf.ones([P]) * tau, name="beta")
    residual_sigma = ed.HalfCauchy(loc=0., scale=1., name="residual_sigma")
    # BUG FIX: the original hard-coded scale=1. here, so residual_sigma was
    # sampled but never entered the likelihood — its posterior would just be
    # its prior. The noise scale must be the residual_sigma random variable.
    y = ed.Normal(
        loc=tf.tensordot(x.astype(np.float32), beta, [[1], [0]]),
        scale=residual_sigma,
        name="y")
    return y
# Convert the generative model into a callable joint log-density over its
# named random variables.
log_joint = ed.make_log_joint_fn(model)


def target_log_prob_fn(sigma, tau, beta, residual_sigma):
    """Unnormalized log posterior: the joint density with y pinned to data."""
    latents = dict(
        sigma=sigma,
        tau=tau,
        beta=beta,
        residual_sigma=residual_sigma,
    )
    return log_joint(y=y, **latents)
# --- HMC sampling configuration --------------------------------------------
num_results = 3000
num_burnin_steps = 4000

# Leapfrog step size lives in a TF variable so the step-size update policy
# can adapt it during burn-in.
step_size = tf.get_variable(
    initializer=.01,
    name="step_size")

states, kernel_results = tfp.mcmc.sample_chain(
    num_results=num_results,
    num_burnin_steps=num_burnin_steps,
    num_steps_between_results=5,  # thin: keep every 5th post-burn-in draw
    # One initial state per latent, in target_log_prob_fn argument order.
    current_state=[
        tf.ones([], name="init_sigma"),
        tf.ones([], name="init_tau"),
        tf.random_normal([P], name="init_beta"),
        # CONSISTENCY FIX: renamed op from "residual_sigma" to
        # "init_residual_sigma" to match the init_* convention above.
        tf.ones([], name="init_residual_sigma"),
    ],
    kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        num_leapfrog_steps=10,
        step_size=step_size,
        # BUG FIX: num_adaptation_steps=None adapts the step size forever,
        # so it keeps changing during the retained draws and the kernel is
        # not a valid MCMC transition for those samples. Stop adapting
        # partway through burn-in (TFP's recommended ~80% of burn-in).
        step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
            num_adaptation_steps=int(0.8 * num_burnin_steps))))