I was trying to replicate the Heteroskedastic Likelihood and Multi-Latent GP example from the GPflow documentation site. Everything ran fine except for the last snippet of code under "Run Optimization Loop", which throws an error: AttributeError: 'NaturalGradient' object has no attribute '_name'.
Question: Is there something I can do to fix this?
Code to run
# [1] Import packages
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import gpflow as gpf
# [2]: Generate data to be modeled
N = 1001
np.random.seed(0)
tf.random.set_seed(0)
# Build inputs X
X = np.linspace(0, 4 * np.pi, N)[:, None] # X must be of shape [N, 1]
# Deterministic functions in place of latent ones
f1 = np.sin
f2 = np.cos
# Use transform = exp to ensure positive-only scale values
transform = np.exp
# Compute loc and scale as functions of input X
loc = f1(X)
scale = transform(f2(X))
# Sample outputs Y from Gaussian Likelihood
Y = np.random.normal(loc, scale)
# [3]: Plot the data generated above to see what they look like
def plot_distribution(X, Y, loc, scale):
    plt.figure(figsize=(15, 5))
    x = X.squeeze()
    for k in (1, 2):
        lb = (loc - k * scale).squeeze()
        ub = (loc + k * scale).squeeze()
        plt.fill_between(x, lb, ub, color="silver", alpha=1 - 0.05 * k ** 3)
        plt.plot(x, lb, color="silver")
        plt.plot(x, ub, color="silver")
    plt.plot(X, loc, color="black")
    plt.scatter(X, Y, color="gray", alpha=0.8)
    plt.show()
    plt.close()
#plot_distribution(X, Y, loc, scale)
# [4]: Set up the likelihood function for the model we want
likelihood = gpf.likelihoods.HeteroskedasticTFPConditional(
    distribution_class=tfp.distributions.Normal,  # Gaussian likelihood
    scale_transform=tfp.bijectors.Exp(),  # Exponential transform
)
print(f"Likelihood's expected latent_dim: {likelihood.latent_dim}")
# [5]: Specify the kernels of the mean and variance GPs
kernel = gpf.kernels.SeparateIndependent(
    [
        gpf.kernels.SquaredExponential(),  # This is k1, the kernel of f1
        gpf.kernels.SquaredExponential(),  # This is k2, the kernel of f2
    ]
)
# [6]: We will use the SVGP model which requires inducing points
M = 20 # Number of inducing variables for each f_i
# Initial inducing points position Z
Z = np.linspace(X.min(), X.max(), M)[:, None] # Z must be of shape [M, 1]
inducing_variable = gpf.inducing_variables.SeparateIndependentInducingVariables(
    [
        gpf.inducing_variables.InducingPoints(Z),  # This is U1 = f1(Z1)
        gpf.inducing_variables.InducingPoints(Z),  # This is U2 = f2(Z2)
    ]
)
# [7]: Build the SVGP model using the components instantiated above
model = gpf.models.SVGP(
    kernel=kernel,
    likelihood=likelihood,
    inducing_variable=inducing_variable,
    num_latent_gps=likelihood.latent_dim,
)
model
# [8]: Build optimizers (NatGrad + Adam)
data = (X, Y)
loss_fn = model.training_loss_closure(data)
gpf.utilities.set_trainable(model.q_mu, False)
gpf.utilities.set_trainable(model.q_sqrt, False)
variational_vars = [(model.q_mu, model.q_sqrt)]
natgrad_opt = gpf.optimizers.NaturalGradient(gamma=0.1)
adam_vars = model.trainable_variables
adam_opt = tf.optimizers.Adam(0.01)
@tf.function
def optimisation_step():
    natgrad_opt.minimize(loss_fn, variational_vars)
    adam_opt.minimize(loss_fn, adam_vars)
# [9]: Run optimization loop
epochs = 100
log_freq = 20
for epoch in range(1, epochs + 1):
    optimisation_step()
    # Every 'log_freq' epochs, print the loss and plot the predictions against the data
    if epoch % log_freq == 0:
        print(f"Epoch {epoch} - Loss: {loss_fn().numpy():.4f}")
        Ymean, Yvar = model.predict_y(X)
        Ymean = Ymean.numpy().squeeze()
        Ystd = tf.sqrt(Yvar).numpy().squeeze()
        plot_distribution(X, Y, Ymean, Ystd)
model
Versions of packages
print(tf.__version__) --> 2.11.0
print(tfp.__version__) --> 0.19.0
print(gpf.__version__) --> 2.7.1
More Details
More of the error message is as follows:
AttributeError Traceback (most recent call last)
<ipython-input-11-aee9c220de71> in <module>
3
4 for epoch in range(1, epochs + 1):
----> 5 optimisation_step()
...and continues
/usr/local/lib/python3.9/dist-packages/gpflow/optimizers/natgrad.py in tf___natgrad_steps(self, loss_fn, parameters)
21 loss = ag__.converted_call(ag__.ld(loss_fn), (), None, fscope)
22 (q_mu_grads, q_sqrt_grads) = ag__.converted_call(ag__.ld(tape).gradient, (ag__.ld(loss), [ag__.ld(q_mu_vars), ag__.ld(q_sqrt_vars)]), None, fscope)
---> 23 with ag__.ld(tf).name_scope(f'{ag__.ld(self)._name}/natural_gradient_steps'):
24
25 def get_state():
AttributeError: in user code:
File "<ipython-input-10-2722f09bae9a>", line 16, in optimisation_step *
natgrad_opt.minimize(loss_fn, variational_vars)
File "/usr/local/lib/python3.9/dist-packages/check_shapes/integration/tf.py", line 208, in wrapped_method *
return wrapped_function(self, *args, **kwargs)
File "/usr/local/lib/python3.9/dist-packages/check_shapes/decorator.py", line 120, in wrapped_function *
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/dist-packages/gpflow/optimizers/natgrad.py", line 236, in minimize *
self._natgrad_steps(loss_fn, parameters)
File "/usr/local/lib/python3.9/dist-packages/check_shapes/integration/tf.py", line 208, in wrapped_method *
return wrapped_function(self, *args, **kwargs)
File "/usr/local/lib/python3.9/dist-packages/check_shapes/decorator.py", line 120, in wrapped_function *
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/dist-packages/gpflow/optimizers/natgrad.py", line 266, in _natgrad_steps *
with tf.name_scope(f"{self._name}/natural_gradient_steps"):
AttributeError: 'NaturalGradient' object has no attribute '_name'
I tried downgrading TensorFlow to an older version, which did not work. I also tried looking at the GPflow source code for NaturalGradient, but I'm afraid that didn't help me come to a solution.
I ran into the same issue, but I found a workaround: you can just set the optimizer's _name attribute manually for now, using something like:
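natgrad_opt = gpf.optimizers.NaturalGradient(gamma=0.1)
# Manually set the private attribute that _natgrad_steps reads inside its
# tf.name_scope (see the traceback above); any descriptive string should do
natgrad_opt._name = "natural_gradient"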
It's not pretty, but it seems to do the job. The constructor also takes a name parameter, but it does not seem to store it in the right attribute (it ends up on self.name rather than self._name).
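If you'd rather not poke at a private attribute at every call site, here is a minimal (untested) sketch of the same idea wrapped in a subclass — NamedNaturalGradient is just an illustrative name — that mirrors the constructor's name argument onto the private attribute the name scope expects:
class NamedNaturalGradient(gpf.optimizers.NaturalGradient):
    def __init__(self, gamma, name="natural_gradient", **kwargs):
        super().__init__(gamma, name=name, **kwargs)
        # Mirror the public name onto the attribute read by _natgrad_steps
        self._name = name

natgrad_opt = NamedNaturalGradient(gamma=0.1)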