Hi, I am trying to use the ProductStructureKernel with grid interpolation (SKIP) in a multi-output GP problem. I referred to the examples for both SKIP and BatchIndependentMultitaskGP. However, my model still reports a shape error. The code is as follows:
import torch
import gpytorch
from gpytorch.kernels import GridInterpolationKernel, ProductStructureKernel, RBFKernel, ScaleKernel


class BatchIndependentMultitaskGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        # One batch entry per output task (10 tasks)
        self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([10]))
        self.base_covar_module = RBFKernel(batch_shape=torch.Size([10]))
        # SKIP: 1-D grid-interpolated kernels multiplied over the 16 input dims
        self.covar_module = ProductStructureKernel(
            ScaleKernel(
                GridInterpolationKernel(self.base_covar_module, grid_size=10, num_dims=1),
                batch_shape=torch.Size([10]),
            ),
            num_dims=16,
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
            gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
        )
train_x = torch.randn(size=[1000, 16])  # 1000 points, 16 input dims
train_y = torch.randn(size=[1000, 10])  # 10 output tasks
if torch.cuda.is_available():
    train_x, train_y = train_x.cuda(), train_y.cuda()

likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=10)
model = BatchIndependentMultitaskGPModel(train_x, train_y, likelihood)
if torch.cuda.is_available():
    model = model.cuda()
    likelihood = likelihood.cuda()

model.train()
likelihood.train()

with gpytorch.settings.use_toeplitz(False), gpytorch.settings.max_root_decomposition_size(30):
    # Get output from model
    output = model(train_x)
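For reference, the kernel in the single-output SKIP tutorial that I followed looks roughly like this (a sketch from memory, without any batch_shape; the grid_size value here is a placeholder):

# Single-output SKIP kernel pattern (sketch):
# one 1-D grid-interpolated RBF kernel, multiplied across all input dimensions
covar_module = ProductStructureKernel(
    ScaleKernel(
        GridInterpolationKernel(RBFKernel(), grid_size=100, num_dims=1)
    ),
    num_dims=train_x.size(-1),
)

My model above combines this pattern with the batch_shape arguments from the batch-independent multitask example.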
Running my model code reports the error message below. Could you please help me with this error?
    output = model(train_x)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/models/exact_gp.py", line 266, in __call__
    res = super().__call__(*inputs, **kwargs)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/module.py", line 28, in __call__
    outputs = self.forward(*inputs, **kwargs)
  File "exp/flipmnist/test.py", line 33, in forward
    covar_x = self.covar_module(x)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/kernels/product_structure_kernel.py", line 77, in __call__
    res = lazify(res).evaluate_kernel()
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/utils/memoize.py", line 51, in g
    return _add_to_cache(self, cache_name, method(self, *args, **kwargs), *args, kwargs_pkl=kwargs_pkl)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 274, in evaluate_kernel
    res = self.kernel(x1, x2, diag=False, last_dim_is_batch=self.last_dim_is_batch, **self.params)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/kernels/product_structure_kernel.py", line 76, in __call__
    res = super().__call__(x1_, x2_, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/kernels/kernel.py", line 396, in __call__
    res = lazify(super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params))
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/module.py", line 28, in __call__
    outputs = self.forward(*inputs, **kwargs)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/kernels/product_structure_kernel.py", line 57, in forward
    res = self.base_kernel(x1, x2, diag=diag, last_dim_is_batch=True, **params)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/kernels/kernel.py", line 396, in __call__
    res = lazify(super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params))
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/module.py", line 28, in __call__
    outputs = self.forward(*inputs, **kwargs)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/kernels/scale_kernel.py", line 92, in forward
    orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 189, in forward
    base_lazy_tsr.batch_shape, left_interp_indices.shape[:-2], right_interp_indices.shape[:-2],
  File "/h/ssy/.conda/envs/cl/lib/python3.7/site-packages/gpytorch/utils/broadcasting.py", line 20, in _mul_broadcast_shape
    raise RuntimeError("Shapes are not broadcastable for mul operation")
RuntimeError: Shapes are not broadcastable for mul operation
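From the traceback, ProductStructureKernel calls its base kernel with last_dim_is_batch=True, so my guess is that this clashes with the explicit batch_shape=torch.Size([10]) I set on the kernels, but I am not sure. For comparison, the plain batch-independent multitask pattern without SKIP runs fine for me. A minimal sketch of that working variant, based on the BatchIndependentMultitaskGP example (PlainBatchIndependentGPModel is just my name for it):

class PlainBatchIndependentGPModel(gpytorch.models.ExactGP):
    # Batch-independent multitask GP without product structure / grid interpolation
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([10]))
        # One independent RBF kernel per task via the batch dimension
        self.covar_module = ScaleKernel(
            RBFKernel(batch_shape=torch.Size([10])),
            batch_shape=torch.Size([10]),
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
            gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
        )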