Evaluating the bivariate distribution uses a lot of memory, even with only 500 grid points per variable. Is there any way to reduce the memory usage here? Here is some code for illustration:
import torch
import torch.distributions as dist
import numpy as np
def CDF_update_bivariate(mean, cov, PDF, x, y):
    asr = 0.9  # mixing weight kept by the previous PDF
    # Build the new bivariate normal; cast to float64 so it matches the grid dtype
    new_dist = dist.MultivariateNormal(torch.tensor(mean, dtype=torch.float64),
                                       torch.tensor(cov, dtype=torch.float64))
    # Evaluate the new density on the full 500 x 500 grid (the expensive part)
    X, Y = torch.meshgrid(x, y, indexing='ij')
    grid = torch.stack((X.flatten(), Y.flatten()), dim=1)
    pdf_new = torch.exp(new_dist.log_prob(grid)).reshape(500, 500)
    # Mix the old and new PDFs and renormalize so the density integrates to 1
    weighted_sum = asr * PDF + (1 - asr) * pdf_new
    normalized_pdf = weighted_sum / torch.sum(weighted_sum) * x.size(0) * y.size(0) / ((x[-1] - x[0]) * (y[-1] - y[0]))
    # CDF via cumulative sums along both axes, normalized to end at 1
    cdf = torch.cumsum(torch.cumsum(normalized_pdf, dim=0), dim=1)
    normalized_cdf = cdf / cdf[-1, -1]
    return normalized_pdf, normalized_cdf
# 500 grid points per variable, in float64
x = torch.linspace(0.1 + 1e-4, 3, 500).double()
sk_max = np.exp(3 * 0.01 / np.sqrt(1 - 0.8**2))
sk_min = np.exp(-3 * 0.01 / np.sqrt(1 - 0.8**2))
y = torch.linspace(sk_min, sk_max, 500).double()
X, Y = torch.meshgrid(x, y, indexing='ij')
# Initial PDF: uniform over the rectangle spanned by the grid
area = (x[-1] - x[0]) * (y[-1] - y[0])
PDF = torch.full(X.shape, 1.0 / area.item(), dtype=torch.float64)
# Initial CDF: product of the two uniform marginal CDFs
cdf_x = (X - x[0]) / (x[-1] - x[0])
cdf_y = (Y - y[0]) / (y[-1] - y[0])
CDF = cdf_x * cdf_y
mean = [0, 0]
cov = [[1, 0], [0, 1]]
PDF, CDF = CDF_update_bivariate(mean, cov, PDF, x, y)
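One thing I noticed is that when the covariance is diagonal, as in this toy example, the bivariate density factorizes into two univariate normals, so the 500 x 500 table could be built from two 500-point evaluations and an outer product instead of a 250,000 x 2 grid. A rough sketch, only valid for the diagonal case (pdf_new_separable and var_diag are just names I made up here):

# Sketch: for diagonal covariance the joint density is the product of the
# two marginal densities, so an outer product of 1-D evaluations suffices.
def pdf_new_separable(mean, var_diag, x, y):
    dist_x = dist.Normal(torch.tensor(mean[0], dtype=torch.float64),
                         torch.tensor(var_diag[0], dtype=torch.float64).sqrt())
    dist_y = dist.Normal(torch.tensor(mean[1], dtype=torch.float64),
                         torch.tensor(var_diag[1], dtype=torch.float64).sqrt())
    px = torch.exp(dist_x.log_prob(x))  # marginal density on the x grid, shape (500,)
    py = torch.exp(dist_y.log_prob(y))  # marginal density on the y grid, shape (500,)
    return torch.outer(px, py)          # joint density on the full 500 x 500 grid
# e.g. pdf_new = pdf_new_separable(mean, [cov[0][0], cov[1][1]], x, y)

That does not cover a general covariance matrix, though.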
In the code above, the part that evaluates the distribution is
X, Y = torch.meshgrid(x, y, indexing='ij')
grid = torch.stack((X.flatten(), Y.flatten()), dim=1)
pdf_new = torch.exp(new_dist.log_prob(grid)).reshape(500, 500)
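One workaround that might help is to evaluate log_prob over chunks of rows, so the full 250,000 x 2 grid is never materialized at once. A rough sketch (chunk_rows = 50 is an arbitrary choice, and pdf_new_chunked is just a name I am using here):

def pdf_new_chunked(new_dist, x, y, chunk_rows=50):
    # Evaluate the density block by block: only chunk_rows * y.size(0)
    # points are held in memory at a time instead of the whole grid.
    nx, ny = x.size(0), y.size(0)
    pdf_new = torch.empty(nx, ny, dtype=torch.float64)
    for start in range(0, nx, chunk_rows):
        stop = min(start + chunk_rows, nx)
        Xc, Yc = torch.meshgrid(x[start:stop], y, indexing='ij')
        grid_c = torch.stack((Xc.flatten(), Yc.flatten()), dim=1)
        pdf_new[start:stop] = torch.exp(new_dist.log_prob(grid_c)).reshape(stop - start, ny)
    return pdf_new

If something like this is reasonable, it would replace the three lines above inside CDF_update_bivariate, but maybe there is a better way.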