[Moved here from the discussion in this forum thread.]

Doing MLE on the skewness parameter of a Stable distribution does not recover the true parameter: the skewness estimate stays near 0 regardless of its initial value.
```python
# adapted from https://github.com/fritzo/notebooks/blob/master/stable_mle.ipynb
import math

import matplotlib.pyplot as plt
import torch

import pyro
import pyro.distributions as dist
from pyro.distributions import constraints
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoGaussian, AutoNormal
from pyro.infer.reparam import MinimalReparam

torch.set_default_dtype(torch.float64)
pyro.set_rng_seed(20230928)

# Define true parameters and number of datapoints.
alpha = 1.5
beta = 0.8
c = 1.0
mu = 0.0
n = 10000

# Sample data.
data = dist.Stable(alpha, beta, c, mu).sample((n,))


@MinimalReparam()
def model(data):
    alpha = 1.5  # pyro.param("alpha", torch.tensor(1.99), constraint=constraints.interval(0, 2))
    beta = pyro.param("beta", torch.tensor(0.5), constraint=constraints.interval(-1, 1))
    c = 1.0  # pyro.param("c", torch.tensor(1.0), constraint=constraints.positive)
    mu = 0.0  # pyro.param("mu", torch.tensor(0.0), constraint=constraints.real)
    with pyro.plate("data", data.shape[0]):
        pyro.sample("obs", dist.Stable(alpha, beta, c, mu), obs=data)


def train(model, guide, num_steps=1001, lr=0.1):
    pyro.clear_param_store()
    pyro.set_rng_seed(20230928)

    # Set up the ELBO and the optimizer; the initial loss() call
    # initializes the guide's parameters before SVI is constructed.
    elbo = Trace_ELBO()
    elbo.loss(model, guide, data=data)
    optim = pyro.optim.Adam({"lr": lr})
    svi = SVI(model, guide, optim, loss=elbo)

    # Optimize.
    losses = []
    for i in range(num_steps):
        loss = svi.step(data) / data.numel()
        losses.append(loss)
        if i % 100 == 0:
            print(f"step {i} loss = {loss:0.6g}")

    print(f"Parameter estimates (n = {n}):")
    print(f"beta: Estimate = {pyro.param('beta')}, true = {beta}")
    return losses


guide = AutoNormal(model)
train(model, guide);
```
gives us something like:

```
step 0 loss = 57.484
step 100 loss = 2.82283
step 200 loss = 2.64291
step 300 loss = 2.57734
step 400 loss = 2.57825
step 500 loss = 2.55413
step 600 loss = 2.58021
step 700 loss = 2.55925
step 800 loss = 2.56627
step 900 loss = 2.55161
step 1000 loss = 2.54688
Parameter estimates (n = 10000):
beta: Estimate = -0.0007896144629401247, true = 0.8
```
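
As a cross-check that the simulated draws actually carry the skewness information, one could fit the same data with a classical numerical MLE outside Pyro. Below is a minimal sketch, not part of the original report, assuming scipy with `scipy.stats.levy_stable` is available (its `fit` can be quite slow at n = 10000):

```python
# Hypothetical cross-check, not from the original report: classical MLE via
# scipy. scipy's levy_stable shares the (alpha, beta) shape parameters with
# pyro.distributions.Stable; only the location convention (S0 vs S1) differs,
# so the skewness estimate is directly comparable.
from scipy.stats import levy_stable

# rv_continuous.fit returns (shape params..., loc, scale).
alpha_hat, beta_hat, loc_hat, scale_hat = levy_stable.fit(data.numpy())
print(f"scipy MLE: alpha={alpha_hat:.3f}, beta={beta_hat:.3f}, "
      f"loc={loc_hat:.3f}, scale={scale_hat:.3f}")
```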
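
It may also help to check whether the variational objective is informative about `beta` at all, by freezing the trained guide from the run above and profiling the stochastic ELBO over a grid of `beta` values. A rough diagnostic sketch, not from the original report, assuming the `model`, `guide`, and `data` defined above (the ParamStore supports item assignment, which preserves the registered constraint); since the guide was trained at `beta` near 0, the profile is only indicative:

```python
# Hypothetical diagnostic, not from the original report: profile the
# stochastic ELBO at fixed beta values, averaging over particles to tame
# the noise from the auxiliary reparameterization variables.
store = pyro.get_param_store()
profile_elbo = Trace_ELBO(num_particles=32)
grid = torch.linspace(-0.9, 0.9, 19)
profile = []
for b in grid:
    store["beta"] = b.clone()  # overwrite the trained value in place
    with torch.no_grad():
        profile.append(profile_elbo.loss(model, guide, data=data) / n)

plt.plot(grid.tolist(), profile)
plt.xlabel("beta")
plt.ylabel("-ELBO per datapoint")
plt.show()
```

If the resulting curve is essentially flat in `beta`, that would point at the reparameterized objective rather than the data as the reason the estimate collapses to 0.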