One such approach is to use probabilistic models or deep learning methods that output a probability distribution for the prediction rather than a single point estimate.
Bayesian neural networks (Bayesian Neural Networks) extend ordinary neural networks by placing probability distributions over their parameters, which makes it possible to obtain a distribution over the outputs instead of a single value. Below is an example using the Pyro library for PyTorch:
import torch
import torch.nn as nn
import pyro
import pyro.distributions as dist
import pyro.nn as pnn
from pyro.infer import SVI, Trace_ELBO, Predictive
from pyro.infer.autoguide import AutoDiagonalNormal
from pyro.optim import Adam
class BayesianNN(pnn.PyroModule):
    def __init__(self, in_features, out_features):
        super().__init__()
        # A linear layer whose weight and bias are random variables with Normal priors
        self.linear = pnn.PyroModule[nn.Linear](in_features, out_features)
        self.linear.weight = pnn.PyroSample(
            dist.Normal(0., 1.).expand([out_features, in_features]).to_event(2))
        self.linear.bias = pnn.PyroSample(
            dist.Normal(0., 10.).expand([out_features]).to_event(1))

    def forward(self, x, y=None):
        mean = self.linear(x).squeeze(-1)  # predicted mean, shape (batch,)
        # Observation noise with a uniform prior
        sigma = pyro.sample("sigma", dist.Uniform(0., 10.))
        with pyro.plate("data", x.shape[0]):
            pyro.sample("obs", dist.Normal(mean, sigma), obs=y)
        return mean
# Instantiate the model once (one input feature, one output). A hand-written guide
# that rebuilds the network and draws weights from fixed random means on every call
# has no learnable variational parameters, so use an autoguide instead: a mean-field
# Normal posterior over all latent weights and biases.
model = BayesianNN(1, 1)
guide = AutoDiagonalNormal(model)
x = torch.randn(100, 1)
y = (3 * x + torch.randn(100, 1)).squeeze(-1)  # 1-D targets to match the "data" plate
pyro.clear_param_store()
svi = SVI(model, guide, Adam({"lr": 0.01}), loss=Trace_ELBO())

num_iterations = 5000
for j in range(num_iterations):
    loss = svi.step(x, y)
    if j % 1000 == 0:
        print(f"[iteration {j+1}] loss: {loss}")
predictive = Predictive(model, guide=guide, num_samples=1000)
samples = predictive(x)
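The returned samples can be reduced to a predictive mean and an uncertainty band; a minimal sketch, assuming the samples dictionary produced above:

# 1000 posterior predictive draws of "obs", one row per draw
obs = samples["obs"]                 # shape: (1000, 100)
pred_mean = obs.mean(dim=0)          # predictive mean for each input
pred_std = obs.std(dim=0)            # predictive standard deviation = uncertainty
lower = pred_mean - 1.96 * pred_std  # approximate 95% predictive interval
upper = pred_mean + 1.96 * pred_std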
Heteroscedastic regression in Keras. The network predicts both the mean and the log-variance of the target for each input and is trained with the Gaussian negative log-likelihood, so the predicted variance serves as a per-input uncertainty estimate.
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Synthetic 1-D data for illustration; the noise level grows with |x|,
# so the variance the model should learn depends on the input
num_features = 1
x_train = np.random.randn(1000, num_features).astype("float32")
y_train = 3 * x_train + np.abs(x_train) * np.random.randn(1000, 1).astype("float32")

# Model with two heads: predicted mean and predicted log-variance.
# The heads are concatenated into one output so a single loss sees both;
# with separate outputs, Keras would apply the loss to each head independently.
inputs = keras.Input(shape=(num_features,))
x = layers.Dense(64, activation="relu")(inputs)
x = layers.Dense(64, activation="relu")(x)
mean = layers.Dense(1)(x)
log_var = layers.Dense(1)(x)
outputs = layers.Concatenate()([mean, log_var])
model = keras.Model(inputs=inputs, outputs=outputs)
# Loss function: Gaussian negative log-likelihood; y_pred carries [mean, log_var]
def nll(y_true, y_pred):
    mean = y_pred[:, 0:1]
    log_var = y_pred[:, 1:2]
    precision = tf.exp(-log_var)
    return tf.reduce_mean(0.5 * np.log(2 * np.pi) + 0.5 * log_var
                          + 0.5 * tf.square(y_true - mean) * precision)

model.compile(optimizer="adam", loss=nll)
# Train the model; a single target tensor now matches the single concatenated output
model.fit(x_train, y_train, epochs=100, batch_size=32)
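To read the uncertainty back out at inference time, split the concatenated output into its two heads; a minimal sketch, with x_test as hypothetical new inputs:

# Predict on new inputs and split the concatenated output back into heads
x_test = np.linspace(-3, 3, 50).reshape(-1, 1).astype("float32")
preds = model.predict(x_test)
pred_mean = preds[:, 0]               # predicted mean of y
pred_std = np.exp(0.5 * preds[:, 1])  # exp(log_var / 2) = predicted std (uncertainty)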