PyTorch linear fitting — a linear regression experiment

import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable

w_target = torch.randn(3, 1) * 5
b_target = torch.randn(1) * 5

# print(b_target, torch.randn(1))

def get_batch(batch_size=32):
    """Sample one training batch from the synthetic linear model.

    Args:
        batch_size: number of samples to draw (default 32).

    Returns:
        (x, y): ``x`` is a (batch_size, 3) standard-normal input tensor;
        ``y`` is the (batch_size, 1) target ``x @ w_target + b_target[0]``
        plus a small uniform noise offset.
    """
    x = torch.randn(batch_size, w_target.size(0))
    # BUG FIX: the original computed x.mm(w_target + torch.randn(...)),
    # i.e. it added N(0, 1) noise to the *weights* inside the matmul, which
    # corrupts the target with noise of magnitude ~|x|. The noise/offset
    # belongs on the output, outside the matmul.
    y = x.mm(w_target) + b_target[0] + torch.rand(1)[0]
    return Variable(x), Variable(y)

# The model to fit: a single linear layer matching the ground truth's shape
# (3 inputs -> 1 output).
fc = torch.nn.Linear(w_target.size(0), 1)

for batch_idx in range(2000):
    batch_x, batch_y = get_batch()
    fc.zero_grad()  # clear gradients accumulated by the previous step

    # Mean squared error between prediction and target.
    loss = torch.mean((fc(batch_x) - batch_y).pow(2))
    # loss = F.smooth_l1_loss(fc(batch_x), batch_y)  # alternative robust loss
    # (removed a stray no-op literal `2` that was fused onto this comment)

    loss.backward()

    # Manual SGD step with learning rate 0.01.
    for param in fc.parameters():
        param.data.add_(param.grad.data, alpha=-0.01)

    # Early stop once the fit is good enough.
    # BUG FIX: `loss.data[0]` raises IndexError on 0-dim tensors in
    # PyTorch >= 0.4; `loss.item()` is the supported scalar accessor.
    if loss.item() < 1e-4:
        break

print(f"{loss.item():.5} {batch_idx}")
print('Learned:', fc.weight.data.numpy(), fc.bias.data.numpy())
print('Actual:', w_target.numpy().transpose(1, 0), b_target.numpy())


Published: 2017-11-16 13:18:07

Original link (please retain when reposting): http://www.multisilicon.com/blog/a24302819.html

友情链接: MICROIC
首页