Published: 2022-12-07 19:30
Regression is a method for modeling the relationship between two or more variables, and it is widely used in machine learning. Linear regression is the simplest kind: it assumes a linear relationship between the independent variables and the dependent variable. With PyTorch, linear regression can be implemented in just a few lines of code.
First, recall the basic assumption of linear regression: the target is a linear function of the features plus noise, i.e. y = Xw + b + ε, where ε is small zero-mean Gaussian noise.
Such a simple dataset does not need to be downloaded from anywhere; we can simply generate it ourselves.
def data_maker(w, b, n_size):  # generate n_size samples from y = x*w + b plus noise
    X = torch.normal(0, 1, (n_size, len(w)))  # features: n_size x len(w)
    y = torch.matmul(X, w) + b
    y = y + torch.normal(0, 0.01, y.shape)    # add small Gaussian noise
    return X, y.reshape((-1, 1))
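To sanity-check the generator, we can draw a few samples and inspect their shapes; the 2-feature weight vector and sample count below are arbitrary example values, not the ones used later in the post.

example_w = torch.tensor([2.0, -3.4])          # arbitrary example weights
example_b = 4.2                                # arbitrary example bias
features, labels = data_maker(example_w, example_b, 10)
print(features.shape, labels.shape)            # torch.Size([10, 2]) torch.Size([10, 1])
print(features[0], labels[0])                  # one (x, y) pair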
Without loss of generality, training usually reads the data one mini-batch at a time. For convenience, we can use yield to write the batch reader as a generator.
def data_iter(batch_size, x, y):
    n = len(x)
    index = list(range(n))
    random.shuffle(index)  # shuffle the sample order each time the iterator is created
    for i in range(0, n, batch_size):
        batch_index = torch.tensor(index[i:min(i + batch_size, n)])
        yield x[batch_index], y[batch_index]
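A quick way to verify the iterator is to pull a single mini-batch and look at its shape. This sketch assumes x and y have already been produced by data_maker, as in the complete code further down.

for X_batch, y_batch in data_iter(5, x, y):
    print(X_batch.shape, y_batch.shape)  # one batch of features and labels
    break                                # stop after inspecting the first batch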
For a better understanding, the model, loss function, and optimizer are all defined by hand here, but we still rely on PyTorch's automatic differentiation to compute the gradients.
def linreg(X, w, b):          # linear model
    return torch.matmul(X, w) + b

def loss_function(Y, y):      # element-wise squared loss
    return (Y - y.reshape(Y.shape)) ** 2 / 2

def SGD(params, learning_rate, batch_size):  # minibatch stochastic gradient descent
    with torch.no_grad():
        for param in params:
            param -= learning_rate * param.grad / batch_size
            param.grad.zero_()               # clear the gradient for the next step
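To see autograd at work, here is a single hand-run update step on one mini-batch; it is only a sketch, assuming x, y, w, and b have already been created as in the complete code below, and using the same learning rate of 0.001.

X_batch, y_batch = next(data_iter(5, x, y))        # grab one mini-batch
l = loss_function(linreg(X_batch, w, b), y_batch)
l.sum().backward()                                 # autograd fills w.grad and b.grad
print(w.grad, b.grad)                              # gradients are now populated
SGD([w, b], 0.001, 5)                              # one parameter update; grads reset to zero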
The training loop is much the same as for any other model (here net = linreg and loss = loss_function; w, b, batch_size, and num_epochs are initialized in the complete code below).
#training
for epoch in range(num_epochs):
    for X, Y in data_iter(batch_size, x, y):
        l = loss(net(X, w, b), Y)          # mini-batch loss
        l.sum().backward()                 # backpropagate to get gradients of w and b
        SGD([w, b], 0.001, batch_size)     # update the parameters
    with torch.no_grad():
        train_loss = loss(net(x, w, b), y)
        print(f'epoch {epoch + 1}, loss {float(train_loss.mean())}')
The complete code is as follows.
import torch
import random
def data_maker(w, b, n_size):  # generate n_size samples from y = x*w + b plus noise
    X = torch.normal(0, 1, (n_size, len(w)))  # features: n_size x len(w)
    y = torch.matmul(X, w) + b
    y = y + torch.normal(0, 0.01, y.shape)    # add small Gaussian noise
    return X, y.reshape((-1, 1))
W = torch.tensor([4.0])        # true weight
B = 1                          # true bias
x, y = data_maker(W, B, 1000)  # 1000 synthetic samples
def data_iter(batch_size, x, y):
    n = len(x)
    index = list(range(n))
    random.shuffle(index)  # shuffle the sample order each time the iterator is created
    for i in range(0, n, batch_size):
        batch_index = torch.tensor(index[i:min(i + batch_size, n)])
        yield x[batch_index], y[batch_index]
def linreg(X, w, b):          # linear model
    return torch.matmul(X, w) + b

def loss_function(Y, y):      # element-wise squared loss
    return (Y - y.reshape(Y.shape)) ** 2 / 2

def SGD(params, learning_rate, batch_size):  # minibatch stochastic gradient descent
    with torch.no_grad():
        for param in params:
            param -= learning_rate * param.grad / batch_size
            param.grad.zero_()               # clear the gradient for the next step
batch_size = 5
num_epochs = 50
net = linreg
loss = loss_function
w = torch.normal(0, 1, size=(1, 1), requires_grad=True)  # initial weight
b = torch.normal(0, 1, size=(1, 1), requires_grad=True)  # initial bias
#training
for epoch in range(num_epochs):
    for X, Y in data_iter(batch_size, x, y):
        l = loss(net(X, w, b), Y)          # mini-batch loss
        l.sum().backward()                 # backpropagate to get gradients of w and b
        SGD([w, b], 0.001, batch_size)     # update the parameters
    with torch.no_grad():
        train_loss = loss(net(x, w, b), y)
        print(f'epoch {epoch + 1}, loss {float(train_loss.mean())}')
print(w, b)  # learned parameters, which should be close to W = 4.0 and B = 1
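If training has converged, the learned parameters should be close to the true values used to generate the data. A minimal sketch of checking the estimation error, reusing W, B, w, and b from above:

print(f'error in w: {(W - w.reshape(W.shape)).tolist()}')
print(f'error in b: {B - b.item()}')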