Deep Learning and PyTorch: A Hands-On Introduction

Published: 2023-12-16 10:00

Deep Learning and PyTorch Hands-On Introduction, Notes 1

Data and Initialization

import torch
a = torch.randn(2,3) # random initialization, sampled from a standard normal distribution
print(a)
# tensor([[-1.0912, -0.3995, -0.3796],
#         [ 0.5615, -0.2030,  0.4820]])

print(a.size()) # a method of the Tensor class
# torch.Size([2, 3])

print(list(a.size())) # can be converted to a Python list
# [2, 3]

print(a.size()[0])
# 2

print(a.shape) # an attribute of the Tensor class
# torch.Size([2, 3])

print(list(a.shape))
# [2, 3]

print(a.shape[1])
#3

print(a.dim()) # a method: number of dimensions
#2

print(a.numel()) # total number of elements
#6

print(torch.tensor([2,3])) # the argument is the actual data; note the difference from the next call
# tensor([2, 3])

print(torch.FloatTensor(2,3)) # the arguments are the shape (values are whatever is in uninitialized memory); note the difference
# tensor([[ 3.2779e-32,  1.0412e-42, -3.7955e-01],
#         [ 5.6147e-01,  0.0000e+00,  0.0000e+00]])

d = torch.IntTensor(2,3)
print(d.type()) #torch.IntTensor

torch.set_default_tensor_type(torch.FloatTensor) # after changing this, torch.tensor() produces the new default type
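# A quick sketch of what the default-type switch changes: torch.tensor() on float
# data should now report the new default type. (set_default_tensor_type still works
# but is deprecated in newer PyTorch in favor of torch.set_default_dtype.)
print(torch.tensor([1.2, 3.0]).type())   # torch.FloatTensor
torch.set_default_tensor_type(torch.DoubleTensor)
print(torch.tensor([1.2, 3.0]).type())   # torch.DoubleTensor
torch.set_default_tensor_type(torch.FloatTensor)  # switch back for the rest of the notes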

e = torch.rand(3,3) # rand samples uniformly from [0, 1)
print(e)
# tensor([[0.2144, 0.6302, 0.4405],
#         [0.9755, 0.4464, 0.0713],
#         [0.8457, 0.8662, 0.8452]])

print(torch.rand_like(e)) # feed the shape of e to the function, then rand with that shape
# tensor([[0.2798, 0.1630, 0.3823],
#         [0.6293, 0.2503, 0.5224],
#         [0.9113, 0.8624, 0.1692]])

print(torch.randint(1,10,[3,3]))
# tensor([[9, 7, 9],
#         [9, 4, 3],
#         [2, 9, 2]])

print(torch.full([2,3],7))
# tensor([[7, 7, 7],
#         [7, 7, 7]])
print(torch.full([],6))
#tensor(6)

print(torch.linspace(0,10,steps=4)) # from 0 to 10, 4 evenly spaced points
#tensor([ 0.0000,  3.3333,  6.6667, 10.0000])

print(torch.logspace(0,2,steps=3)) # 10^0 to 10^2, 3 points evenly spaced in the exponent
# tensor([  1.,  10., 100.])
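# A small sanity check on the steps argument: it counts points, not intervals.
# linspace includes both endpoints; arange (used later in these notes) takes a
# step size instead and excludes the stop value.
print(torch.linspace(0, 10, steps=5))   # tensor([ 0.0000,  2.5000,  5.0000,  7.5000, 10.0000])
print(torch.arange(0, 10, 2.5))         # tensor([0.0000, 2.5000, 5.0000, 7.5000])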

Indexing and Slicing

# In plain Python, [:] and .copy() are "shallow copies": only the outermost elements are copied, while nested inner elements are shared by reference instead of getting their own memory.
# https://www.cnblogs.com/malinqing/p/11272485.html
import torch
# indexing
a = torch.rand(4,3,28,28)
print(a[0].shape)
# torch.Size([3, 28, 28])
print(a[0,0].shape)
# torch.Size([28, 28])

print(a[:2].shape) # start:stop is a half-open interval, the stop index itself is excluded, so this covers a[0] and a[1]
# torch.Size([2, 3, 28, 28])

print(a[:2,:1,:,:].shape)
# torch.Size([2, 1, 28, 28])

print(a[:2,1:,:,:].shape)
# torch.Size([2, 2, 28, 28])

print(a[:2,-1:,:,:].shape) # -1 is the first index counting from the end
# torch.Size([2, 1, 28, 28])
print(a[:2,-2:,:,:].shape) # -2 is the second index counting from the end
# torch.Size([2, 2, 28, 28])

print(a[:,:,0:28:2,0:20:2].shape) # the second colon gives the step, i.e. interval sampling
# torch.Size([4, 3, 14, 10])
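# The same interval sampling with start/stop omitted: ::2 means every second element.
print(a[:, :, ::2, ::2].shape)
# torch.Size([4, 3, 14, 14])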

aaa = torch.tensor([0,2]) # pick values with a custom list of indices
print(a.index_select(1,aaa).shape) # note that aaa has to be a tensor
# torch.Size([4, 2, 28, 28])

print(a[:2,1,...,-2:].shape) # ... stands for all the dims in between
# torch.Size([2, 28, 2])
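# A minimal check that ... just fills in the untouched middle dims:
# a[0, ...] indexes the same sub-tensor as a[0].
print(torch.equal(a[0, ...], a[0]))   # True
print(a[..., :2].shape)               # torch.Size([4, 3, 28, 2])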


# select by mask
x = torch.randn(3,4)
mask = x.ge(0.5) # elements of x that are >= 0.5 become True, the rest False; that is the mask
print(mask)
# tensor([[ True, False, False, False],
#         [False, False,  True,  True],
#         [ True, False,  True, False]])
mm = torch.masked_select(x,mask) # pull out the elements of x where the mask is True
print(mm.shape)
# torch.Size([5])
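# masked_select always returns a flattened 1-D result, because the number of
# selected elements depends on the data; boolean indexing gives the same values.
print(torch.equal(mm, x[mask]))   # True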

Dimension Transformations

# dimension transformations
import torch
a = torch.rand(4,1,28,28)
print(a.shape)

print(a.view(4,28,28).shape) # torch.Size([4, 28, 28])
# view effectively flattens the data and then refills it into the new shape;
# it returns a new tensor rather than modifying a in place
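# A quick sketch of that intuition: the total element count must stay the same.
print(a.view(4, 28*28).shape)                      # torch.Size([4, 784]) -- flatten, e.g. before a linear layer
print(a.view(4, 28*28).view(4, 1, 28, 28).shape)   # torch.Size([4, 1, 28, 28]) -- back to the original shape
# a.view(4, 28, 28, 2)  # would raise a RuntimeError: 6272 elements requested, only 3136 available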



# unsqueeze: add a dimension
print(a.unsqueeze(0).shape) # torch.Size([1, 4, 1, 28, 28])
# unsqueeze(0) inserts a new dim at the very front
print(a.unsqueeze(-1).shape) # torch.Size([4, 1, 28, 28, 1])
# unsqueeze(-1) inserts a new dim at the very end
# other unsqueeze positions follow the same pattern

# squeeze: remove dimensions
# the argument is the position of the dim to drop; with no argument all size-1 dims are removed
b = torch.rand(1,32,1,1)
print(b.squeeze().shape) # torch.Size([32])
print(b.squeeze(0).shape) # torch.Size([32, 1, 1])
print(b.squeeze(-1).shape) # torch.Size([1, 32, 1])
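# Squeezing a dim whose size is not 1 is simply a no-op, not an error.
print(b.squeeze(1).shape)   # torch.Size([1, 32, 1, 1]) -- dim 1 has size 32, so nothing is removed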


# expand/repeat: growing dimensions
# expand: broadcasting; the arguments are the target sizes, and only dims of size 1 can be expanded
# repeat: memory is copied; the arguments are how many times to repeat each dim

print(b.expand(4,32,14,14).shape)
# torch.Size([4, 32, 14, 14])

print(b.repeat(4,32,1,1).shape)
# torch.Size([4, 1024, 1, 1])
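# expand is only legal on dims whose current size is 1, and -1 leaves a dim unchanged.
print(b.expand(-1, 32, 14, 14).shape)   # torch.Size([1, 32, 14, 14])
# b.expand(4, 33, 14, 14)               # would fail: dim 1 has size 32, which cannot be expanded to 33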



# .t() is transpose, only valid for 2-D tensors
# permute
b  = torch.rand(4,3,28,32)
print(b.permute(0,2,3,1).shape)
# torch.Size([4, 28, 32, 3])
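# A common pitfall worth noting here: permute changes the memory layout, so a
# follow-up view usually needs .contiguous() first.
print(b.permute(0, 2, 3, 1).contiguous().view(4, -1).shape)   # torch.Size([4, 2688])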

Broadcasting

# broadcasting performs the unsqueeze and expand automatically
# shapes are matched from the trailing dims backwards, i.e. the smaller tensor is unsqueezed at the front
c = torch.rand(28,1)
d = b+c
print(d.shape) #torch.Size([4, 3, 28, 32])
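# The matching is right-aligned: a trailing shape such as (32,) also broadcasts
# against (4, 3, 28, 32), while a leading mismatch does not.
bias = torch.rand(32)
print((b + bias).shape)   # torch.Size([4, 3, 28, 32])
# b + torch.rand(4, 3)    # would fail: (4, 3) does not line up with the trailing dims (28, 32)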

Concatenation and Splitting

# concatenation and splitting
# cat: all dims except the concatenation dim must match
a = torch.rand(4,32,8)
b = torch.rand(5,32,8)
print(torch.cat([a,b],dim=0).shape)
# torch.Size([9, 32, 8])

# stack adds a brand-new dim; the tensors being stacked must have identical shapes
a = torch.rand(4,32,8)
b = torch.rand(4,32,8)
print(torch.stack([a,b],dim=0).shape)
# torch.Size([2, 4, 32, 8])
print(torch.stack([a,b],dim=-2).shape)
# torch.Size([4, 32, 2, 8])
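# cat only needs the non-concatenated dims to match, while stack needs identical
# shapes because it creates a brand-new dim. A quick sketch:
c1 = torch.rand(4, 32, 8)
c2 = torch.rand(4, 31, 8)
print(torch.cat([c1, c2], dim=1).shape)   # torch.Size([4, 63, 8])
# torch.stack([c1, c2], dim=0)            # would fail: stack expects every tensor to be equal size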

# splitting
# split: split by length (by len)
# if the first argument is a single number, every piece has that length; if it is a list, it gives each piece's length
a = torch.rand(3,32,8)
aa,bb,cc = a.split(split_size=1,dim=0) # 1 is the length of each piece
print(aa.shape,bb.shape,cc.shape)
# torch.Size([1, 32, 8]) torch.Size([1, 32, 8]) torch.Size([1, 32, 8])

aa,bb = a.split([2,1],dim = 0) # 2 and 1 are the lengths along dim 0
print(aa.shape,bb.shape)
# torch.Size([2, 32, 8]) torch.Size([1, 32, 8])

aa,bb=a.split(split_size=2,dim = 0)
print(aa.shape,bb.shape)
# torch.Size([2, 32, 8]) torch.Size([1, 32, 8])

#chunk by num
aa,bb,cc = a.chunk(3,dim=0) # 3 is the number of chunks
print(aa.shape,bb.shape,cc.shape)
# torch.Size([1, 32, 8]) torch.Size([1, 32, 8]) torch.Size([1, 32, 8])
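# When the length is not evenly divisible, chunk makes the last piece smaller.
m = torch.rand(5, 32, 8)
print([t.shape for t in m.chunk(2, dim=0)])
# [torch.Size([3, 32, 8]), torch.Size([2, 32, 8])]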

Math Operations

# math operations
import torch
a = torch.ones(2,2)
b = torch.ones(2,2)
print(a+b)
print(torch.all(torch.eq(a-b,torch.sub(a,b))))
print(torch.all(torch.eq(a+b,torch.add(a,b))))
print(torch.all(torch.eq(a*b,torch.mul(a,b)))) # element-wise multiplication
print(torch.all(torch.eq(a/b,torch.div(a,b)))) # element-wise division

print(torch.mm(a,b)) # matrix multiplication, 2-D only
# tensor([[2., 2.],
#         [2., 2.]])
print(torch.matmul(a,b)) # recommended
# tensor([[2., 2.],
#         [2., 2.]])
print(a@b)
# tensor([[2., 2.],
#         [2., 2.]])

a = torch.rand(4,3,28,64)
b = torch.rand(4,3,64,32)
print(torch.matmul(a,b).shape)
# torch.Size([4, 3, 28, 32])
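# The typical use in a network: flatten the input, then multiply by a weight
# matrix (w here is a made-up (out_features, in_features) weight just for shape checking).
x = torch.rand(4, 784)
w = torch.rand(512, 784)
print((x @ w.t()).shape)   # torch.Size([4, 512])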

a = torch.exp(torch.ones(2,2))
print(torch.log(a))
# tensor([[1., 1.],
#         [1., 1.]])

Statistics

import torch
a = torch.full([8],1.)
print(a)
b = a.view(2,4)
c = a.view(2,2,2)
# torch.norm(a)

print(a.norm(1),b.norm(1),c.norm(1)) # the argument 1 means the 1-norm: sum of absolute values
# tensor(8.) tensor(8.) tensor(8.)

print(a.norm(2),b.norm(2),c.norm(2)) # the argument 2 means the 2-norm: square root of the sum of squares
# tensor(2.8284) tensor(2.8284) tensor(2.8284)

mm = b.norm(1,dim = 1)
print(mm,mm.shape)
# tensor([4., 4.]) torch.Size([2])

mm = c.norm(1,dim = 0)
print(mm,mm.shape)
# tensor([[2., 2.],
#         [2., 2.]]) torch.Size([2, 2])



a = torch.arange(8).view(2,4).float()
print(a)
# tensor([[0., 1., 2., 3.],
#         [4., 5., 6., 7.]])
print(a.min(),a.max(),a.mean(),a.prod(),a.sum()) # prod is the product of all elements
# tensor(0.) tensor(7.) tensor(3.5000) tensor(0.) tensor(28.)
print(a.argmax(),a.argmin()) # return the index into the flattened tensor
# tensor(7) tensor(0)

# argmax along a specific dimension
a = torch.randn(4,10)
print(a[0])
# tensor([-0.0474, -0.8717, -0.5189, -0.4401, -0.0993, -0.9494, -1.4318, -0.7474,
#         -1.2900,  0.8415])
print(a.argmax(dim=1))
# tensor([9, 4, 9, 4])

# keepdim=True keeps the number of dims unchanged; without it, the reduced dim disappears from the result
print(a.argmax(dim=1,keepdim=True))
# tensor([[0],
#         [5],
#         [6],
#         [4]])
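# keepdim only changes the shape of the result, not the indices themselves.
print(a.argmax(dim=1).shape)                 # torch.Size([4])
print(a.argmax(dim=1, keepdim=True).shape)   # torch.Size([4, 1])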

print(a.topk(2,dim=1)) # the k largest values
# torch.return_types.topk(
# values=tensor([[1.0655, 0.2601],
#         [1.6584, 1.5903],
#         [1.7887, 1.2711],
#         [2.1863, 1.8528]]),
# indices=tensor([[7, 4],
#         [1, 2],
#         [6, 9],
#         [7, 9]]))
print(a.topk(2,dim=1).values.shape)
# torch.Size([4, 2])
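# topk also takes a largest flag; largest=False returns the k smallest entries instead.
print(a.topk(2, dim=1, largest=False).values.shape)   # torch.Size([4, 2])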
