A tensor is a multidimensional array: the higher-dimensional generalization of scalars, vectors, and matrices.
Variable is a data type in torch.autograd, used mainly to wrap tensors for automatic differentiation. Starting with PyTorch 0.4.0, Variable was merged into Tensor.
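Since the merge, a plain Tensor carries the autograd machinery itself. A minimal sketch of what this looks like, using only the standard torch API and no Variable wrapper:

import torch

# since 0.4.0 a Tensor can track gradients directly
x = torch.tensor([2.0, 3.0], requires_grad=True)
y = (x ** 2).sum()
y.backward()    # populates x.grad
print(x.grad)   # tensor([4., 6.]), i.e. d(sum(x^2))/dx = 2x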
torch.tensor()
Purpose: create a tensor from data
torch.tensor(data, dtype=None, device=None, requires_grad=False, pin_memory=False)
import torch
import numpy as np

torch.manual_seed(1)

flag = True
if flag:
    arr = np.ones((3, 3))
    print("ndarray dtype:", arr.dtype)
    t = torch.tensor(arr, device='cuda')
    print(t)

Result:
ndarray dtype: float64
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], device='cuda:0', dtype=torch.float64)
torch.from_numpy(ndarray)
Purpose: create a Tensor from a numpy ndarray
Note: a tensor created with torch.from_numpy() shares memory with the original ndarray; modifying either one changes the other.
arr = np.array([[1, 2, 3], [4, 5, 6]])
t = torch.from_numpy(arr)
print('numpy array:\n', arr)
print('tensor:\n', t)

Result:
numpy array:
 [[1 2 3]
 [4 5 6]]
tensor:
 tensor([[1, 2, 3],
        [4, 5, 6]], dtype=torch.int32)
# modify arr and observe whether t changes
arr[0, 0] = 0
print('numpy array:\n', arr)
print('tensor:\n', t)

Result:
numpy array:
 [[0 2 3]
 [4 5 6]]
tensor:
 tensor([[0, 2, 3],
        [4, 5, 6]], dtype=torch.int32)
Changing either arr or t changes the other as well, because the two share memory.
torch.zeros()
Purpose: create an all-zero tensor of the given size
size: shape of the tensor, e.g. (3, 3)
out: output tensor to write into
layout: memory layout, e.g. torch.strided or torch.sparse_coo
device: target device, GPU/CPU
requires_grad: whether the tensor should track gradients
torch.zeros(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
# torch.zeros()
out_t = torch.tensor([1])
t = torch.zeros((3, 3), out=out_t)
print(t, '\n', out_t)
# t and out_t are in fact the same variable
print(id(t), id(out_t), id(t) == id(out_t))

Result:
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
 tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
84470355192 84470355192 True
torch.full() and torch.full_like()
Purpose: create a tensor filled with fill_value; torch.full() takes an explicit size, while torch.full_like() copies the shape of input
torch.full(size, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
# torch.full()
t = torch.full((3, 3), 1)
print(t)
t = torch.full((4, 4), 10)
print(t)

Result:
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
tensor([[10., 10., 10., 10.],
        [10., 10., 10., 10.],
        [10., 10., 10., 10.],
        [10., 10., 10., 10.]])
torch.arange()
Purpose: create a 1-D tensor of evenly spaced (arithmetic-progression) values
Note: the interval is half-open, [start, end)
torch.arange(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
# torch.arange()
t = torch.arange(2, 10, 2)
print(t)

Result:
tensor([2, 4, 6, 8])
torch.linspace()
Purpose: create a 1-D tensor of evenly spaced values
Note: the interval is closed at both ends, [start, end]
torch.linspace(start, end, steps=100, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
# torch.linspace()
t = torch.linspace(2, 10, 5)
print(t)
t = torch.linspace(2, 10, 6)  # step_length = (end - start) / (steps - 1)
print(t)

Result:
tensor([ 2.,  4.,  6.,  8., 10.])
tensor([ 2.0000,  3.6000,  5.2000,  6.8000,  8.4000, 10.0000])
torch.logspace()
Purpose: create a 1-D tensor of values evenly spaced on a logarithmic scale
Note: the length is steps and the base of the logarithm is base
torch.logspace(start, end, steps=100, base=10.0, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
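A minimal sketch with the default base of 10; the exponents are spaced evenly over [start, end] and the values are base ** exponent:

# torch.logspace()
t = torch.logspace(0, 4, steps=5)   # exponents 0, 1, 2, 3, 4
print(t)   # 10**0 .. 10**4, i.e. 1, 10, 100, 1000, 10000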
torch.eye()
Purpose: create an identity matrix (a 2-D tensor with ones on the diagonal)
Note: square by default; m defaults to n
torch.eye(n, m=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
# torch.eye()
t = torch.eye(n=2, m=2)
print(t)

Result:
tensor([[1., 0.],
        [0., 1.]])
torch.normal()
Purpose: generate samples from a normal (Gaussian) distribution. Four mean/std combinations are supported:
mean is a scalar, std is a scalar
mean is a scalar, std is a tensor
mean is a tensor, std is a scalar
mean is a tensor, std is a tensor
torch.normal(mean, std, out=None)
torch.normal(mean, std, size, out=None)
# torch.normal()
# mean: tensor, std: tensor
mean = torch.arange(1, 5, dtype=torch.float)
std = torch.arange(1, 5, dtype=torch.float)
t_normal = torch.normal(mean, std)
print('mean:{}\nstd:{}'.format(mean, std))

Result:
mean:tensor([1., 2., 3., 4.])
std:tensor([1., 2., 3., 4.])
# mean: scalar, std: scalar
t_normal = torch.normal(0., 1., size=(4,))
print(t_normal)

Result:
tensor([-0.4519, -0.1661, -1.5228,  0.3817])
# mean: tensor, std: scalar
mean = torch.arange(1, 5, dtype=torch.float)
std = 1
t_normal = torch.normal(mean, std)
print('mean:{}\nstd:{}'.format(mean, std))
print(t_normal)

Result:
mean:tensor([1., 2., 3., 4.])
std:1
tensor([-0.0276,  1.4369,  2.1077,  3.9417])
torch.rand() and torch.rand_like()
Purpose: generate samples uniformly distributed on the interval [0, 1)
torch.rand(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
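A minimal sketch; the sampled values differ on every run:

t = torch.rand((2, 2))   # uniform samples on [0, 1)
print(t)                 # values vary from run to run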
torch.randint() and torch.randint_like()
Purpose: generate integers uniformly distributed over [low, high)
torch.randint(low=0, high, size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
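A minimal sketch, drawing integers from [0, 10):

t = torch.randint(0, 10, size=(2, 3))
print(t)   # a 2x3 tensor of integers in 0..9; values vary per run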
torch.bernoulli()
Purpose: draw samples from a Bernoulli distribution (0-1 / two-point distribution), using input as the probability of 1
torch.bernoulli(input, generator=None, out=None)
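A minimal sketch; each element of input is the probability of drawing a 1 at that position:

probs = torch.tensor([0.0, 0.5, 1.0])
t = torch.bernoulli(probs)
print(t)   # first element is always 0., last always 1.; the middle one varies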
torch.randperm()
Purpose: generate a random permutation of the integers from 0 to n-1
torch.randperm(n, out=None, dtype=torch.int64, device=None, requires_grad=False)
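A minimal sketch; randperm is commonly used to shuffle indices, e.g. for one epoch over a dataset:

idx = torch.randperm(5)
print(idx)          # e.g. tensor([2, 0, 4, 1, 3]); order varies per run
data = torch.arange(10, 15)
print(data[idx])    # the same elements, visited in shuffled order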
torch.cat()
Purpose: concatenate tensors along an existing dimension dim
torch.cat(tensors, dim=0, out=None)
import torch

tensor = torch.ones((2, 3))
tensor0 = torch.cat([tensor, tensor], dim=0)
tensor1 = torch.cat([tensor, tensor], dim=1)
print('tensor0:{} shape:{}\ntensor1:{} shape:{}'.format(tensor0, tensor0.shape, tensor1, tensor1.shape))

Result:
tensor0:tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]]) shape:torch.Size([4, 3])
tensor1:tensor([[1., 1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1., 1.]]) shape:torch.Size([2, 6])
torch.stack()
Purpose: stack tensors along a newly created dimension dim
torch.stack(tensors, dim=0, out=None)
tensor_stack = torch.stack([tensor, tensor, tensor], dim=0)
print('tensor_stack:{} shape:{}'.format(tensor_stack, tensor_stack.shape))

Result:
tensor_stack:tensor([[[1., 1., 1.],
         [1., 1., 1.]],
        [[1., 1., 1.],
         [1., 1., 1.]],
        [[1., 1., 1.],
         [1., 1., 1.]]]) shape:torch.Size([3, 2, 3])
torch.chunk()
Purpose: split a tensor into equal chunks along dimension dim
Returns: a list of tensors
Note: if the size is not evenly divisible, the last chunk is smaller than the others
torch.chunk(input, chunks, dim=0)
a = torch.ones((2, 5))
list_of_tensors = torch.chunk(a, dim=1, chunks=2)
for idx, t in enumerate(list_of_tensors):
    print('tensor {}: {}, shape is {}'.format(idx + 1, t, t.shape))

Result:
tensor 1: tensor([[1., 1., 1.],
        [1., 1., 1.]]), shape is torch.Size([2, 3])
tensor 2: tensor([[1., 1.],
        [1., 1.]]), shape is torch.Size([2, 2])
a = torch.ones((2, 7))
list_of_tensors = torch.chunk(a, dim=1, chunks=3)
for idx, t in enumerate(list_of_tensors):
    print('tensor {}: {}, shape is {}'.format(idx + 1, t, t.shape))

Result:
tensor 1: tensor([[1., 1., 1.],
        [1., 1., 1.]]), shape is torch.Size([2, 3])
tensor 2: tensor([[1., 1., 1.],
        [1., 1., 1.]]), shape is torch.Size([2, 3])
tensor 3: tensor([[1.],
        [1.]]), shape is torch.Size([2, 1])
torch.split()
Purpose: split a tensor along dimension dim, into pieces of a fixed size or of the sizes given in a list
Returns: a list of tensors
torch.split(tensor, split_size_or_sections, dim=0)
tensor = torch.ones((2, 5))
list_of_tensors = torch.split(tensor, 2, dim=1)
for idx, t in enumerate(list_of_tensors):
    print('tensor {}: {}, shape is {}'.format(idx + 1, t, t.shape))

Result:
tensor 1: tensor([[1., 1.],
        [1., 1.]]), shape is torch.Size([2, 2])
tensor 2: tensor([[1., 1.],
        [1., 1.]]), shape is torch.Size([2, 2])
tensor 3: tensor([[1.],
        [1.]]), shape is torch.Size([2, 1])
tensor = torch.ones((2, 5))
list_of_tensors = torch.split(tensor, [2, 1, 2], dim=1)
for idx, t in enumerate(list_of_tensors):
    print('tensor {}: {}, shape is {}'.format(idx + 1, t, t.shape))

Result:
tensor 1: tensor([[1., 1.],
        [1., 1.]]), shape is torch.Size([2, 2])
tensor 2: tensor([[1.],
        [1.]]), shape is torch.Size([2, 1])
tensor 3: tensor([[1., 1.],
        [1., 1.]]), shape is torch.Size([2, 2])
torch.index_select()
Purpose: index data along dimension dim using the indices in index
Returns: a tensor assembled from the indexed data
torch.index_select(input, dim, index, out=None)
t = torch.randint(0, 9, size=(3, 3))
idx = torch.tensor([0, 2], dtype=torch.long)
t_select = torch.index_select(t, dim=1, index=idx)
print('t:\n{}\nt_select:\n{}'.format(t, t_select))

Result:
t:
tensor([[5, 8, 8],
        [7, 8, 3],
        [1, 8, 3]])
t_select:
tensor([[5, 8],
        [7, 3],
        [1, 3]])
torch.masked_select()
Purpose: select the elements where mask is True
Returns: a 1-D tensor
torch.masked_select(input, mask, out=None)
t = torch.randint(0, 9, size=(3, 3))
mask = t.le(5)  # le: <=, lt: <, ge: >=, gt: >
t_select = torch.masked_select(t, mask)  # the selected elements, flattened into a 1-D tensor
print('t:\n{}\nmask:\n{}\nt_select:\n{}'.format(t, mask, t_select))
torch.reshape()
Purpose: change the shape of a tensor
When the input tensor is contiguous in memory, the new tensor shares its data memory with input
torch.reshape(input, shape)
t = torch.randperm(8)
t_reshape = torch.reshape(t, (-1, 2, 2))
# -1 means this dimension's size is inferred from the other dimensions
print('t:\n{}\nt_reshape:\n{}'.format(t, t_reshape))
t[0] = 1024
print('t:\n{}\nt_reshape:\n{}'.format(t, t_reshape))
print('memory address of t.data: {}'.format(id(t.data)))
print('memory address of t_reshape.data: {}'.format(id(t_reshape.data)))

Result:
t:
tensor([3, 4, 0, 5, 2, 6, 7, 1])
t_reshape:
tensor([[[3, 4],
         [0, 5]],
        [[2, 6],
         [7, 1]]])
t:
tensor([1024,    4,    0,    5,    2,    6,    7,    1])
t_reshape:
tensor([[[1024,    4],
         [   0,    5]],
        [[   2,    6],
         [   7,    1]]])
memory address of t.data: 1654219674232
memory address of t_reshape.data: 1654219674232
torch.transpose()
Purpose: swap two dimensions of a tensor
torch.transpose(input, dim0, dim1)
t = torch.rand((2, 3, 4))
t_transpose = torch.transpose(t, dim0=1, dim1=2)
print('t shape:{}\nt_transpose shape:{}'.format(t.shape, t_transpose.shape))

Result:
t shape:torch.Size([2, 3, 4])
t_transpose shape:torch.Size([2, 4, 3])
torch.t()
Purpose: transpose a 2-D tensor; for a matrix this is equivalent to torch.transpose(input, 0, 1)
torch.t(input)
t = torch.rand((1, 2))
t_t = torch.t(t)
print('\nt:{}\nt_t:{}'.format(t, t_t))

Result:
t:tensor([[0.1058, 0.2047]])
t_t:tensor([[0.1058],
        [0.2047]])
torch.squeeze()
Purpose: remove dimensions (axes) of length 1; if dim is specified, only that dimension is removed, and only when its length is 1
torch.squeeze(input, dim=None, out=None)
t = torch.rand((1, 2, 3, 1))
t_sq = torch.squeeze(t)
t_sq1 = torch.squeeze(t, dim=0)
t_sq2 = torch.squeeze(t, dim=1)
print(t.shape)
print(t_sq.shape)
print(t_sq1.shape)
print(t_sq2.shape)

Result:
torch.Size([1, 2, 3, 1])
torch.Size([2, 3])
torch.Size([2, 3, 1])
torch.Size([1, 2, 3, 1])
torch.unsqueeze()
Purpose: insert a dimension of length 1 at position dim
torch.unsqueeze(input, dim, out=None)
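A minimal sketch, showing how dim chooses where the new length-1 axis goes:

t = torch.rand((2, 3))
print(torch.unsqueeze(t, dim=0).shape)   # torch.Size([1, 2, 3])
print(torch.unsqueeze(t, dim=2).shape)   # torch.Size([2, 3, 1])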
Addition, subtraction, multiplication, and division
torch.add()
torch.addcdiv()
torch.addcmul()
torch.sub()
torch.div()
torch.mul()
torch.add():
Purpose: compute input + alpha * other element-wise
torch.add(input, other, alpha=1, out=None)
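A minimal sketch, verifying the input + alpha * other formula:

t0 = torch.randn((3, 3))
t1 = torch.ones_like(t0)
t_add = torch.add(t0, t1, alpha=10)          # element-wise t0 + 10 * t1
print(torch.allclose(t_add, t0 + 10 * t1))   # True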
torch.addcdiv():
$out_i = input_i + value \times \frac{tensor1_i}{tensor2_i}$
torch.addcdiv(input, value=1, tensor1, tensor2, out=None)
torch.addcmul():
$out_i = input_i + value \times tensor1_i \times tensor2_i$
torch.addcmul(input, value=1, tensor1, tensor2, out=None)
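A minimal sketch of both fused operations on small float tensors:

t = torch.zeros((2, 2))
t1 = torch.ones((2, 2))
t2 = torch.full((2, 2), 2.)
print(torch.addcmul(t, t1, t2, value=10))   # 0 + 10 * 1 * 2 -> all elements 20.
print(torch.addcdiv(t, t1, t2, value=10))   # 0 + 10 * 1 / 2 -> all elements 5.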
Logarithmic, exponential, and power functions
torch.log(input, out=None)
torch.log10(input, out=None)
torch.log2(input, out=None)
torch.exp(input, out=None)
torch.pow()
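A minimal sketch of these element-wise functions:

x = torch.tensor([1., 2., 4.])
print(torch.log2(x))                       # tensor([0., 1., 2.])
print(torch.exp(torch.tensor([0., 1.])))   # tensor([1.0000, 2.7183])
print(torch.pow(x, 2))                     # tensor([ 1.,  4., 16.])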
Trigonometric and related functions
torch.abs(input, out=None)
torch.acos(input, out=None)
torch.cosh(input, out=None)
torch.cos(input, out=None)
torch.asin(input, out=None)
torch.atan(input, out=None)
torch.atan2(input, other, out=None)
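A minimal sketch; all of these operate element-wise as well:

x = torch.tensor([0.0])
print(torch.cos(x))                                         # tensor([1.])
print(torch.abs(torch.tensor([-1.5])))                      # tensor([1.5000])
print(torch.atan2(torch.tensor([1.]), torch.tensor([1.])))  # tensor([0.7854]), i.e. pi/4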
Reprinted from: http://iedo.baihongyu.com/