import torch

a = torch.randint(0, 10, (3, 4))
"""
Out:
tensor([[3, 7, 1, 3],
        [6, 4, 1, 3],
        [8, 8, 5, 7]])
"""
b = a.view(2, 6)
"""
Out:
tensor([[3, 7, 1, 3, 6, 4],
        [1, 3, 8, 8, 5, 7]])
"""
c = a.reshape(2, 6)
"""
Out:
tensor([[3, 7, 1, 3, 6, 4],
        [1, 3, 8, 8, 5, 7]])
"""
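view() and reshape() give identical results here, but they differ once the source tensor's memory layout is incompatible with the requested shape: view() never copies and raises an error instead, while reshape() falls back to making a copy. A minimal sketch (the transpose is just a convenient way to obtain a non-contiguous tensor):

t = a.t()                  # transpose shares a's storage but is non-contiguous
print(t.is_contiguous())
""" Out: False """
# t.view(12)               # raises RuntimeError: view size is not compatible
                           # with input tensor's size and stride
flat = t.reshape(12)       # succeeds: reshape copies when a view is impossible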
print(id(a) == id(b), id(a) == id(c), id(b) == id(c))
""" Out: False False False """

Background: in Python, variables and the data they refer to live in separate regions of memory, and a PyTorch Tensor is stored under a similar scheme. The tensor object plays the role of the Python variable: it holds the tensor's shape (size), stride, data type, and other metadata (or references to them), together with a reference to its Storage, and the Storage is what wraps the underlying data. The viewed object and the reshaped object live at different addresses from the original object, but they share the same Storage, which means they share the underlying data.

print(id(a.storage()) == id(b.storage()),
      id(a.storage()) == id(c.storage()),
      id(b.storage()) == id(c.storage()))
""" Out: True True True """
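A caveat on this test: each call to .storage() constructs a fresh Python wrapper object, and since no reference to it is kept, CPython may hand the next wrapper the same address, so the id() comparison above can print True even for tensors that share nothing. A more reliable check is to compare the addresses of the underlying buffers with data_ptr(); a minimal sketch:

# data_ptr() returns the address of the first element in the underlying
# buffer, so it identifies the shared storage directly.
print(a.data_ptr() == b.data_ptr() == c.data_ptr())
""" Out: True """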
a[0] = 0
print(a, b, c)
"""
Out:
tensor([[0, 0, 0, 0],
        [6, 4, 1, 3],
        [8, 8, 5, 7]])
tensor([[0, 0, 0, 0, 6, 4],
        [1, 3, 8, 8, 5, 7]])
tensor([[0, 0, 0, 0, 6, 4],
        [1, 3, 8, 8, 5, 7]])
"""
c[0] = 1
print(a, b, c)
"""
Out:
tensor([[1, 1, 1, 1],
        [1, 1, 1, 3],
        [8, 8, 5, 7]])
tensor([[1, 1, 1, 1, 1, 1],
        [1, 3, 8, 8, 5, 7]])
tensor([[1, 1, 1, 1, 1, 1],
        [1, 3, 8, 8, 5, 7]])
"""
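Note that the write-through behaviour above relies on reshape() having returned a view. When reshape() has to copy, as in the non-contiguous case sketched earlier, the result owns its own storage and writing to it leaves the original untouched. An illustrative sketch continuing from the tensors above (the name d is arbitrary):

d = a.t().reshape(12)      # a.t() is non-contiguous, so this copies
print(a.data_ptr() == d.data_ptr())
""" Out: False """
d[0] = 99                  # modifies only the copy
print(a[0, 0].item())
""" Out: 1 """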