PyTorch code management

Published: December 20, 2023
torch.svd  torch.mm
torch.matmul
torch.argsort
torch.view_as_complex torch.view_as_real
torch.split
unsqueeze  squeeze

masked_fill
torch.einsum

x.requires_grad_(True)
torch.optim.SGD([w],lr=0.01)

torch.masked_select
torch.zeros_like
torch.tensor([4,8],dtype=torch.int32)
torch.log(x)/math.log(2)
torch.clamp_min
torch.sum(hx,dim=0).flatten(0)
torch.searchsorted
torch.round(x)
torch.index_select
torch.randint(0,2,(3,4))
mask_new[mask < rate_constraint] = 1

nn.functional.gumbel_softmax
torch.flip
torch.cumsum




import torch
from torch import nn

class depthwise_separable_conv(nn.Module):
    def __init__(self, ch_in, ch_out):
        super(depthwise_separable_conv, self).__init__()
        self.ch_in = ch_in
        self.ch_out = ch_out
        # depthwise: one 7x7 filter per input channel; padding=3 keeps the spatial size
        self.depth_conv = nn.Conv2d(ch_in, ch_in, kernel_size=7, padding=3, groups=ch_in)
        # pointwise: 1x1 conv that mixes channels
        self.point_conv = nn.Conv2d(ch_in, ch_out, kernel_size=1)

    def forward(self, x):
        x = self.depth_conv(x)
        x = self.point_conv(x)
        return x
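# Quick shape check (not from the original post): with padding=3 the depthwise conv keeps the
# spatial size, and the 1x1 pointwise conv only changes the channel count.
dsc = depthwise_separable_conv(ch_in=16, ch_out=32)
out = dsc(torch.randn(1, 16, 32, 32))
print(out.shape)  # expected: torch.Size([1, 32, 32, 32])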

1.DPCA

import torch 
a = torch.randn(5, 3)
u, s, v = torch.svd(a)
a_hat = torch.mm(torch.mm(u, torch.diag(s)), v.t())
print(f"待分解的矩阵\n{a}")
print(f"分解因子\n{u}")
print(f"分解因子\n{s}")
print(f"分解因子\n{v}")
print(f"恢复的矩阵\n{a_hat}")
Matrix to decompose
tensor([[-0.2384, -0.2361, -2.9088],
        [-1.3432, -0.1278,  2.3487],
        [ 0.1430, -0.5459,  1.2211],
        [-0.8490, -0.2448, -0.5152],
        [-0.5628, -0.7402, -0.0866]])
Factor u
tensor([[ 7.1150e-01, -4.0164e-01, -3.5282e-02],
        [-6.2821e-01, -5.6756e-01,  3.2516e-01],
        [-2.9931e-01,  5.8300e-02, -7.3013e-01],
        [ 9.7694e-02, -5.4707e-01,  1.0942e-01],
        [-2.5454e-04, -4.6247e-01, -5.8988e-01]])
Factor s
tensor([3.9990, 1.7200, 0.8296])
Factor v
tensor([[ 0.1372,  0.9251, -0.3540],
        [ 0.0130,  0.3557,  0.9345],
        [-0.9905,  0.1328, -0.0368]])
Reconstructed matrix
tensor([[-0.2384, -0.2361, -2.9088],
        [-1.3432, -0.1278,  2.3487],
        [ 0.1430, -0.5459,  1.2211],
        [-0.8490, -0.2448, -0.5152],
        [-0.5628, -0.7402, -0.0866]])
# For 1-D tensors, torch.matmul is the dot product (element-wise multiply, then sum)
# For 2-D tensors, it is ordinary matrix multiplication
tensor1 = torch.randint(1,5,(3,4))
tensor2 = torch.randint(0,3,(4,))
tensor3 = torch.matmul(tensor1, tensor2)
print(f"tensor1:\n{tensor1}")
print(f"tensor2:\n{tensor2}")
print(f"tensor3:\n{tensor3}")
print("=================================================")
# 3-D: batched matrix multiplication
tensor4 = torch.randint(1,5,(10,3,4))
tensor5 = torch.randint(0,3,(10,4,5))
tensor6 = torch.matmul(tensor4, tensor5)
# print(f"tensor1:\n{tensor4}")
# print(f"tensor2:\n{tensor5}")
print(f"tensor3:\n{tensor6.size()}")
print("=================================================")
tensor1:
tensor([4, 4, 4])
tensor2:
tensor([1, 0, 1])
tensor3:
8
=================================================
tensor3:
torch.Size([10, 3, 5])
=================================================
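# Extra check (not in the original post): for two 1-D tensors, torch.matmul reduces to the dot product.
v1 = torch.tensor([1, 2, 3])
v2 = torch.tensor([4, 5, 6])
print(torch.matmul(v1, v2))  # tensor(32) = 1*4 + 2*5 + 3*6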
from torch.nn import Conv2d, Sequential, BatchNorm2d, ReLU, Linear, Flatten, BatchNorm1d, PReLU, ConvTranspose2d
decoder = Sequential(
            ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=5, stride=2, padding=2, output_padding=1),
            PReLU(),
            ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=5, stride=2, padding=2, output_padding=1),
            PReLU(),
            ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            PReLU(),
            ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=5, stride=1, padding=2),
            PReLU(),
            ConvTranspose2d(in_channels=16, out_channels=6, kernel_size=5, stride=1, padding=2),
            PReLU(),
)
tensor1 = torch.randn(64, 64, 8, 8)
tensor2 = decoder(tensor1)
print(tensor2.shape)
torch.Size([64, 6, 32, 32])
# s_1_2 = torch.randn(10,)
s_1_2 = torch.tensor([-0.4255, -0.2113,  0.3511, -0.4498, -1.1088,  0.5457,  0.1559,  0.7289,
         0.2720, -2.8669])
print(f"s_1_2:\n{s_1_2}")
ind = torch.argsort(s_1_2,descending=True)
print(f"ind, the indices that sort s_1_2 in descending order:\n{ind}")
ind = ind[:4]
print(f"first dim_p indices of ind:\n{ind}")
ind < 3
# ind_1 = ind[ind < 3]
# ind_2 = ind[ind >=3]-5
# print(f"ind_1:\n{ind_1}")
# print(f"ind_2:\n{ind_2}")
s_1_2:
tensor([-0.4255, -0.2113,  0.3511, -0.4498, -1.1088,  0.5457,  0.1559,  0.7289,
         0.2720, -2.8669])
ind, the indices that sort s_1_2 in descending order:
tensor([7, 5, 2, 8, 6, 1, 0, 3, 4, 9])
first dim_p indices of ind:
tensor([7, 5, 2, 8])
tensor([False, False,  True, False])
z1 = torch.randn(5,32*8*8)
z2 = torch.randn(5,32*8*8)
mu_1 = torch.randn(32*8*8,)
mu_2 = torch.randn(32*8*8,)
v_1 = torch.randn(32*8*8,5)
v_2 = torch.randn(32*8*8,5)
ind_1 = ind[ind < 3]        # indices below 3 (here: [2])
ind_2 = ind[ind >= 3] - 5   # remaining indices shifted by 5 (here: [2, 0, 3])
z1_p = torch.matmul(z1 - mu_1, v_1[:, ind_1])
z2_p = torch.matmul(z2 - mu_2, v_2[:, ind_2])
print((z1 - mu_1).shape)
print(v_1[:, ind_1].shape)
print(z1_p.shape)
print(z2_p.shape)
torch.Size([5, 2048])
torch.Size([2048, 1])
torch.Size([5, 1])
torch.Size([5, 3])
z1 = torch.randn(128,1024,2)
z2 = torch.view_as_complex(z1)
z3 = torch.view_as_real(z2)
print(f"z1:\n{z1.shape}")
print(f"z2:\n{z2.shape}")
print(f"z3:\n{z3.shape}")
z1:
torch.Size([128, 1024, 2])
z2:
torch.Size([128, 1024])
z3:
torch.Size([128, 1024, 2])
obs = torch.randn(128,6,32,32)
obs1,obs2=torch.split(obs,3,dim=1)  # when split_size_or_sections is a single int (3), the tensor is cut into equal chunks of size 3 along dim=1, so each chunk is (128, 3, 32, 32)
help(torch.split)
print(f"obs1:\n{obs1.shape}")
Help on function split in module torch.functional:

split(tensor: torch.Tensor, split_size_or_sections: Union[int, List[int]], dim: int = 0) -> List[torch.Tensor]
    Splits the tensor into chunks. Each chunk is a view of the original tensor.
    
    If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will
    be split into equally sized chunks (if possible). Last chunk will be smaller if
    the tensor size along the given dimension :attr:`dim` is not divisible by
    :attr:`split_size`.
    
    If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split
    into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according
    to :attr:`split_size_or_sections`.
    
    Args:
        tensor (Tensor): tensor to split.
        split_size_or_sections (int) or (list(int)): size of a single chunk or
            list of sizes for each chunk
        dim (int): dimension along which to split the tensor.
    
    Example::
    
        >>> a = torch.arange(10).reshape(5,2)
        >>> a
        tensor([[0, 1],
                [2, 3],
                [4, 5],
                [6, 7],
                [8, 9]])
        >>> torch.split(a, 2)
        (tensor([[0, 1],
                 [2, 3]]),
         tensor([[4, 5],
                 [6, 7]]),
         tensor([[8, 9]]))
        >>> torch.split(a, [1,4])
        (tensor([[0, 1]]),
         tensor([[2, 3],
                 [4, 5],
                 [6, 7],
                 [8, 9]]))

obs1:
torch.Size([128, 3, 32, 32])
import numpy as np
a = np.random.rand()
print(a)
0.6296961807592375
for rate in range(10):
    compression_rate = min((rate+1)*0.1,1)
    channel = max(np.sqrt(28 * (1 - compression_rate) / 2), 1)
    print(f"compression_rate:{compression_rate}")
    print(f"channel:{int(channel)}")
compression_rate:0.1
channel:3
compression_rate:0.2
channel:3
compression_rate:0.30000000000000004
channel:3
compression_rate:0.4
channel:2
compression_rate:0.5
channel:2
compression_rate:0.6000000000000001
channel:2
compression_rate:0.7000000000000001
channel:2
compression_rate:0.8
channel:1
compression_rate:0.9
channel:1
compression_rate:1.0
channel:1
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict

class CNNEncoder(nn.Module):
    def __init__(self, obs_shape, feature_dim, num_layers=3, num_filters=64, n_hidden_layers=2, hidden_size=128,
                 min_log_std=-10, max_log_std=2):
        super().__init__()

        # assert len(obs_shape) == 3
        self.obs_shape = obs_shape
        self.feature_dim = feature_dim
        self.num_layers = num_layers
        self.min_log_std = min_log_std
        self.max_log_std = max_log_std

        self.conv_layers = nn.ModuleList(
            [nn.Conv2d(obs_shape[0], num_filters, 3, stride=2)]
        )
        for i in range(num_layers - 1):
            self.conv_layers.append(nn.Conv2d(num_filters, num_filters, 3, stride=2))  # 1

        x = torch.rand([1] + list(obs_shape))
        conv_flattened_size = int(np.prod(self.forward_conv(x).shape[-3:]))
        ff_layers = OrderedDict()
        previous_feature_size = conv_flattened_size
        for i in range(n_hidden_layers):
            ff_layers[f'linear_{i + 1}'] = nn.Linear(in_features=previous_feature_size,
                                                     out_features=hidden_size)
            ff_layers[f'relu_{i + 1}'] = nn.ReLU()
            previous_feature_size = hidden_size

        ff_layers[f'linear_{n_hidden_layers + 1}'] = nn.Linear(in_features=previous_feature_size,
                                                               out_features=2 * feature_dim)
        self.ff_layers = nn.Sequential(ff_layers)

    def forward_conv(self, obs):
        # assert obs.max() <= 1 and 0 <= obs.min(), f'Make sure images are in [0, 1]. Get [{obs.min()}, {obs.max()}]'
        conv = torch.relu(self.conv_layers[0](obs))
        for i in range(1, self.num_layers):
            conv = torch.relu(self.conv_layers[i](conv))
        conv = conv.reshape(conv.size(0), -1)
        return conv

    def forward(self, obs, detach=False):
        h = self.forward_conv(obs)

        if detach:
            h = h.detach()

        out = self.ff_layers(h)
        # split along dim=1: self.feature_dim + self.feature_dim equals the total size of dim 1
        mean, log_std = out.split([self.feature_dim, self.feature_dim], dim=1)
        log_std = log_std.clip(self.min_log_std, self.max_log_std)
        return mean, log_std
torch.Size([128, 16, 28, 28])
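# Minimal usage sketch (not from the original post; obs_shape, feature_dim and the batch size are
# assumed values): the encoder maps an image batch to the mean and log-std of a feature_dim-dim Gaussian.
enc = CNNEncoder(obs_shape=(3, 32, 32), feature_dim=64)
mean, log_std = enc(torch.rand(8, 3, 32, 32))
print(mean.shape, log_std.shape)  # expected: torch.Size([8, 64]) torch.Size([8, 64])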
import torch
z = torch.randn(1,3,34345,32,32)
z1 = z.unsqueeze(2)
z2 = z1.unsqueeze(3)
print(z1.shape)
print(z2.shape)
torch.Size([1, 3, 1, 34345, 32, 32])
torch.Size([1, 3, 1, 1, 34345, 32, 32])
snr = torch.randn(4,3,32,32)
snr1 = snr.squeeze(-2)
snr2 = snr1.squeeze()
print(snr1.shape)
print(snr2.shape)
torch.Size([4, 3, 32, 32])
torch.Size([4, 3, 32, 32])
import torch
from torch import nn
input = torch.tensor([1, 2, 3], dtype=torch.float32)
target = torch.tensor([1, 2, 5], dtype=torch.float32)

input = torch.reshape(input, (1, 1, 1, 3))
target = torch.reshape(target, (1, 1, 1, 3))

# MSELoss
loss_mse = nn.MSELoss()
result_mse1 = loss_mse(input, target)
result_mse2 = loss_mse(input*100., target*100.)
print(result_mse1)
print(result_mse2)
tensor(1.3333)
tensor(13333.3330)
import numpy as np
noise_1 = torch.randn(1,2)
noise_2 = torch.randn(1,2)*np.sqrt(0.1**2)
print(noise_1)
print(noise_2)
tensor([[1.6070, 0.3328]])
tensor([[ 0.1151, -0.0453]])
import torch
a = torch.randn(1,2,5,4)
u, s, v = torch.svd(a)
# x = torch.diag(s)
# a_hat = torch.mm(u, torch.diag(s))
# a_hat = torch.mm(torch.mm(u, torch.diag(s)), v.t())
print(f"分解因子u\n{u.shape}")
print(f"分解因子s\n{s}")
print(f"分解因子s\n{s.shape}")
print(f"分解因子v\n{v.shape}")
# print(f"x\n{x.shape}")
# print(f"恢复的矩阵\n{a_hat}")
Factor u
torch.Size([1, 2, 5, 4])
Factor s
tensor([[[2.7966, 1.9091, 1.5002, 0.5480],
         [3.5446, 2.2571, 1.7378, 1.0956]]])
Factor s
torch.Size([1, 2, 4])
Factor v
torch.Size([1, 2, 4, 4])
ind = torch.tensor([32,  0, 33, 34,  1, 35, 36,  2])
tmp1 = torch.tensor([False,  True, False, False,  True, False, False,  True])
tmp2 = torch.tensor([ True, False,  True,  True, False,  True,  True, False])
ind1 = ind[tmp1]
ind2 = ind[tmp2]-32
# ind1
ind2
tensor([0, 1, 2, 3, 4])
tensor1 = torch.randn(64, 256, 64)
tensor2 = torch.randn(64, 64)
torch.matmul(tensor1, tensor2).size()
torch.Size([64, 256, 64])
# c = x.masked_fill(mask, big): replaces the entries of x at the positions where mask is 1 (True) with big
# https://blog.csdn.net/weixin_41684423/article/details/117339499
import torch
a=torch.tensor([[5,5,5,5], [6,6,6,6], [7,7,7,7], [1,1,1,1],[2,2,2,2],[3,3,3,3]])
print(a)
print(a.size())
# print("#############################################3")
mask = torch.IntTensor([[1],[0],[0],[0],[1],[1]])
# print(mask)
print(mask.size())
b = a.masked_fill(mask.bool(), value=torch.tensor(-1e9))  # masked_fill expects a bool mask
print(b)
print(b.size())
tensor([[5, 5, 5, 5],
        [6, 6, 6, 6],
        [7, 7, 7, 7],
        [1, 1, 1, 1],
        [2, 2, 2, 2],
        [3, 3, 3, 3]])
torch.Size([6, 4])
torch.Size([6, 1])
tensor([[-1000000000, -1000000000, -1000000000, -1000000000],
        [          6,           6,           6,           6],
        [          7,           7,           7,           7],
        [          1,           1,           1,           1],
        [-1000000000, -1000000000, -1000000000, -1000000000],
        [-1000000000, -1000000000, -1000000000, -1000000000]])
torch.Size([6, 4])

2.transformer

# torch.einsum: contracts/sums according to the given subscript rule, e.g. 'ik, kj -> ij'
# https://blog.csdn.net/a2806005024/article/details/96462827
a_tensor = torch.tensor([[11, 12],
                        [21, 22]])
 
b_tensor = torch.tensor([[1, 1],
                        [0, 0]])
 
# Semantics of 'ik, kj -> ij':
# input a_tensor: a 2-D array with subscripts ik
# input b_tensor: a 2-D array with subscripts kj
# output: a 2-D array with subscripts ij
# implicit rule: k, the subscript shared by a and b, is summed over (formula 2 in the linked post)
output = torch.einsum('ik, kj -> ij', a_tensor, b_tensor)

print(output)
 
tensor([[11, 11],
        [21, 21]])
a=torch.tensor([[5,5,5,5], [6,6,6,6], [7,7,7,7], [1,1,1,1]])
print(a)
torch.einsum('ii', a) # trace
torch.einsum('ii->i', a) # diagonal
tensor([[5, 5, 5, 5],
        [6, 6, 6, 6],
        [7, 7, 7, 7],
        [1, 1, 1, 1]])

tensor([5, 6, 7, 1])
import torch
import torch.nn as nn
x = torch.randn(1, 16, 32, 32)
# print(x)
unfold = nn.Unfold(kernel_size=(5,5),dilation=(1, 1), padding=2, stride=1)
y = unfold(x)
print(y.size())
# print(y)
torch.Size([1, 400, 1024])
x = torch.randn(1, 16, 32, 32)
o_mapping = nn.AvgPool2d(kernel_size=1, stride=1)
x1 = o_mapping(x)
print(x1.size())
torch.Size([1, 16, 32, 32])

3.A closer look at gradients

# 2.5. Automatic differentiation
import torch

x = torch.arange(4.0)
x
tensor([0., 1., 2., 3.])
x.requires_grad_(True)  # equivalent to x = torch.arange(4.0, requires_grad=True)
x.grad  # defaults to None
y = 2 * torch.dot(x, x)
y
tensor(28., grad_fn=<MulBackward0>)
y.backward()
x.grad
tensor([ 0.,  4.,  8., 12.])
x.grad == 4 * x
tensor([True, True, True, True])
# By default, PyTorch accumulates gradients, so we need to clear the previous values first
x.grad.zero_()
y = x.sum()
y.backward()
x.grad
y
x
tensor([0., 1., 2., 3.], requires_grad=True)
import torch
w = torch.tensor([0.5,0.5],requires_grad=True)
loss = w[0]**2 + 2*w[1]**2
opti = torch.optim.SGD([w],lr=0.01)
loss.backward()
print("w的梯度:",w.grad)
print("w:",w)
opti.step()
print("w:",w)
grad of w: tensor([1., 2.])
w: tensor([0.5000, 0.5000], requires_grad=True)
w: tensor([0.4900, 0.4800], requires_grad=True)
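Sanity check: plain SGD updates w as w - lr*grad, i.e. [0.5 - 0.01*1, 0.5 - 0.01*2] = [0.49, 0.48], which matches the printed result.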
import torch
w = torch.tensor([0.5,0.5],requires_grad=True)
loss = w[0]**2 + 2*w[1]**2
opti = torch.optim.SGD([w],lr=0.01)
loss.backward()
print("w的梯度:",w.grad)
print("w:",w)
opti.step()
print("w:",w)
opti.step()
print("w:",w)
opti.step()
print("w:",w)
grad of w: tensor([1., 2.])
w: tensor([0.5000, 0.5000], requires_grad=True)
w: tensor([0.4900, 0.4800], requires_grad=True)
w: tensor([0.4800, 0.4600], requires_grad=True)
w: tensor([0.4700, 0.4400], requires_grad=True)
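backward() is called only once here, so w.grad stays at [1., 2.]; every extra opti.step() subtracts lr*grad again, which is why w moves in constant increments: 0.49 -> 0.48 -> 0.47 and 0.48 -> 0.46 -> 0.44.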
import torch
w = torch.tensor([0.5,0.5],requires_grad=True)
loss = w[0].clone()**2 + 2*w[1].clone()**2
opti = torch.optim.SGD([w],lr=0.01)
loss.backward(retain_graph=True)
print("w的梯度:",w.grad)
print("w:",w)
opti.step()
print("w:",w)
opti.zero_grad()
loss = w[0].clone()**2 + 2*w[1].clone()**2
loss.backward()
print("w的梯度:",w.grad)
opti.step()
print("w:",w)
grad of w: tensor([1., 2.])
w: tensor([0.5000, 0.5000], requires_grad=True)
w: tensor([0.4900, 0.4800], requires_grad=True)
grad of w: tensor([0.9800, 1.9200])
w: tensor([0.4802, 0.4608], requires_grad=True)
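Here the gradient is recomputed at the updated w = [0.49, 0.48] before the second step: grad = [2*0.49, 4*0.48] = [0.98, 1.92] (opti.zero_grad() keeps it from being added to the old gradient), and the step gives [0.49 - 0.0098, 0.48 - 0.0192] = [0.4802, 0.4608].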
import torch
w = torch.tensor([0.5,0.5],requires_grad=True)
loss = w[0].clone()**2 + 2*w[1].clone()**2
opti = torch.optim.SGD([w],lr=0.01,weight_decay=1)
loss.backward(retain_graph=True)
print("w的梯度:",w.grad)
print("w:",w)
opti.step()
print("w:",w)
opti.step()
print("w:",w)
grad of w: tensor([1., 2.])
w: tensor([0.5000, 0.5000], requires_grad=True)
w: tensor([0.4850, 0.4750], requires_grad=True)
w: tensor([0.4702, 0.4502], requires_grad=True)
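With weight_decay=1, SGD uses the effective gradient grad + w: the first step is [0.5 - 0.01*(1 + 0.5), 0.5 - 0.01*(2 + 0.5)] = [0.485, 0.475]; the second step reuses the stale grad [1., 2.] but adds the new w, giving roughly [0.4702, 0.4502].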
import torch
w = torch.tensor([0.5,0.5],requires_grad=True)
loss = w[0].clone()**2 + 2*w[1].clone()**2
opti = torch.optim.SGD([w],lr=0.01,momentum=0.1)
loss.backward(retain_graph=True)
print("w的梯度:",w.grad)
print("w:",w)
opti.step()
print("w:",w)
opti.step()
print("w:",w)
grad of w: tensor([1., 2.])
w: tensor([0.5000, 0.5000], requires_grad=True)
w: tensor([0.4900, 0.4800], requires_grad=True)
w: tensor([0.4790, 0.4580], requires_grad=True)
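With momentum=0.1, the first step uses the buffer buf = grad = [1., 2.] (so w moves to [0.49, 0.48]); the second step uses buf = 0.1*buf + grad = [1.1, 2.2], so w becomes [0.49 - 0.011, 0.48 - 0.022] = [0.479, 0.458].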

4.NTSCC

import torch
mask_BCHW = torch.randint(0, 2, (3, 5))
mask_BCHW_byte = mask_BCHW.bool()  # byte() also works here but raises a warning; bool() is recommended
s_masked = torch.randn(3, 5)
"""
torch.masked_select(s_masked, mask_BCHW_byte)
s_masked: the tensor to be masked
mask_BCHW_byte: the mask, converted to bool (byte also works)
"""
channel_input = torch.masked_select(s_masked, mask_BCHW_byte)
s_hat = torch.zeros_like(s_masked)
s_hat[mask_BCHW_byte] = channel_input

print(f"mask_BCHW:\n{mask_BCHW}")
print(f"mask_BCHW_byte:\n{mask_BCHW_byte}")
print(f"s_masked:\n{s_masked}")
print(f"channel_input:\n{channel_input}")
# print(f"s_hat:\n{s_hat}")
import torch
import numpy as np
# z = torch.randn(128,1024)
# num = torch.numel(z[1])
a = np.sqrt(10 ** (3 / 10))
print(a)
1.4125375446227544
import torch
import math
x = torch.randn(3,2,1,1)
x = torch.tensor([4,8],dtype=torch.int32)
x1 = torch.log(x)/math.log(2)
hx = torch.clamp_min(x1,0)
symbol_num = torch.sum(hx,dim=0).flatten(0)
print(f"x:\n{x}")
print(f"x1:\n{x1}")
print(f"hx:\n{hx}")
print(f"symbol_num:\n{symbol_num}")
x:
tensor([4, 8], dtype=torch.int32)
x1:
tensor([2., 3.])
hx:
tensor([2., 3.])
symbol_num:
tensor([5.])
import numpy as np
a= torch.randn(3,512,16,16).chunk(4,3)  # split dim 3 into 4 equal chunks; the other dims are unchanged
print(a[0].shape)
print(a[1].shape)
torch.Size([3, 512, 16, 4])
torch.Size([3, 512, 16, 4])
import torch
# torch.zeros(self.rate_num, self.C, max(self.rate_choice))
sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
values = torch.tensor([[3, 6, 9], [3, 6, 9]])
selected_sequence = torch.searchsorted(sorted_sequence, values)  # for each value, returns the index at which it would be inserted into the corresponding row of sorted_sequence
print(selected_sequence)
tensor([[1, 3, 4],
        [1, 2, 4]])
mask = torch.arange(0,32)
mask1 = mask.repeat(32,1)
mask2 = mask1.repeat(2,1,1)
mask = torch.arange(0, 32).repeat(8 * 8, 1)
print(mask)
print(mask.shape)
a = torch.arange(0, 32)  # a is a 1-D tensor; a.repeat(64, 1) would give shape (64, 32)
print(a.shape)
# print(mask1)
# print(mask1.shape)
# print(mask2)
# print(mask2.shape)
tensor([[ 0,  1,  2,  ..., 29, 30, 31],
        [ 0,  1,  2,  ..., 29, 30, 31],
        [ 0,  1,  2,  ..., 29, 30, 31],
        ...,
        [ 0,  1,  2,  ..., 29, 30, 31],
        [ 0,  1,  2,  ..., 29, 30, 31],
        [ 0,  1,  2,  ..., 29, 30, 31]])
torch.Size([64, 32])
torch.Size([32])
index = torch.ones(64,dtype = torch.long)
rate_choice_tensor = torch.tensor([0,16,32])
rate_constraint = rate_choice_tensor[index]
# rate_constraint1 = rate_constraint.reshape(1,4,1)
# print(index)
print(rate_constraint)
tensor([16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
        16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
        16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
        16, 16, 16, 16, 16, 16, 16, 16, 16, 16])
x = torch.randn(1,3, 4)
mask = x.byte().to(bool)
mask1 = torch.masked_select(x, mask)
print(x)
print(mask)
print(mask.shape)
print(mask1.shape)
tensor([[[-0.5914, -0.4083, -0.2550,  0.5358],
         [-1.7481,  0.3756, -0.1665, -0.2826],
         [ 1.4188,  1.6289, -1.5177,  0.4359]]])
tensor([[[False, False, False, False],
         [ True, False, False, False],
         [ True,  True,  True, False]]])
torch.Size([1, 3, 4])
torch.Size([4])
import torch
from torch import nn
 
class MyModule(nn.Module):
    def __init__(self, input_size, output_size):
        super(MyModule, self).__init__()
        self.test = torch.rand(input_size, output_size)
        self.linear = nn.Linear(input_size, output_size)
    def forward(self, x):
        return self.linear(x)
 
model = MyModule(4, 2)
print(list(model.named_parameters()))
[('linear.weight', Parameter containing:
tensor([[ 0.3464, -0.3051, -0.1750,  0.3745],
        [ 0.1961, -0.2831,  0.1392,  0.3063]], requires_grad=True)), ('linear.bias', Parameter containing:
tensor([0.0186, 0.4188], requires_grad=True))]
x = torch.randn(2,3, 4)
y = torch.round(x)
print(x)
print(y)
# x_hat = x[].reshape(-1)
# print(x_hat.shape)
tensor([[[-0.4185,  0.7974, -0.0894,  1.7687],
         [ 1.1744,  0.6782, -0.4971,  0.5060],
         [ 1.3180,  0.2855,  0.0362,  0.9105]],

        [[ 0.5914, -0.0294,  0.5370, -0.9174],
         [ 0.7989, -0.3448,  0.7094,  0.1814],
         [ 1.1233,  0.4332, -1.7268,  0.4500]]])
tensor([[[-0.,  1., -0.,  2.],
         [ 1.,  1., -0.,  1.],
         [ 1.,  0.,  0.,  1.]],

        [[ 1., -0.,  1., -1.],
         [ 1., -0.,  1.,  0.],
         [ 1.,  0., -2.,  0.]]])
rate_token=torch.randn(3,3)
idx=torch.tensor([0,2])
x_1=torch.index_select(rate_token,0,idx)  # values in idx must be less than rate_token.size(0); the output shape is [idx.numel(), rate_token.size(1)]
# x_2=torch.index_select(x,1,idx)
print(f"rate_token:\n{rate_token}")
print(f"x_1:\n{x_1}")
rate_token:
tensor([[ 0.2069, -1.4461, -0.6306],
        [ 0.4353, -0.2097, -0.7552],
        [ 0.2192,  0.5699,  0.0873]])
x_1:
tensor([[ 0.2069, -1.4461, -0.6306],
        [ 0.2192,  0.5699,  0.0873]])
mask = torch.randint(0,2,(3,4))
rate_constraint = torch.randint(0,2,(3,4))
mask_new = torch.zeros_like(mask)
mask_new[mask < rate_constraint] = 1
mask_new[mask >= rate_constraint] = 0
print(f"mask:\n{mask}")
print(f"rate_constraint:\n{rate_constraint}")
print(f"mask_new:\n{mask_new}")
mask:
tensor([[1, 1, 0, 0],
        [1, 0, 0, 1],
        [1, 0, 1, 0]])
rate_constraint:
tensor([[1, 0, 1, 0],
        [0, 0, 0, 0],
        [1, 1, 1, 0]])
mask_new:
tensor([[0, 0, 1, 0],
        [0, 0, 0, 0],
        [0, 1, 0, 0]])
a = torch.randint(0,2,(1,64,1,32))
b = torch.randint(2,4,(1,64,32,32))  # when the leading (batch) dims match, matmul multiplies the last two dims
c = torch.matmul(a, b)  # batched matrix multiplication
print(c.shape)
torch.Size([1, 64, 1, 32])
a = torch.randint(0,2,(1,64,1,32))
a=a.squeeze()
b = torch.randint(2,4,(1,64,32))
c = a+b  # element-wise addition; a (64, 32) broadcasts against b (1, 64, 32)
print(a.shape)
print(c.shape)
torch.Size([64, 32])
torch.Size([1, 64, 32])
mask_BCHW = torch.randint(0,2,(1,32,8,8)).bool()
s_hat = torch.zeros_like(mask_BCHW, dtype=torch.long)
y_sd = torch.randint(0, 2, (int(mask_BCHW.sum()),))  # one value per True entry in the mask
s_hat[mask_BCHW] = y_sd
print(f"mask_BCHW:\n{mask_BCHW}")
print(f"y_sd:\n{y_sd.shape}")
print(f"s_hat:\n{s_hat}")

5.Policy net

import torch
snr = torch.ones(10,1)*10
print(snr)
tensor([[10.],
        [10.],
        [10.],
        [10.],
        [10.],
        [10.],
        [10.],
        [10.],
        [10.],
        [10.]])
# averaging: z.mean((-2, -1))
z = torch.randint(1,5,(2,3))
z =z.to(torch.float32)
z1 = z.mean((-1))
z2 = z.mean((-2,-1))
print(f"z:\n{z}")
print(f"z1:\n{z1}")
print(f"z2:\n{z2}")
# the last two dimensions of z are reduced away
z:
tensor([[2., 3., 3.],
        [2., 1., 1.]])
z1:
tensor([2.6667, 1.3333])
z2:
2.0
# nn.functional.gumbel_softmax
import torch.nn as nn
z = torch.randn(2,5)
soft = nn.functional.gumbel_softmax(z, 5, dim=-1)
index = torch.zeros_like(soft)
index[torch.arange(0, 2), soft.argmax(-1)] = 1
print(f"z:\n{z}")
print(f"soft:\n{soft}")
print(f"index:\n{index}")
print(f"soft.argmax(-1):\n{soft.argmax(-1)}")
#
z:
tensor([[ 0.1152, -0.8748, -0.3665, -1.4863,  0.7154],
        [-1.0183,  0.8114, -0.9055, -0.5129, -0.6120]])
soft:
tensor([[0.1880, 0.1719, 0.1971, 0.1692, 0.2738],
        [0.1328, 0.2628, 0.3199, 0.1417, 0.1428]])
index:
tensor([[0., 0., 0., 0., 1.],
        [0., 0., 1., 0., 0.]])
soft.argmax(-1):
tensor([4, 2])
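# Alternative (not in the original post): hard=True makes gumbel_softmax return a straight-through
# one-hot sample directly, playing the same role as the manual argmax/one-hot construction above.
hard = nn.functional.gumbel_softmax(z, tau=5, hard=True, dim=-1)
print(f"hard:\n{hard}")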
#torch.argmax
a=torch.tensor([
                  [1, 5, 5, 2],
                  [9, -6, 2, 8],
                  [-3, 7, -9, 1]
              ])
# b=torch.argmax(a,dim=0)
# c=torch.argmax(a,dim=1)
b=a.argmax(dim=0)
c=a.argmax(dim=1)
print(b)
print(c)
print(a.shape)
# argmax returns the indices of the maximum values along dim
tensor([1, 2, 0, 1])
tensor([1, 0, 1])
torch.Size([3, 4])
#one-hot
z = torch.randn(2,5)
soft = nn.functional.gumbel_softmax(z, 5, dim=-1)
h = soft
# 1. flip the order
h = torch.flip(h, [-1])  # torch.flip(input, dims): reverses the input along the listed dimensions
# 2. Accumulate sum
s = torch.cumsum(h, -1)  # cumulative sum of the elements along dim
# 3. flip the result
x = torch.flip(s, [-1])
print(f"z:\n{z}")
print(f"soft:\n{soft}")
print(f"h:\n{h}")
print(f"s:\n{s}")
print(f"x:\n{x}")
z:
tensor([[ 0.6831,  0.3028, -0.4851, -0.6090,  0.8904],
        [ 1.1903,  0.8956, -1.2509, -0.3563, -1.7219]])
soft:
tensor([[0.2491, 0.1865, 0.2170, 0.1678, 0.1794],
        [0.2800, 0.2449, 0.1376, 0.1516, 0.1858]])
h:
tensor([[0.1794, 0.1678, 0.2170, 0.1865, 0.2491],
        [0.1858, 0.1516, 0.1376, 0.2449, 0.2800]])
s:
tensor([[0.1794, 0.3473, 0.5643, 0.7509, 1.0000],
        [0.1858, 0.3375, 0.4751, 0.7200, 1.0000]])
x:
tensor([[1.0000, 0.7509, 0.5643, 0.3473, 0.1794],
        [1.0000, 0.7200, 0.4751, 0.3375, 0.1858]])
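Net effect: x is the reverse cumulative sum of soft, i.e. x[..., i] = soft[..., i] + soft[..., i+1] + ... + soft[..., -1]; since each row of soft sums to 1, x starts at 1.0 and decreases along the last dimension.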
#torch.flip
z = torch.randn(2,5)
soft = nn.functional.gumbel_softmax(z, 5, dim=-1)
h = soft
# 1. flip the order
h = torch.flip(h, [0])
print(f"z:\n{z}")
print(f"soft:\n{soft}")
print(f"h:\n{h}")
z:
tensor([[-0.5672, -1.3687, -0.4187,  0.2370, -1.1392],
        [ 0.4554, -0.0986,  0.1601, -0.9429,  0.0366]])
soft:
tensor([[0.2312, 0.2181, 0.2057, 0.1906, 0.1543],
        [0.2791, 0.1794, 0.1821, 0.1342, 0.2253]])
h:
tensor([[0.2791, 0.1794, 0.1821, 0.1342, 0.2253],
        [0.2312, 0.2181, 0.2057, 0.1906, 0.1543]])
#keepdim=True
latent = torch.randn(1,2,3)
latent_sum= torch.sqrt((latent**2).mean((-2, -1), keepdim=True))
b= torch.sqrt((latent**2).mean((-2, -1)))
latent1 = latent / latent_sum
print(f"latent:\n{latent}")
print(f"a:\n{latent_sum}")
print(f"b:\n{b}")
print(f"latent1:\n{latent1}")
latent:
tensor([[[ 0.6348,  0.3374, -0.4481],
         [ 1.0453,  0.0320,  1.4410]]])
a:
tensor([[[0.8050]]])
b:
tensor([0.8050])
latent1:
tensor([[[ 0.7886,  0.4191, -0.5567],
         [ 1.2986,  0.0398,  1.7902]]])
mask = torch.zeros(2,3)
mask[:,1:3] = 1
nonzero_indices = mask.nonzero()
num_nonzero_elements = nonzero_indices.size(0)
print(nonzero_indices)
print(nonzero_indices.shape)
print(mask)
print(num_nonzero_elements)
tensor([[0, 1],
        [0, 2],
        [1, 1],
        [1, 2]])
torch.Size([4, 2])
tensor([[0., 1., 1.],
        [0., 1., 1.]])
4
mask = torch.ones(2,3)
mask_sum = mask.sum((-2,-1))
print(mask_sum)
tensor(6.)
# two ways to compute the cbr
nonzero_indices = hard_mask.nonzero()  # indices of the nonzero entries, shape (A, B): A nonzero elements, B index dimensions
num_nonzero_elements = nonzero_indices.size(0)  # A = batchsize * [4, 8]
cbr = num_nonzero_elements*(latent_res.size(-1)) / x.numel() / 2  # each nonzero entry carries latent_res.size(-1) symbols

count_sum = hard_mask.sum((-2,-1))
cbr2 = count_sum*(latent_res.size(-1)) / x.numel() /2
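# Self-contained check of the two cbr formulas above (all shapes are assumed for illustration;
# hard_mask, latent_res and x normally come from the surrounding model code):
hard_mask = torch.randint(0, 2, (8, 8)).float()  # binary selection mask
latent_res = torch.randn(8 * 8, 16)              # 16 channel symbols per position
x = torch.randn(3, 32, 32)                       # source image
num_nonzero_elements = hard_mask.nonzero().size(0)
cbr = num_nonzero_elements * latent_res.size(-1) / x.numel() / 2
cbr2 = hard_mask.sum((-2, -1)) * latent_res.size(-1) / x.numel() / 2
print(cbr, cbr2.item())  # the two values agree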
epoch = 10
if epoch % 10 == 0 or epoch == 199: 
    print("ok")
ok

6.I++

embed_size=256
image_dims= [32, 32]
depth= [2, 4]
embed_size = 256
window_size = 8
mlp_ratio = 4
n_adapt_embed = 2
max_trans_feat= 6
min_trans_feat = 1
unit_trans_feat = 4
n_patches = 64
n_feat=48
n_trans_feat = 16
embed_dims=[embed_size,embed_size]
threshold = 0.25
alpha = 2
min_clip = 0
max_clip = 10
# embed_dims=[args.embed_size, args.embed_size]
# np.random.randn(1) returns one sample from the standard normal distribution (mean 0, std 1)
import numpy as np
a = np.random.randn(1)
b = a[0]
print(f"a={a}")
print(type(a))
print(type(b))
# print(f"b={b}")
a=[0.70869371]
<class 'numpy.ndarray'>
<class 'numpy.float64'>
#[snr, bw] class Swin_JSCC(nn.Module)
# -def train_epoch(loader, model, solvers, weight):
import torch
min_trans_feat=5
max_trans_feat=8
link_qual = 7.0
link_rng = 3.0
snr = link_qual + link_rng*np.random.randn(1)[0]
bw = np.random.randint(min_trans_feat, max_trans_feat+1)
adapt_embed = torch.from_numpy(np.array([snr, bw])).float()
print(f"snr={snr} {type(snr)}")
print(f"bw={bw} {type(bw)}")
print(f"adapt_embed={adapt_embed} {type(adapt_embed)}")
snr=6.635929147639401 <class 'numpy.float64'>
bw=7 <class 'int'>
adapt_embed=tensor([6.6359, 7.0000]) <class 'torch.Tensor'>
# nn.Linear
import torch.nn as nn
adapt_embed = torch.tensor([6.6359, 7.0000])
n_adapt_embed = 2
print(f"adapt_embed.unsqueeze(0)={adapt_embed.unsqueeze(0)}")
adapt_embed = nn.Linear(2, n_adapt_embed)(adapt_embed.unsqueeze(0))
print(f"adapt_embed={adapt_embed} {type(adapt_embed)}")
adapt_embed.unsqueeze(0)=tensor([[6.6359, 7.0000]])
adapt_embed=tensor([[-1.9270,  5.8020]], grad_fn=<AddmmBackward0>) <class 'torch.Tensor'>
#class Swin_Encoder(nn.Module)
# -self.patch_embed = PatchEmbed(img_size, 2, 3, embed_dims[0], nn.LayerNorm)
# --x = self.proj(x).flatten(2).transpose(1, 2)
x = torch.randn(1,3,32,32) 
patch_size=2
in_chans=3
embed_dim=256
norm_layer=None    
proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
# x = proj(x).flatten(2).transpose(1, 2)
x_proj = proj(x)
x_flatten = x_proj.flatten(2)
x_transpose = x_flatten.transpose(1, 2)
x_out = nn.LayerNorm(embed_dim)(x_transpose)
print(f"x={x.shape}")
print(f"x_proj={x_proj.shape}")
print(f"x_flatten={x_flatten.shape}")
print(f"x_tanspose={x_tanspose.shape}")
print(f"x_out={x_out.shape}")
# PatchEmbed(img_size, 2, 3, embed_dims[0], nn.LayerNorm)
x=torch.Size([1, 3, 32, 32])
x_proj=torch.Size([1, 256, 16, 16])
x_flatten=torch.Size([1, 256, 256])
x_transpose=torch.Size([1, 256, 256])
x_out=torch.Size([1, 256, 256])
#adapt_embed.unsqueeze(1).repeat
adapt_embed = torch.tensor([[-1.9270,  5.8020]]) # (1,2) -> (1,1,2) -> (32,256,2)
adapt_embed = adapt_embed.unsqueeze(1).repeat(32, 256, 1) 
print(f"adapt_embed={adapt_embed.shape}")
# print(f"x_unsqueeze={x_unsqueeze.shape}")
adapt_embed=torch.Size([32, 256, 2])
x = torch.randn(1,3,32,32)
x_unsqueeze = x.unsqueeze(2)
print(f"x={x.shape}")
print(f"x_unsqueeze={x_unsqueeze.shape}")
x=torch.Size([1, 3, 32, 32])
x_unsqueeze=torch.Size([1, 3, 1, 32, 32])
adapt_proj = nn.Linear(embed_dims[0] + n_adapt_embed, embed_dims[0])
# fuse the [snr, bw] embedding into each token: concatenate along the last dim, then project back to embed_dims[0]
x = adapt_proj(torch.cat([x_out, adapt_embed[:1]], dim=-1))  # (1, 256, 258) -> (1, 256, 256)
images = torch.randn(1,3,32,32)
output = torch.randn(1,3,32,32)
weight = np.array([1.0 for _ in range(min_trans_feat,max_trans_feat+1)])
print(weight)
loss = nn.MSELoss()(output, images)
print(loss)
# loss = nn.MSELoss()(output, images)* weight[bw-min_trans_feat]
[1. 1. 1. 1. 1. 1.]
tensor(1.9309)
from collections import OrderedDict
epoch_postfix = OrderedDict()
epoch_postfix['l2_loss'] = '{:.4f}'.format(loss.item())
print(epoch_postfix)
# https://huaweicloud.csdn.net/63808c16dacf622b8df8a6bd.html
OrderedDict([('l2_loss', '1.9309')])
weight = np.array([1.0 for _ in range(1, 7)])
print(weight)
print(len(weight))
[1. 1. 1. 1. 1. 1.]
6
for i in range(len(weight)):
    print(i)
0
1
2
3
4
5
delta = np.array([24.75, 27.85, 30.1526917, 32.01, 33.2777652, 34.55393814])
for i in range(len(delta)):
    print(i)
    
0
1
2
3
4
5
Source: https://blog.csdn.net/qq_41100635/article/details/135035626