PyTorch error


 File "D:\ka\PycharmProjects\lstm-mlp code\main\lstm.py", line 287, in <module>
    out_t, hn, cn = model(train_batch[t, :])
  File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\ka\PycharmProjects\lstm-mlp code\main\lstm.py", line 109, in forward
    h_next, c_next = self.lstmcell(x_cur, (h_cur, c_cur))
  File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\rnn.py", line 1197, in forward
    self.bias_ih, self.bias_hh,
RuntimeError: The size of tensor a (3600) must match the size of tensor b (900) at non-singleton dimension 2
The error is above and the complete code is below; I really can't see what's wrong. The values earlier in the main function shouldn't matter. The data I import from the xlsx spreadsheets all have shape (6, 900); that is, each of the four spreadsheets holds 7 × 900 values (pandas reads the first row as the header, leaving 6 rows of data).
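For reference, the shape mismatch can be reproduced in isolation. The sketch below is my reading of the failure mode, not something confirmed in the post: train_batch[t, :] is a 1-D tensor of shape (900,), so batch_size = x_cur.shape[0] evaluates to 900 and the zero-initialized hidden state becomes (900, 900). Recent PyTorch versions treat a 1-D input to nn.LSTMCell as a single unbatched sample, so the 2-D hidden state no longer lines up and the gate pre-activations (4 × 900 = 3600 values) fail to broadcast, which matches the 3600-vs-900 message above.

import torch
import torch.nn as nn

cell = nn.LSTMCell(input_size=900, hidden_size=900)
x = torch.randn(900)        # 1-D slice, like train_batch[t, :]
h = torch.zeros(900, 900)   # built from batch_size = x.shape[0] == 900
c = torch.zeros(900, 900)
# Expected to raise (observed on PyTorch 1.13; exact wording may vary by version):
# RuntimeError: The size of tensor a (3600) must match the size of tensor b (900) ...
out = cell(x, (h, c))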

import copy
import math
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io as sio
import torch
import torch.nn as nn
import torch.utils.data as data

num_heads = 3
num_layers = 3
hidden_dim = 50
lstm_size = 900


class LSTM_Transformer_MLP(nn.Module):
    def __init__(self, input_size=900, lstm_size=900, num_heads=3, num_layers=3, hidden_dim=50):
        super(LSTM_Transformer_MLP, self).__init__()
        # ensure lstm_size is divisible by num_heads (required by multi-head attention)
        if lstm_size % num_heads != 0:
            lstm_size = math.ceil(lstm_size / num_heads) * num_heads
        self.input_size = input_size
        self.lstm_size = lstm_size
        self.num_heads = num_heads

        self.lstmcell = nn.LSTMCell(input_size=self.input_size, hidden_size=self.lstm_size)
        

        encoder_layer = nn.TransformerEncoderLayer(d_model=self.lstm_size, nhead=self.num_heads)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        decoder_layer = nn.TransformerDecoderLayer(d_model=self.lstm_size, nhead=self.num_heads)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)

        self.out = nn.Sequential(
            nn.ReLU(),
            nn.Linear(self.lstm_size, 50),
            nn.ReLU(),
            nn.Linear(50, 900)
        )

    def forward(self, x_cur, h_cur=None, c_cur=None):

        batch_size = x_cur.shape[0]  # assumes x_cur is 2-D (batch, input_size); a 1-D input makes this the feature count
        if h_cur is None and c_cur is None:
            h_cur = torch.zeros(batch_size, self.lstm_size, device=x_cur.device)
            c_cur = torch.zeros(batch_size, self.lstm_size, device=x_cur.device)

        h_next, c_next = self.lstmcell(x_cur, (h_cur, c_cur))
        out = self.out(h_next)
        # use the LSTM output as the Transformer input
        transformer_input = out.unsqueeze(0)  # add a sequence dimension to match the Transformer's expected input shape

        encoder_output = self.transformer_encoder(transformer_input)
        decoder_output = self.transformer_decoder(transformer_input, encoder_output)

        out = self.out(decoder_output.squeeze(0))  # remove the added sequence dimension

        return out, h_next, c_next


def calc_error(pred, target):
    error = np.sqrt(np.sum((pred - target) ** 2))
    step_error = error / pred.shape[0]
    avg_error = error / pred.size  # shape-agnostic; works for the 2-D (6, 900) outputs used below
    return avg_error, step_error, error


def calc_nmse(pred, target):
    nmse = np.sum(np.abs((pred - target)) ** 2 / np.abs(target) ** 2) / pred.size
    return nmse
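A quick sanity check of the two metrics on made-up values (for illustration only; these arrays are not from the dataset):

a = np.full((2, 3), 2.0)  # prediction
b = np.full((2, 3), 1.0)  # target
# calc_error: error = sqrt(6), step_error = sqrt(6) / 2, avg_error = sqrt(6) / 6
# calc_nmse: every element contributes |2 - 1|^2 / |1|^2 = 1, so nmse = 1.0
print(calc_error(a, b), calc_nmse(a, b))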


if __name__ == '__main__':
    
    train_file = 'dataset_input'  # input data
    label_file = 'dataset_out'
    # train_file = 'dataset_input_30'
    # label_file = 'dataset_out_30'
    train_rate = 0.75
    val_rate = 0.25
    initial = False
    r_cnt = 1
    sub_index = range(64)  # 64 subcarriers in total (inferred from the ±32 index offsets below)
    c_index = np.array([a + 32 for a in [-21, -7, 7, 21]])  # pilot subcarrier indices, starting from 0
    e_index = np.concatenate([range(-32, -26), [0], range(27, 32)], axis=0) + 32
    other_index = np.concatenate([c_index, e_index], axis=0)
    d_index = np.delete(sub_index, other_index)  # data symbol index
    d2_index = np.concatenate([d_index, d_index + 64],
                              axis=0)  # index of the data subcarriers for real and imaginary part (offset 64 inferred)
    c2_index = np.concatenate([c_index, c_index + 64], axis=0)  # pilot subcarrier indices (real and imaginary parts)
    non_empty_index = np.concatenate((range(6, 32), range(33, 59), range(70, 96), range(97, 123)), axis=0)  # non-empty subcarrier indices
    # ----------------------train parameter-------------------------- #
    LR = 0.01
    EPOCH = 50
    BATCH_SIZE = 6
    input_size = 900
    lstm_size = 900
    clip = 1e-4
    weight_decay = 0
    step_size = 10
    gamma = 0.8
    # ----------------------- load data -------------------------------- #
    
    # training data
    input_data = pd.read_excel('dataset_input.xlsx')
    label_data = pd.read_excel('dataset_output.xlsx')

    input_data_sclar = input_data.values
    label_data_sclar = label_data.values

    # validation data
    input_data1 = pd.read_excel('dataset1_input.xlsx')
    label_data1 = pd.read_excel('dataset1_output.xlsx')

    input_data1_sclar = input_data1.values
    label_data1_sclar = label_data1.values

    # compute dataset sizes
    train_nums = (input_data.shape[0] + 1) * input_data.shape[1]
    val_nums = (input_data1.shape[0] + 1) * input_data1.shape[1]
    # val_nums = int(nums * val_rate)
    print('train set size: ', train_nums, ', val set size: ', val_nums)

    # Split into training and validation sets; convert the sliced NumPy arrays to PyTorch tensors, then cast to float.
    # input_data_sclar is a NumPy array.

    train_input = torch.from_numpy(input_data_sclar[:train_nums, :]).type(torch.FloatTensor)
    train_label = torch.from_numpy(label_data_sclar[:train_nums, :]).type(torch.FloatTensor)  # slice the labels the same way as the inputs

    val_input = torch.from_numpy(input_data1_sclar).type(torch.FloatTensor)
    val_label = torch.from_numpy(label_data1_sclar).type(torch.FloatTensor)

    # ----------------------------- load model ---------------------------- #
    dir_name = './lstm_mlp_' + train_file
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    if initial is False:
        # ---------------- generate training batch dataset ------------------- #
        dataset = data.TensorDataset(train_input, train_label)

        loader = data.DataLoader(
            dataset=dataset,  # torch TensorDataset format
            batch_size=BATCH_SIZE,  # mini batch size
            shuffle=True,

            num_workers=8 if torch.cuda.is_available() else 0
        )
    if initial is False:
        # ---------------- generate validation batch dataset ------------------- #
        dataset = data.TensorDataset(val_input, val_label)

        valloader = data.DataLoader(
            dataset=dataset,  # torch TensorDataset format
            batch_size=BATCH_SIZE,  # mini batch size
            shuffle=False,  # keep validation order fixed so it lines up with val_label

            num_workers=8 if torch.cuda.is_available() else 0
        )

        # ---------------------- train the model ------------------------ #
        r_min_err = float('inf')
        for r in range(r_cnt):
            # ---------------- instantiate a model and optimizer ------------------- #
            model = LSTM_Transformer_MLP(input_size, lstm_size, num_heads, num_layers, hidden_dim).to(device)

            optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=weight_decay)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
            criterion = nn.MSELoss()

            LOSS_TRAIN = []
            LOSS_VAL = []
            nmse_val = []
            STEP = 0

            min_err = float('inf')
            time_train = 0

            for epoch in range(EPOCH):
                # ---------------------- train ------------------------ #
                start = time.time()
                with torch.set_grad_enabled(True):
                    model.train()
                    for step, (train_batch, label_batch) in enumerate(loader):
                        train_batch, label_batch = train_batch.to(device), label_batch.to(device)
                        optimizer.zero_grad()
                        print("train_batch的维度:", train_batch.shape)  # (6,900)

                       
                        outputs = torch.zeros(train_batch.size(0), 900, device=device)  # all-zero tensor to collect the outputs, shape (6, 900)
                        for t in range(train_batch.size(0)):
                            if t == 0:
                                out_t, hn, cn = model(train_batch[t, :])
                            else:
                                out_t, hn, cn = model(train_batch[t, :], hn, cn)
                            outputs[t, :] = out_t

                        print("outputs shape after the loop:", outputs.shape)  # (6, 900)
                        loss = criterion(outputs, label_batch)  # compare against this batch's labels
                        loss.backward()
                        nn.utils.clip_grad_norm_(model.parameters(), clip)
                        optimizer.step()

                        avg_err, s_err, error = calc_error(outputs.detach().cpu().numpy(),
                                                           label_batch.detach().cpu().numpy())
                        if step % 200 == 0:
                            print('Epoch: ', epoch, '| Step: ', step, '| loss: ', loss.item(), '| err: ', avg_err)
                            LOSS_TRAIN.append(loss.item())

                    scheduler.step()  # step the LR scheduler once per epoch, after the optimizer updates

                time_train += time.time() - start

                # ---------------------- validation ------------------------ #
                with torch.set_grad_enabled(False):
                    model.eval()

                    for val_batch, label_batch in valloader:
                        val_batch, label_batch = val_batch.to(device), label_batch.to(device)

                    # output = torch.zeros_like(label_batch)
                    print("val_batch shape:", val_batch.shape)  # (6, 900)

                    outputa = torch.zeros(val_batch.size(0), 900, device=device)  # all-zero tensor to collect the outputs, shape (6, 900)
                   

                    for t in range(val_batch.size(0)):
                        if t == 0:
                            val_t, hn, cn = model(val_batch[t, :])
                        else:
                            val_t, hn, cn = model(val_batch[t, :], hn, cn)
                        outputa[t, :] = val_t

                        print("验证后的output的维度:", output.shape)  # [6,9]

                        
                    loss = criterion(outputa, val_label)

                    avg_err, s_err, error = calc_error(outputa.detach().cpu().numpy(),
                                                       label_batch.detach().cpu().numpy())
                    print('Epoch: ', epoch, '| val err: ', avg_err)
                    LOSS_VAL.append(loss.item())  # append this epoch's validation loss to LOSS_VAL

                    from sklearn.preprocessing import StandardScaler

                    # instantiate a StandardScaler (note: fit_transform followed immediately by inverse_transform is effectively an identity mapping)
                    scaler = StandardScaler()
                    out1 = scaler.inverse_transform(
                        scaler.fit_transform(outputa.detach().cpu().numpy().reshape(-1, 2))).reshape(outputa.shape)
                    val_label1 = scaler.inverse_transform(
                        scaler.fit_transform(val_label.detach().cpu().numpy().reshape(-1, 2))).reshape(val_label.shape)
                    nmse = calc_nmse(out1, val_label1)
                    nmse_val.append(nmse)

                    if avg_err < min_err:
                        min_err = avg_err
                        best_model_wts = copy.deepcopy(model.state_dict())

            if min_err < r_min_err:
                r_min_err = min_err
                r_best_model_wts = best_model_wts

        model.load_state_dict(r_best_model_wts)
        torch.save(model.to('cpu'), dir_name + '.pkl')

        plt.figure(1)
        x = range(EPOCH)
        plt.semilogy(x, LOSS_TRAIN, 'r-', label='loss_train')
        plt.semilogy(x, LOSS_VAL, 'b-', label='loss_val')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()

        NMSE = np.array(nmse_val)
        sio.savemat('nmse_' + str(lstm_size) + '_' + str(EPOCH) + '_' + train_file + '.mat', {'nmse': NMSE})

    else:
        model = torch.load(dir_name + '.pkl')

plt.show()
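A minimal sketch of one possible fix, based on my reading of the traceback rather than a confirmed answer: keep each time-step slice 2-D, so nn.LSTMCell receives a (1, 900) input together with a matching (1, 900) hidden state.

# inside the training loop, slice with an explicit batch dimension:
for t in range(train_batch.size(0)):
    x_t = train_batch[t].unsqueeze(0)      # shape (1, 900) instead of (900,)
    if t == 0:
        out_t, hn, cn = model(x_t)
    else:
        out_t, hn, cn = model(x_t, hn, cn)
    outputs[t, :] = out_t.squeeze(0)       # out_t comes back as (1, 900)

Alternatively, if the 6 rows of each (6, 900) batch are meant to be independent samples rather than time steps, the whole batch can be passed in one call (out, hn, cn = model(train_batch)), since the model already initializes its hidden state from the input's first dimension.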
