How to train an LSTM network with mini-batches in PyTorch
Batched LSTM training
# Import the required packages
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data

torch.manual_seed(1)

# Data preparation
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)

with open("/home/lstm_train.txt", encoding='utf8') as f:
    train_data = []
    word = []
    label = []
    data = f.readline().strip()
    while data:
        data = data.strip()
        SP = data.split(' ')
        if len(SP) == 2:
            word.append(SP[0])
            label.append(SP[1])
        else:
            if len(word) == 100 and 'I-PRO' in label:
                train_data.append((word, label))
            word = []
            label = []
        data = f.readline()

word_to_ix = {}
for sent, _ in train_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)

tag_to_ix = {"O": 0, "I-PRO": 1}
for i in range(len(train_data)):
    train_data[i] = ([word_to_ix[t] for t in train_data[i][0]],
                     [tag_to_ix[t] for t in train_data[i][1]])

# Dimensionality of the word embeddings
EMBEDDING_DIM = 128
# Number of hidden units
HIDDEN_DIM = 128
# Batch size
batch_size = 10

class LSTMTagger(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, batch_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        # input_tensor = embeds.view(self.batch_size, len(sentence) // self.batch_size, -1)
        lstm_out, _ = self.lstm(embeds)
        tag_space = self.hidden2tag(lstm_out)
        scores = F.log_softmax(tag_space, dim=2)
        return scores

    def predict(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds)
        tag_space = self.hidden2tag(lstm_out)
        scores = F.log_softmax(tag_space, dim=2)
        return scores

loss_function = nn.NLLLoss()
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix), batch_size)
optimizer = optim.SGD(model.parameters(), lr=0.1)

data_set_word = []
data_set_label = []
for data_tuple in train_data:
    data_set_word.append(data_tuple[0])
    data_set_label.append(data_tuple[1])
torch_dataset = Data.TensorDataset(torch.tensor(data_set_word, dtype=torch.long),
                                   torch.tensor(data_set_label, dtype=torch.long))
# Put the dataset into a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,   # torch TensorDataset format
    batch_size=batch_size,   # mini-batch size
    shuffle=True,
    # num_workers=2,         # read the data with multiple worker processes
)

# Training loop
for epoch in range(200):
    for step, (batch_x, batch_y) in enumerate(loader):
        # Zero the gradients
        model.zero_grad()
        tag_scores = model(batch_x)
        # Compute the loss: flatten (batch, seq_len, tagset) to (batch*seq_len, tagset)
        tag_scores = tag_scores.view(-1, tag_scores.shape[2])
        batch_y = batch_y.view(batch_y.shape[0] * batch_y.shape[1])
        loss = loss_function(tag_scores, batch_y)
        print(loss)
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()

# Testing
with torch.no_grad():
    inputs = torch.tensor([data_set_word[0]], dtype=torch.long)
    print(inputs)
    tag_scores = model.predict(inputs)
    print(tag_scores.shape)
    print(torch.argmax(tag_scores, dim=2))
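The batching above only works because every training sentence is exactly 100 tokens long, so the word-id lists stack directly into a TensorDataset. For variable-length sentences, a custom collate_fn that pads each mini-batch is the usual route. The sketch below is only an illustration and not part of the original code; collate_fn and PAD_IDX are names I introduce, and a dedicated padding index would have to be reserved, since index 0 is already used by a real word and by the "O" tag in this vocabulary:

from torch.nn.utils.rnn import pad_sequence

PAD_IDX = 0  # hypothetical padding index; reserve a free index in practice

def collate_fn(batch):
    # batch is a list of (word_id_list, tag_id_list) tuples
    words = [torch.tensor(w, dtype=torch.long) for w, _ in batch]
    tags = [torch.tensor(t, dtype=torch.long) for _, t in batch]
    # pad to the longest sentence in the mini-batch -> (batch, max_len)
    words = pad_sequence(words, batch_first=True, padding_value=PAD_IDX)
    tags = pad_sequence(tags, batch_first=True, padding_value=PAD_IDX)
    return words, tags

# loader = Data.DataLoader(train_data, batch_size=batch_size,
#                          shuffle=True, collate_fn=collate_fn)
# With padding, nn.NLLLoss(ignore_index=PAD_IDX) keeps the padded
# positions out of the loss.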
Supplement: PyTorch basics - handwritten digit (MNIST) recognition with an LSTM neural network
Let's look at the code.
import numpy as np
import torch
from torch import nn, optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# Training set
train_data = datasets.MNIST(root="./",                        # where to store the data
                            train=True,                       # load the training split
                            transform=transforms.ToTensor(),  # convert the data to tensors
                            download=True)                    # download if not present
# Test set
test_data = datasets.MNIST(root="./",
                           train=False,
                           transform=transforms.ToTensor(),
                           download=True)
# Batch size
batch_size = 64
# Load the training set
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
# Load the test set
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)
# Peek at one batch: inputs is (64, 1, 28, 28), labels is (64,)
for i, data in enumerate(train_loader):
    inputs, labels = data
    print(inputs.shape)
    print(labels.shape)
    break
# Define the network structure
class LSTM(nn.Module):
    def __init__(self):
        super(LSTM, self).__init__()  # initialization
        self.lstm = torch.nn.LSTM(
            input_size=28,     # size of the input features (one row of 28 pixels per time step)
            hidden_size=64,    # number of hidden units in the LSTM
            num_layers=1,      # number of stacked LSTM layers
            batch_first=True   # the default input format is (seq_len, batch, feature);
                               # True makes input and output (batch, seq_len, feature)
        )
        self.out = torch.nn.Linear(in_features=64, out_features=10)
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        # (batch, seq_len, feature)
        x = x.view(-1, 28, 28)
        # output: (batch, seq_len, hidden_size), the output at every time step
        # Even with batch_first=True, the first dimension of h_n and c_n is still num_layers
        # h_n: [num_layers, batch, hidden_size], the hidden state of the last time step only
        # c_n: [num_layers, batch, hidden_size], the cell state of the last time step only
        output, (h_n, c_n) = self.lstm(x)
        output_in_last_timestep = h_n[-1, :, :]
        x = self.out(output_in_last_timestep)
        x = self.softmax(x)
        return x
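To make the shape comments in forward concrete, here is a small sanity check of my own (not from the original): for a single-layer, unidirectional LSTM with batch_first=True, the last time step of output equals h_n[-1].

lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)
x = torch.randn(4, 28, 28)               # (batch, seq_len, feature)
output, (h_n, c_n) = lstm(x)
print(output.shape)                       # torch.Size([4, 28, 64])  (batch, seq_len, hidden_size)
print(h_n.shape, c_n.shape)               # both torch.Size([1, 4, 64])  (num_layers, batch, hidden_size)
print(torch.allclose(output[:, -1, :], h_n[-1]))  # True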
# Define the model
model = LSTM()
# Define the loss function (cross entropy; the variable name mse_loss is misleading)
mse_loss = nn.CrossEntropyLoss()
# Define the optimizer (Adam)
optimizer = optim.Adam(model.parameters(), lr=0.001)
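One thing to be aware of (my observation, not from the original): nn.CrossEntropyLoss already combines LogSoftmax and NLLLoss, so it expects raw logits. The extra nn.Softmax(dim=1) applied in forward still lets the model train, but it is redundant and can slow convergence; a common variant is to return self.out(output_in_last_timestep) directly and keep the loss unchanged.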
# Define the training and testing procedures
def train():
    # Put the model in training mode
    model.train()
    for i, data in enumerate(train_loader):
        # Get one batch of data and labels
        inputs, labels = data
        # Get the model predictions, shape (64, 10)
        out = model(inputs)
        # Cross-entropy loss: out is (batch, C), labels is (batch,)
        loss = mse_loss(out, labels)
        # Zero the gradients
        optimizer.zero_grad()
        # Compute the gradients
        loss.backward()
        # Update the weights
        optimizer.step()

def test():
    # Put the model in evaluation mode
    model.eval()
    correct = 0
    # Accuracy on the test set
    for i, data in enumerate(test_loader):
        # Get one batch of data and labels
        inputs, labels = data
        # Get the model predictions, shape (64, 10)
        out = model(inputs)
        # Get the maximum value and its position (the predicted class)
        _, predicted = torch.max(out, 1)
        # Count the correct predictions
        correct += (predicted == labels).sum()
    print("Test acc:{0}".format(correct.item() / len(test_data)))

    correct = 0
    # Accuracy on the training set
    for i, data in enumerate(train_loader):
        inputs, labels = data
        out = model(inputs)
        _, predicted = torch.max(out, 1)
        correct += (predicted == labels).sum()
    print("Train acc:{0}".format(correct.item() / len(train_data)))
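A small addition worth considering (not part of the original code): the evaluation loops in test() build the autograd graph unnecessarily; wrapping the forward passes in torch.no_grad() saves memory and time, e.g.:

with torch.no_grad():
    out = model(inputs)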
# Train
for epoch in range(10):
    print("epoch:", epoch)
    train()
    test()
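After training, the model can be used on a single image. The following inference sketch is my own illustration and is not part of the original code:

model.eval()
with torch.no_grad():
    image, label = test_data[0]         # image: (1, 28, 28), label: int
    out = model(image.unsqueeze(0))     # add the batch dimension -> (1, 1, 28, 28)
    pred = torch.argmax(out, dim=1)
    print("predicted:", pred.item(), "actual:", label)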
The above is my personal experience; I hope it can serve as a reference, and I hope you will continue to support 腳本之家.