How to train an LSTM network with batches in PyTorch
Updated: 2021-05-28 14:29:37  Author: king的江鳥
This article shows how to train an LSTM network with batches in PyTorch. It is meant as a practical reference; if anything is wrong or incomplete, corrections are welcome.
LSTM with batches
# Import the required packages
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data
torch.manual_seed(1)

# Data preparation stage
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)
with open("/home/lstm_train.txt", encoding='utf8') as f:
    train_data = []
    word = []
    label = []
    data = f.readline().strip()
    while data:
        data = data.strip()
        SP = data.split(' ')
        if len(SP) == 2:
            word.append(SP[0])
            label.append(SP[1])
        else:
            # A line that is not a word/tag pair marks a sentence boundary;
            # keep only sentences of exactly 100 tokens that contain an 'I-PRO' tag
            if len(word) == 100 and 'I-PRO' in label:
                train_data.append((word, label))
            word = []
            label = []
        data = f.readline()

# Build the word-to-index vocabulary
word_to_ix = {}
for sent, _ in train_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)

# Map words and tags to integer ids
tag_to_ix = {"O": 0, "I-PRO": 1}
for i in range(len(train_data)):
    train_data[i] = ([word_to_ix[t] for t in train_data[i][0]], [tag_to_ix[t] for t in train_data[i][1]])
# Dimension of the word embeddings
EMBEDDING_DIM = 128
# Number of units in the hidden layer
HIDDEN_DIM = 128
# Batch size
batch_size = 10
class LSTMTagger(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, batch_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        # input_tensor = embeds.view(self.batch_size, len(sentence) // self.batch_size, -1)
        lstm_out, _ = self.lstm(embeds)
        tag_space = self.hidden2tag(lstm_out)
        scores = F.log_softmax(tag_space, dim=2)
        return scores

    def predict(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds)
        tag_space = self.hidden2tag(lstm_out)
        scores = F.log_softmax(tag_space, dim=2)
        return scores
loss_function = nn.NLLLoss()
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix), batch_size)
optimizer = optim.SGD(model.parameters(), lr=0.1)

data_set_word = []
data_set_label = []
for data_tuple in train_data:
    data_set_word.append(data_tuple[0])
    data_set_label.append(data_tuple[1])
torch_dataset = Data.TensorDataset(torch.tensor(data_set_word, dtype=torch.long), torch.tensor(data_set_label, dtype=torch.long))
# Put the dataset into a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,    # torch TensorDataset format
    batch_size=batch_size,    # mini-batch size
    shuffle=True,             # shuffle the data every epoch
    num_workers=2,            # read data with multiple worker processes
)
# Training loop
for epoch in range(200):
    for step, (batch_x, batch_y) in enumerate(loader):
        # Zero the gradients
        model.zero_grad()
        tag_scores = model(batch_x)
        # Compute the loss: flatten (batch, seq_len, tagset) for NLLLoss
        tag_scores = tag_scores.view(-1, tag_scores.shape[2])
        batch_y = batch_y.view(batch_y.shape[0] * batch_y.shape[1])
        loss = loss_function(tag_scores, batch_y)
        print(loss)
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()
# Testing
with torch.no_grad():
    inputs = torch.tensor([data_set_word[0]], dtype=torch.long)
    print(inputs)
    tag_scores = model.predict(inputs)
    print(tag_scores.shape)
    print(torch.argmax(tag_scores, dim=2))
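Note that building a TensorDataset directly from the word and label lists only works because every kept sentence is exactly 100 tokens long. If your sentences vary in length, one common option (not part of the original code) is to pad each mini-batch with a custom collate_fn. The sketch below is a minimal illustration with toy data; the padding indices PAD_WORD and PAD_TAG are assumptions introduced here, not values from the article.

# A minimal sketch (assumptions, not from the original article) of batching
# variable-length sentences with pad_sequence and a custom collate_fn.
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

PAD_WORD = 0    # assumed padding index for words
PAD_TAG = -100  # padded label positions, skipped by the loss via ignore_index

# Toy sentences of different lengths, already mapped to integer ids
sentences = [torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])]
tags = [torch.tensor([0, 1, 0]), torch.tensor([1, 0]), torch.tensor([0])]

def collate(batch):
    # Pad every sequence in the batch to the length of the longest one
    words, labels = zip(*batch)
    words = pad_sequence(words, batch_first=True, padding_value=PAD_WORD)
    labels = pad_sequence(labels, batch_first=True, padding_value=PAD_TAG)
    return words, labels

loader = DataLoader(list(zip(sentences, tags)), batch_size=2, collate_fn=collate)
loss_function = nn.NLLLoss(ignore_index=PAD_TAG)
for batch_x, batch_y in loader:
    print(batch_x.shape, batch_y.shape)  # padded to the longest sentence in the batch

With NLLLoss(ignore_index=PAD_TAG) the padded positions do not contribute to the loss, so the tagger is only trained on real tokens.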
Supplement: PyTorch basics - handwritten digit (MNIST) recognition with an LSTM network
Let's go straight to the code.
import numpy as np
import torch
from torch import nn, optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# Training set
train_data = datasets.MNIST(root="./",                        # where to store the data
                            train=True,                        # load the training split
                            transform=transforms.ToTensor(),   # convert the data to tensors
                            download=True                      # download if not already present
                            )
# Test set
test_data = datasets.MNIST(root="./",
                           train=False,
                           transform=transforms.ToTensor(),
                           download=True
                           )
# Batch size
batch_size = 64
# Load the training set
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
# Load the test set
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)
# Inspect the shape of one batch
for i, data in enumerate(train_loader):
    inputs, labels = data
    print(inputs.shape)
    print(labels.shape)
    break
# Define the network structure
class LSTM(nn.Module):
    def __init__(self):
        super(LSTM, self).__init__()  # initialization
        self.lstm = torch.nn.LSTM(
            input_size=28,     # size of each input feature vector (one image row)
            hidden_size=64,    # dimensionality of the LSTM hidden state
            num_layers=1,      # number of stacked LSTM layers
            batch_first=True   # the LSTM default input layout is (seq_len, batch, feature);
                               # batch_first=True makes input and output (batch, seq_len, feature)
        )
        self.out = torch.nn.Linear(in_features=64, out_features=10)
        # Note: nn.CrossEntropyLoss already applies log-softmax internally, so this
        # explicit Softmax is redundant for training, but the network still learns
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        # Reshape to (batch, seq_len, feature): each 28x28 image becomes a sequence of 28 rows
        x = x.view(-1, 28, 28)
        # output: (batch, seq_len, hidden_size), the output at every time step
        # Even with batch_first=True, the first dimension of h_n and c_n is still num_layers
        # h_n: [num_layers, batch, hidden_size], hidden state of the last time step only
        # c_n: [num_layers, batch, hidden_size], cell state of the last time step only
        output, (h_n, c_n) = self.lstm(x)
        output_in_last_timestep = h_n[-1, :, :]
        x = self.out(output_in_last_timestep)
        x = self.softmax(x)
        return x
# Define the model
model = LSTM()
# Define the cost function: cross-entropy
mse_loss = nn.CrossEntropyLoss()
# Define the optimizer: Adam
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Define the training and test procedures
def train():
    # Put the model in training mode
    model.train()
    for i, data in enumerate(train_loader):
        # Get one batch of data and labels
        inputs, labels = data
        # Get the model predictions, shape (64, 10)
        out = model(inputs)
        # Cross-entropy loss: out is (batch, C), labels is (batch)
        loss = mse_loss(out, labels)
        # Zero the gradients
        optimizer.zero_grad()
        # Compute the gradients
        loss.backward()
        # Update the weights
        optimizer.step()
def test():
    # Put the model in evaluation mode
    model.eval()
    correct = 0  # test-set accuracy
    for i, data in enumerate(test_loader):
        # Get one batch of data and labels
        inputs, labels = data
        # Get the model predictions, shape (64, 10)
        out = model(inputs)
        # Get the maximum value and its index (the predicted class)
        _, predicted = torch.max(out, 1)
        # Count the correct predictions
        correct += (predicted == labels).sum()
    print("Test acc:{0}".format(correct.item() / len(test_data)))

    correct = 0
    for i, data in enumerate(train_loader):  # training-set accuracy
        # Get one batch of data and labels
        inputs, labels = data
        # Get the model predictions, shape (64, 10)
        out = model(inputs)
        # Get the maximum value and its index (the predicted class)
        _, predicted = torch.max(out, 1)
        # Count the correct predictions
        correct += (predicted == labels).sum()
    print("Train acc:{0}".format(correct.item() / len(train_data)))
# Training
for epoch in range(10):
    print("epoch:", epoch)
    train()
    test()
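The comments in forward() describe the shapes returned by nn.LSTM with batch_first=True. A small stand-alone check (not part of the original article; the batch size below is an arbitrary choice) makes those shapes concrete and shows that, for a single-layer unidirectional LSTM, h_n[-1] is exactly the output at the last time step.

# Quick shape check for a batch_first LSTM (illustrative sketch, arbitrary batch size)
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)
x = torch.randn(4, 28, 28)     # (batch, seq_len, feature)
output, (h_n, c_n) = lstm(x)
print(output.shape)            # torch.Size([4, 28, 64]) -> every time step
print(h_n.shape, c_n.shape)    # torch.Size([1, 4, 64])  -> last time step only
# For a single-layer unidirectional LSTM, h_n[-1] equals output[:, -1, :]
print(torch.allclose(h_n[-1], output[:, -1, :]))  # True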

The above reflects my personal experience; I hope it can serve as a useful reference, and I hope you will continue to support 腳本之家.