
利用PyTorch實(shí)現(xiàn)VGG16教程

 更新時間:2020年06月24日 14:25:48   作者:Oshrin  
這篇文章主要介紹了利用PyTorch實(shí)現(xiàn)VGG16教程,具有很好的參考價值,希望對大家有所幫助。一起跟隨小編過來看看吧

我就廢話不多說了,大家還是直接看代碼吧~

import torch
import torch.nn as nn
import torch.nn.functional as F
class VGG16(nn.Module):
 """VGG16 (Simonyan & Zisserman 2014, configuration D) for 3x224x224 inputs.

 Five convolutional stages (2-2-3-3-3 conv layers) separated by 2x2
 max-pools, followed by three fully connected layers. Returns
 log-probabilities over 1000 classes.
 """

 def __init__(self):
  super(VGG16, self).__init__()

  # BUG FIX: the original left the first conv of each stage unpadded and
  # used padding=(1, 1) on the pools, which deviates from the published
  # VGG16 layer shapes. Canonical VGG16 pads every 3x3 conv by 1 (spatial
  # size preserved) and pools with a plain 2x2/stride-2 max-pool, so the
  # spatial size halves cleanly: 224 -> 112 -> 56 -> 28 -> 14 -> 7.

  # stage 1: 3 x 224 x 224 -> 64 x 112 x 112
  self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
  self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
  self.maxpool1 = nn.MaxPool2d(2, 2)

  # stage 2: -> 128 x 56 x 56
  self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
  self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
  self.maxpool2 = nn.MaxPool2d(2, 2)

  # stage 3: -> 256 x 28 x 28
  self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
  self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
  self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
  self.maxpool3 = nn.MaxPool2d(2, 2)

  # stage 4: -> 512 x 14 x 14
  self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
  self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
  self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
  self.maxpool4 = nn.MaxPool2d(2, 2)

  # stage 5: -> 512 x 7 x 7
  self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
  self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
  self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
  self.maxpool5 = nn.MaxPool2d(2, 2)

  # classifier head on the flattened 512*7*7 feature map
  self.fc1 = nn.Linear(512 * 7 * 7, 4096)
  self.fc2 = nn.Linear(4096, 4096)
  self.fc3 = nn.Linear(4096, 1000)

 def forward(self, x):
  """Map a (batch, 3, 224, 224) tensor to (batch, 1000) log-probabilities."""
  in_size = x.size(0)  # batch size, used to flatten below

  out = F.relu(self.conv1_1(x))
  out = F.relu(self.conv1_2(out))
  out = self.maxpool1(out)  # 112

  out = F.relu(self.conv2_1(out))
  out = F.relu(self.conv2_2(out))
  out = self.maxpool2(out)  # 56

  out = F.relu(self.conv3_1(out))
  out = F.relu(self.conv3_2(out))
  out = F.relu(self.conv3_3(out))
  out = self.maxpool3(out)  # 28

  out = F.relu(self.conv4_1(out))
  out = F.relu(self.conv4_2(out))
  out = self.maxpool4(out)  # 14

  out = F.relu(self.conv5_1(out))
  out = F.relu(self.conv5_2(out))
  out = F.relu(self.conv5_3(out))
  out = self.maxpool5(out)  # 7

  # flatten, then classify; no ReLU after fc3 (log_softmax follows)
  out = out.view(in_size, -1)
  out = F.relu(self.fc1(out))
  out = F.relu(self.fc2(out))
  out = self.fc3(out)

  return F.log_softmax(out, dim=1)

補(bǔ)充知識:Pytorch實(shí)現(xiàn)VGG(GPU版)

看代碼吧~

import torch
from torch import nn
from torch import optim
from PIL import Image
import numpy as np

print(torch.cuda.is_available())
device = torch.device('cuda:0')
path = "/content/drive/My Drive/Colab Notebooks/data/dog_vs_cat/"

# NHWC staging buffer, integer labels, and the NCHW tensor fed to the network.
train_X = np.empty((2000, 224, 224, 3), dtype="float32")
train_Y = np.empty((2000,), dtype="int")
train_XX = np.empty((2000, 3, 224, 224), dtype="float32")


def _load_class(prefix, label, offset):
 """Load 1000 images named '<prefix>.<i>.jpg' into train_X/train_Y at `offset`."""
 for i in range(1000):
  file_path = path + prefix + "." + str(i) + ".jpg"
  image = Image.open(file_path)
  # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
  # same resampling filter under its current name.
  resized_image = image.resize((224, 224), Image.LANCZOS)
  # convert("RGB") guards against grayscale/RGBA files, whose arrays
  # would not broadcast into the (224, 224, 3) slot.
  train_X[offset + i, :, :, :] = np.array(resized_image.convert("RGB"))
  train_Y[offset + i] = label


_load_class("cat", 0, 0)     # cats -> label 0, slots 0..999
_load_class("dog", 1, 1000)  # dogs -> label 1, slots 1000..1999

train_X /= 255  # scale pixels to [0, 1]

# Shuffle samples and labels with the same permutation.
index = np.arange(2000)
np.random.shuffle(index)
train_X = train_X[index, :, :, :]
train_Y = train_Y[index]

# NHWC -> NCHW, as PyTorch convolutions expect.
train_XX = train_X.transpose(0, 3, 1, 2).copy()
# 創(chuàng)建網(wǎng)絡(luò)

class Net(nn.Module):
 """VGG-style binary classifier (e.g. cats vs. dogs) for 3x224x224 inputs."""

 @staticmethod
 def _vgg_stage(in_channels, out_channels, conv_count):
  """One stage: `conv_count` (3x3 conv + ReLU) pairs, then BatchNorm and a 2x2 max-pool."""
  layers = []
  channels = in_channels
  for _ in range(conv_count):
   layers.append(nn.Conv2d(in_channels=channels, out_channels=out_channels,
           kernel_size=3, stride=1, padding=1))
   layers.append(nn.ReLU())
   channels = out_channels
  layers.append(nn.BatchNorm2d(out_channels, eps=1e-5, momentum=0.1, affine=True))
  layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
  return nn.Sequential(*layers)

 def __init__(self):
  super(Net, self).__init__()
  # Five conv stages; each max-pool halves the spatial size: 224 -> 7.
  self.conv1 = self._vgg_stage(3, 64, 2)
  self.conv2 = self._vgg_stage(64, 128, 2)
  self.conv3 = self._vgg_stage(128, 256, 3)
  self.conv4 = self._vgg_stage(256, 512, 3)
  self.conv5 = self._vgg_stage(512, 512, 3)
  # Classifier head on the flattened 512 x 7 x 7 feature map.
  self.dense1 = nn.Sequential(
   nn.Linear(7 * 7 * 512, 4096),
   nn.ReLU(),
   nn.Linear(4096, 4096),
   nn.ReLU(),
   nn.Linear(4096, 2)
  )

 def forward(self, x):
  """Map a (batch, 3, 224, 224) tensor to (batch, 2) class scores."""
  for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
   x = stage(x)
  x = x.view(-1, 7 * 7 * 512)
  return self.dense1(x)

batch_size = 16
net = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0005)

# --- training ---
train_loss = []
for epoch in range(10):
 net.train()  # BatchNorm uses batch statistics while training
 for i in range(2000 // batch_size):
  x = train_XX[i * batch_size:(i + 1) * batch_size]
  y = train_Y[i * batch_size:(i + 1) * batch_size]

  # Move the mini-batch to the same device as the model.
  # (The original hard-coded .cuda(); .to(device) honours the device
  # selected at the top of the script.)
  x = torch.from_numpy(x).to(device)                # (batch, 3, 224, 224)
  y = torch.from_numpy(y).long().to(device)         # (batch,) class indices

  out = net(x)
  loss = criterion(out, y)

  optimizer.zero_grad()  # clear gradients left over from the previous step
  loss.backward()        # back-propagate
  optimizer.step()       # apply the parameter update

  # BUG FIX: the original reset train_loss after every batch, so
  # np.mean(train_loss) always equalled the single current loss.
  # Accumulate over the epoch and reset once per epoch instead.
  train_loss.append(loss.item())
  print(epoch, i * batch_size, np.mean(train_loss))
 train_loss = []

# --- evaluation (on the training set) ---
# BUG FIX: evaluate in eval mode (BatchNorm switches to running stats) and
# under no_grad() so no autograd graphs are built per sample.
net.eval()
total_correct = 0
with torch.no_grad():
 for i in range(2000):
  x = torch.from_numpy(train_XX[i].reshape(1, 3, 224, 224)).to(device)
  out = net(x).cpu().numpy()
  if np.argmax(out) == train_Y[i]:
   total_correct += 1

acc = total_correct / 2000.0
print('test acc:', acc)

torch.cuda.empty_cache()  # release cached GPU memory when done

將上面代碼中batch_size改為32,訓(xùn)練次數(shù)改為100輪,得到如下準(zhǔn)確率

過擬合了~

以上這篇利用PyTorch實(shí)現(xiàn)VGG16教程就是小編分享給大家的全部內(nèi)容了,希望能給大家一個參考,也希望大家多多支持腳本之家。

相關(guān)文章

  • Python讀寫及備份oracle數(shù)據(jù)庫操作示例

    Python讀寫及備份oracle數(shù)據(jù)庫操作示例

    這篇文章主要介紹了Python讀寫及備份oracle數(shù)據(jù)庫操作,結(jié)合實(shí)例形式分析了Python針對Oracle數(shù)據(jù)庫操作的相關(guān)庫安裝,以及使用cx_Oracle與pandas庫進(jìn)行Oracle數(shù)據(jù)庫的查詢、插入、備份等操作相關(guān)實(shí)現(xiàn)技巧,需要的朋友可以參考下
    2018-05-05
  • python與js進(jìn)行MD5取hash有什么不同

    python與js進(jìn)行MD5取hash有什么不同

    這篇文章主要講解得內(nèi)容是python與js進(jìn)行MD5取hash有什么不同,我們在做前端做滲透測試時會遇到一些關(guān)鍵字進(jìn)行了加密得情況,而且python和js對json進(jìn)行md5取hash,MD5結(jié)果值還不一致,下面我們就爛看看到底是哪里不同吧,需要的朋友可以參考一下
    2022-02-02
  • Python讀寫Excel表格的方法

    Python讀寫Excel表格的方法

    這篇文章主要為大家詳細(xì)介紹了Python讀寫Excel表格的方法,文中示例代碼介紹的非常詳細(xì),具有一定的參考價值,感興趣的小伙伴們可以參考一下
    2021-03-03
  • Django 實(shí)現(xiàn)下載文件功能的示例

    Django 實(shí)現(xiàn)下載文件功能的示例

    這篇文章主要介紹了Django 實(shí)現(xiàn)下載文件功能的示例,小編覺得挺不錯的,現(xiàn)在分享給大家,也給大家做個參考。一起跟隨小編過來看看吧
    2018-03-03
  • python統(tǒng)計文本字符串里單詞出現(xiàn)頻率的方法

    python統(tǒng)計文本字符串里單詞出現(xiàn)頻率的方法

    這篇文章主要介紹了python統(tǒng)計文本字符串里單詞出現(xiàn)頻率的方法,涉及Python字符串操作的相關(guān)技巧,需要的朋友可以參考下
    2015-05-05
  • 元組列表字典(莫煩python基礎(chǔ))

    元組列表字典(莫煩python基礎(chǔ))

    這篇文章主要介紹了python元組列表字典,文中通過示例代碼介紹的非常詳細(xì),對大家的學(xué)習(xí)或者工作具有一定的參考學(xué)習(xí)價值,需要的朋友們下面隨著小編來一起學(xué)習(xí)學(xué)習(xí)吧
    2019-04-04
  • Python?GUI和游戲開發(fā)從入門到實(shí)踐

    Python?GUI和游戲開發(fā)從入門到實(shí)踐

    GUI是圖形用戶界面的縮寫,圖形化的用戶界面對使用過計算機(jī)的人來說應(yīng)該都不陌生,下面這篇文章主要給大家介紹了關(guān)于Python圖形用戶界面與游戲開發(fā)的相關(guān)資料,文中通過實(shí)例代碼介紹的非常詳細(xì),需要的朋友可以參考下
    2023-05-05
  • Python中format()格式輸出全解

    Python中format()格式輸出全解

    這篇文章主要介紹了Python中format()格式輸出 ,非常不錯,具有一定的參考借鑒價值,需要的朋友可以參考下
    2019-04-04
  • python實(shí)現(xiàn)簡單中文詞頻統(tǒng)計示例

    python實(shí)現(xiàn)簡單中文詞頻統(tǒng)計示例

    本篇文章主要介紹了python實(shí)現(xiàn)簡單中文詞頻統(tǒng)計示例,小編覺得挺不錯的,現(xiàn)在分享給大家,也給大家做個參考。一起跟隨小編過來看看吧
    2017-11-11
  • 用Python給圖像算法做個簡單應(yīng)用界面

    用Python給圖像算法做個簡單應(yīng)用界面

    這篇文章主要介紹了用Python給圖像算法做個簡單應(yīng)用界面,幫助大家更好的理解和學(xué)習(xí)使用python開發(fā)gui,感興趣的朋友可以了解下
    2021-05-05

最新評論