Implementing CNN Image Classification in PyTorch
Task
Given a 4*4 image, classify it by comparing the number of black pixels on the outer ring with the number of black pixels in the inner ring.

As in the figure above, an image whose outer ring has 5 black pixels, more than the 1 black pixel in the inner ring, is assigned to class 0; otherwise it is class 1.
Approach
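For example, the hypothetical 4*4 array below (not from the original article; 0 = black pixel, 1 = white pixel) has 5 black pixels on the outer ring and 1 in the inner 2*2, so it falls into class 0:

import numpy as np
import collections

# hypothetical example array; 0 = black pixel, 1 = white pixel
arr = np.array([[1, 0, 1, 0],
                [1, 1, 1, 0],
                [0, 1, 0, 1],
                [1, 1, 1, 0]])
allBlack = collections.Counter(arr.flatten())[0]              # black pixels in the whole image: 6
innerBlack = collections.Counter(arr[1:3, 1:3].flatten())[0]  # black pixels in the inner 2*2: 1
label = 0 if (allBlack - innerBlack) > innerBlack else 1      # outer 5 > inner 1 -> class 0
print(allBlack - innerBlack, innerBlack, label)               # 5 1 0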
- Build a 4*4 image dataset with numpy and PIL
- Write a custom Dataset class
- Load the dataset and subsample it to reduce class skew
- Since there are few features, the CNN can simply use a 1*1 convolution layer
- Alternatively, pad the 4*4 input to 6*6, apply a 2*2 kernel to get a 3*3 feature map, and follow it with a fully connected layer
Code
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
Building the dataset
import csv
import collections
import os
import shutil
def buildDataset(root, dataType, dataSize):
    """Build the dataset.

    The generated images are saved to root/{dataType}Data,
    and a csv file of image paths and labels is saved to root/{dataType}DataInfo.csv.

    Args:
        root: str
            project directory
        dataType: str
            'train' or 'test'
        dataSize: int
            number of samples
    Returns:
    """
    dataInfo = []
    dataPath = f'{root}/{dataType}Data'
    if not os.path.exists(dataPath):
        os.makedirs(dataPath)
    else:
        shutil.rmtree(dataPath)
        os.mkdir(dataPath)
    for i in range(dataSize):
        # create a random 0/1 array
        imageArray = np.random.randint(0, 2, (4, 4))
        # count the 0s (black pixels) to derive the label
        allBlackNum = collections.Counter(imageArray.flatten())[0]
        innerBlackNum = collections.Counter(imageArray[1:3, 1:3].flatten())[0]
        label = 0 if (allBlackNum - innerBlackNum) > innerBlackNum else 1
        # save the image
        path = f'{dataPath}/{i}.jpg'
        dataInfo.append([path, label])
        im = Image.fromarray(np.uint8(imageArray * 255))
        im = im.convert('1')
        im.save(path)
    # write image paths and labels to a csv file
    filePath = f'{root}/{dataType}DataInfo.csv'
    with open(filePath, 'w') as f:
        writer = csv.writer(f)
        writer.writerows(dataInfo)
root=r'/Users/null/Documents/PythonProject/Classifier'
Build the training set
buildDataset(root,'train',20000)
Build the test set
buildDataset(root,'test',10000)
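Because the pixels are drawn uniformly at random, the outer ring (12 cells) almost always contains more black pixels than the inner 2*2 (4 cells), so class 0 dominates. A quick count of the labels (a minimal sketch, assuming the csv file written above) makes this skew visible; it is addressed in the rebalancing step below.

import csv
import collections

# count the labels stored in the training csv
with open(f'{root}/trainDataInfo.csv') as f:
    labels = [row[1] for row in csv.reader(f) if row]
print(collections.Counter(labels))   # class '0' greatly outnumbers class '1'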
Reading the dataset
class MyDataset(torch.utils.data.Dataset):
    def __init__(self, root, datacsv, transform=None):
        super(MyDataset, self).__init__()
        with open(f'{root}/{datacsv}', 'r') as f:
            imgs = []
            # read the csv rows into the imgs list
            for path, label in map(lambda line: line.rstrip().split(','), f):
                imgs.append((path, int(label)))
        self.imgs = imgs
        self.transform = transform if transform is not None else lambda x: x

    def __getitem__(self, index):
        path, label = self.imgs[index]
        img = self.transform(Image.open(path).convert('1'))
        return img, label

    def __len__(self):
        return len(self.imgs)
trainData = MyDataset(root=root, datacsv='trainDataInfo.csv', transform=transforms.ToTensor())
testData = MyDataset(root=root, datacsv='testDataInfo.csv', transform=transforms.ToTensor())
Rebalancing the dataset to reduce skew
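As a quick check of what MyDataset returns (a minimal sketch, assuming the datasets built above): transforms.ToTensor() turns each 1-bit PIL image into a 1*4*4 float tensor, paired with its integer label.

img, label = trainData[0]
print(img.shape, label)   # expected: torch.Size([1, 4, 4]) and a 0/1 label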
import itertools

def chooseData(dataset, scale):
    # sort the class-1 samples to the front
    dataset.imgs.sort(key=lambda x: x[1], reverse=True)
    # count the class-1 samples and keep scale times that many, so the data is less skewed
    trueNum = collections.Counter(itertools.chain.from_iterable(dataset.imgs))[1]
    end = min(trueNum * scale, len(dataset))
    dataset.imgs = dataset.imgs[:end]

scale = 4
chooseData(trainData, scale)
chooseData(testData, scale)
len(trainData), len(testData)

(2250, 1122)
import torch.utils.data as Data

# hyperparameters
batchSize = 50
lr = 0.1
numEpochs = 20

trainIter = Data.DataLoader(dataset=trainData, batch_size=batchSize, shuffle=True)
testIter = Data.DataLoader(dataset=testData, batch_size=batchSize)
Defining the models
from torch import nn
from torch.autograd import Variable
from torch.nn import Module, Linear, Sequential, Conv2d, ReLU, ConstantPad2d
import torch.nn.functional as F

class Net(Module):
    def __init__(self):
        super(Net, self).__init__()
        self.cnnLayers = Sequential(
            # pad one ring of constant 1s around the 4*4 input, then apply a 2*2 kernel
            ConstantPad2d(1, 1),
            Conv2d(1, 1, kernel_size=2, stride=2, bias=True)
        )
        self.linearLayers = Sequential(
            Linear(9, 2)
        )

    def forward(self, x):
        x = self.cnnLayers(x)
        x = x.view(x.shape[0], -1)
        x = self.linearLayers(x)
        return x
class Net2(Module):
    def __init__(self):
        super(Net2, self).__init__()
        self.cnnLayers = Sequential(
            Conv2d(1, 1, kernel_size=1, stride=1, bias=True)
        )
        self.linearLayers = Sequential(
            ReLU(),
            Linear(16, 2)
        )

    def forward(self, x):
        x = self.cnnLayers(x)
        x = x.view(x.shape[0], -1)
        x = self.linearLayers(x)
        return x
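To confirm the flattened feature sizes the two linear layers expect (9 for Net, 16 for Net2), a dummy batch can be pushed through both models. This is a minimal sanity-check sketch, not part of the original article.

dummy = torch.zeros(2, 1, 4, 4)   # a batch of two 1-channel 4*4 images
print(Net()(dummy).shape)         # torch.Size([2, 2]): pad to 6*6 -> 2*2 conv, stride 2 -> 3*3 -> 9 features
print(Net2()(dummy).shape)        # torch.Size([2, 2]): 1*1 conv keeps 4*4 -> 16 features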
Defining the loss function
# cross-entropy loss
loss = nn.CrossEntropyLoss()
loss2 = nn.CrossEntropyLoss()
Defining the optimizers
net = Net()
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
net2 = Net2()
optimizer2 = torch.optim.SGD(net2.parameters(), lr=lr)
Training the models
# compute accuracy
def evaluateAccuracy(dataIter, net):
    accSum, n = 0.0, 0
    with torch.no_grad():
        for X, y in dataIter:
            accSum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return accSum / n

def train(net, trainIter, testIter, loss, numEpochs, batchSize, optimizer):
    for epoch in range(numEpochs):
        trainLossSum, trainAccSum, n = 0.0, 0.0, 0
        for X, y in trainIter:
            yHat = net(X)
            l = loss(yHat, y).sum()
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            # accumulate training loss and accuracy
            trainLossSum += l.item()
            trainAccSum += (yHat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        # evaluate test accuracy
        testAcc = evaluateAccuracy(testIter, net)
        print('epoch {:d}, loss {:.4f}, train acc {:.3f}, test acc {:.3f}'.format(epoch + 1, trainLossSum / n, trainAccSum / n, testAcc))
Training Net
train(net, trainIter, testIter, loss, numEpochs, batchSize, optimizer)

epoch 1, loss 0.0128, train acc 0.667, test acc 0.667
epoch 2, loss 0.0118, train acc 0.683, test acc 0.760
epoch 3, loss 0.0104, train acc 0.742, test acc 0.807
epoch 4, loss 0.0093, train acc 0.769, test acc 0.772
epoch 5, loss 0.0085, train acc 0.797, test acc 0.745
epoch 6, loss 0.0084, train acc 0.798, test acc 0.807
epoch 7, loss 0.0082, train acc 0.804, test acc 0.816
epoch 8, loss 0.0078, train acc 0.816, test acc 0.812
epoch 9, loss 0.0077, train acc 0.818, test acc 0.817
epoch 10, loss 0.0074, train acc 0.824, test acc 0.826
epoch 11, loss 0.0072, train acc 0.836, test acc 0.819
epoch 12, loss 0.0075, train acc 0.823, test acc 0.829
epoch 13, loss 0.0071, train acc 0.839, test acc 0.797
epoch 14, loss 0.0067, train acc 0.849, test acc 0.824
epoch 15, loss 0.0069, train acc 0.848, test acc 0.843
epoch 16, loss 0.0064, train acc 0.864, test acc 0.851
epoch 17, loss 0.0062, train acc 0.867, test acc 0.780
epoch 18, loss 0.0060, train acc 0.871, test acc 0.864
epoch 19, loss 0.0057, train acc 0.881, test acc 0.890
epoch 20, loss 0.0055, train acc 0.885, test acc 0.897
Training Net2
# results below were obtained with batchSize = 50, lr = 0.1, numEpochs = 15
train(net2, trainIter, testIter, loss2, numEpochs, batchSize, optimizer2)

epoch 1, loss 0.0119, train acc 0.638, test acc 0.676
epoch 2, loss 0.0079, train acc 0.823, test acc 0.986
epoch 3, loss 0.0046, train acc 0.987, test acc 0.977
epoch 4, loss 0.0030, train acc 0.983, test acc 0.973
epoch 5, loss 0.0023, train acc 0.981, test acc 0.976
epoch 6, loss 0.0019, train acc 0.980, test acc 0.988
epoch 7, loss 0.0016, train acc 0.984, test acc 0.984
epoch 8, loss 0.0014, train acc 0.985, test acc 0.986
epoch 9, loss 0.0013, train acc 0.987, test acc 0.992
epoch 10, loss 0.0011, train acc 0.989, test acc 0.993
epoch 11, loss 0.0010, train acc 0.989, test acc 0.996
epoch 12, loss 0.0010, train acc 0.992, test acc 0.994
epoch 13, loss 0.0009, train acc 0.993, test acc 0.994
epoch 14, loss 0.0008, train acc 0.995, test acc 0.996
epoch 15, loss 0.0008, train acc 0.994, test acc 0.998
Testing
test = torch.Tensor([[[[0,0,0,0],[0,1,1,0],[0,1,1,0],[0,0,0,0]]],
[[[1,1,1,1],[1,0,0,1],[1,0,0,1],[1,1,1,1]]],
[[[0,1,0,1],[1,0,0,1],[1,0,0,1],[0,0,0,1]]],
[[[0,1,1,1],[1,0,0,1],[1,0,0,1],[0,0,0,1]]],
[[[0,0,1,1],[1,0,0,1],[1,0,0,1],[1,0,1,0]]],
[[[0,0,1,0],[0,1,0,1],[0,0,1,1],[1,0,1,0]]],
[[[1,1,1,0],[1,0,0,1],[1,0,1,1],[1,0,1,1]]]
])
target=torch.Tensor([0,1,0,1,1,0,1])
test
tensor([[[[0., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 1., 0.],
[0., 0., 0., 0.]]],
[[[1., 1., 1., 1.],
[1., 0., 0., 1.],
[1., 0., 0., 1.],
[1., 1., 1., 1.]]],
[[[0., 1., 0., 1.],
[1., 0., 0., 1.],
[1., 0., 0., 1.],
[0., 0., 0., 1.]]],
[[[0., 1., 1., 1.],
[1., 0., 0., 1.],
[1., 0., 0., 1.],
[0., 0., 0., 1.]]],
[[[0., 0., 1., 1.],
[1., 0., 0., 1.],
[1., 0., 0., 1.],
[1., 0., 1., 0.]]],
[[[0., 0., 1., 0.],
[0., 1., 0., 1.],
[0., 0., 1., 1.],
[1., 0., 1., 0.]]],
[[[1., 1., 1., 0.],
[1., 0., 0., 1.],
[1., 0., 1., 1.],
[1., 0., 1., 1.]]]])
with torch.no_grad():
    output = net(test)
    output2 = net2(test)

predictions = output.argmax(dim=1)
predictions2 = output2.argmax(dim=1)

# compare against the target
print(f'Net test results: {predictions.eq(target)}')
print(f'Net2 test results: {predictions2.eq(target)}')

Net test results: tensor([ True, True, False, True, True, True, True])
Net2 test results: tensor([False, True, False, True, True, False, True])
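As a final sanity check (not in the original article), the target labels can be re-derived from the hand-written test tensor with the same counting rule used in buildDataset:

derived = []
for img in test:
    arr = img[0]                                    # drop the channel dimension -> 4*4
    allBlack = (arr == 0).sum().item()              # black pixels in the whole image
    innerBlack = (arr[1:3, 1:3] == 0).sum().item()  # black pixels in the inner 2*2
    derived.append(0 if (allBlack - innerBlack) > innerBlack else 1)
print(derived)   # [0, 1, 0, 1, 1, 0, 1], matching target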
This concludes the article on implementing CNN image classification with PyTorch.