Weather Recognition with Python + PyTorch
I. Preliminary Work
Environment: Python 3.6, a GTX 1080 Ti, PyTorch 1.10 (the lab server's setup)
1. Set up the GPU or CPU
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torchvision
from torchvision import transforms, datasets  # used in the preprocessing steps below

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
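As an optional sanity check (my addition, not part of the original post), the installed versions and the selected GPU can be printed:

print(torch.__version__)        # PyTorch version
print(torchvision.__version__)  # torchvision version
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # should report the GTX 1080 Ti on the lab server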
2. Import the data
import os, PIL, random, pathlib

data_dir = 'weather_photos/'
data_dir = pathlib.Path(data_dir)
print(data_dir)

data_paths = list(data_dir.glob('*'))
print(data_paths)

# Each sub-folder of weather_photos/ is one class; take the folder name as the class name
# (the "/" split assumes Linux-style paths, matching the lab server environment)
classeNames = [str(path).split("/")[1] for path in data_paths]
classeNames
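Before any preprocessing it can be useful to check how many images each class folder holds; a minimal sketch, assuming the weather_photos/<class>/*.jpg layout used throughout this post:

# Count the images per class folder (illustrative helper, not in the original code)
for class_dir in data_dir.glob('*'):
    if class_dir.is_dir():
        print(class_dir.name, len(list(class_dir.glob('*.jpg'))))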
II. Data Preprocessing
Data transform setup
total_datadir = 'weather_photos/'

# For more on transforms.Compose see: https://blog.csdn.net/qq_38251616/article/details/124878863
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize every input image to the same size
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(           # standardize the channels so the model converges more easily
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])  # these mean/std values are the standard ImageNet statistics, not statistics of this dataset
])

total_data = datasets.ImageFolder(total_datadir, transform=train_transforms)
total_data
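ImageFolder assigns integer labels by sorting the sub-folder names alphabetically; printing the mapping makes the label order explicit, and it should match the classes list used later in the prediction step:

print(total_data.class_to_idx)  # e.g. {'cloudy': 0, 'rain': 1, 'shine': 2, 'sunrise': 3}
print(len(total_data))          # total number of images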
Dataset split
train_size = int(0.8 * len(total_data))
test_size = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_dataset, test_dataset
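random_split draws from PyTorch's global random state, so the 80/20 split changes between runs. If a reproducible split is wanted (not done in the original code), one option is to pass a seeded generator:

# Optional: reproducible split (the seed value 42 is an arbitrary assumption)
train_dataset, test_dataset = torch.utils.data.random_split(
    total_data, [train_size, test_size],
    generator=torch.Generator().manual_seed(42))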
Set up the DataLoaders
batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=1)
# shuffling the test loader is not required, but it does not affect the reported metrics
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=1)
Check the data shapes
for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break

III. Build the Network
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential, ReLU

num_classes = 4

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # Convolutional feature extractor followed by a fully connected classifier
        self.layers = Sequential(
            # Block 1
            nn.Conv2d(3, 24, kernel_size=5),    # 224 -> 220
            nn.BatchNorm2d(24),
            nn.ReLU(),
            # Block 2
            nn.Conv2d(24, 64, kernel_size=5),   # 220 -> 216
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),                 # 216 -> 108
            nn.Conv2d(64, 128, kernel_size=5),  # 108 -> 104
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 24, kernel_size=5),  # 104 -> 100
            nn.BatchNorm2d(24),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),                 # 100 -> 50
            nn.Flatten(),
            nn.Linear(24*50*50, 516, bias=True),  # 24 channels x 50 x 50 feature map
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(516, 215, bias=True),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(215, num_classes, bias=True),
        )

    def forward(self, x):
        x = self.layers(x)
        return x

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

model = Model().to(device)
model
Evaluating model on its own line prints the network structure.
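A quick shape check with a random dummy batch (my addition, not in the original post) confirms that the 24*50*50 input of the first Linear layer matches the convolutional output: 224 → 220 → 216 → 108 → 104 → 100 → 50 after the two pooling steps, with 24 output channels.

# Sanity check (assumed dummy input): one random image through the network
dummy = torch.randn(1, 3, 224, 224).to(device)
print(model(dummy).shape)  # expected: torch.Size([1, 4])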

IV. Train the Model
1. Set the loss function, learning rate, and optimizer
loss_fn = nn.CrossEntropyLoss()  # loss function
learn_rate = 1e-3                # learning rate
opt = torch.optim.SGD(model.parameters(), lr=learn_rate)
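Plain SGD with lr=1e-3 is what the original uses; as an alternative (my suggestion, not part of the article), Adam often converges faster on small datasets like this one:

# Optional alternative optimizer (assumption, commented out so the original setup is unchanged)
# opt = torch.optim.Adam(model.parameters(), lr=1e-4)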
2. Model training
Training function
# Training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)   # number of images in the training set
    num_batches = len(dataloader)    # number of batches per epoch
    train_loss, train_acc = 0, 0     # running loss and accuracy

    for X, y in dataloader:          # fetch a batch of images and labels
        X, y = X.to(device), y.to(device)

        # Compute the prediction error
        pred = model(X)              # network output
        loss = loss_fn(pred, y)      # loss between the prediction and the ground-truth labels

        # Backpropagation
        optimizer.zero_grad()        # clear the accumulated gradients
        loss.backward()              # backpropagate
        optimizer.step()             # update the parameters

        # Accumulate accuracy and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches
    return train_acc, train_loss

Test function
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)   # number of images in the test set
    num_batches = len(dataloader)    # number of batches
    test_loss, test_acc = 0, 0

    # Disable gradient tracking during evaluation to save memory and computation
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # Compute the loss
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss

The training loop
epochs = 30
train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss))

print('Done')
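The prediction step below reuses the model object still in memory. To keep the trained weights for later sessions, a minimal sketch (the file name is an assumption, not from the original post):

# Save the trained weights (hypothetical file name)
torch.save(model.state_dict(), 'weather_cnn.pth')
# Later: rebuild the architecture, then load the weights
# model = Model().to(device)
# model.load_state_dict(torch.load('weather_cnn.pth', map_location=device))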
V. Model Evaluation
1. Loss and accuracy curves
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")             # suppress warning messages
plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render Chinese labels
plt.rcParams['axes.unicode_minus'] = False    # display the minus sign correctly
plt.rcParams['figure.dpi'] = 100              # figure resolution
epochs_range = range(epochs)
plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
2. Predict on a single image
import os
import json
import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt

img_path = "weather_photos/cloudy/cloudy1.jpg"
classes = ['cloudy', 'rain', 'shine', 'sunrise']

data_transform = transforms.Compose([
    transforms.Resize([224, 224]),  # resize the input image to the size the network expects
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(           # standardize with the same statistics used during training
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
])
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    img = Image.open(img_path)
    plt.imshow(img)

    # [N, C, H, W]
    img = data_transform(img)
    # add the batch dimension
    img = torch.unsqueeze(img, dim=0)

    model.eval()
    with torch.no_grad():
        # predict the class
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    print(classes[predict_cla])
    plt.show()

if __name__ == '__main__':
    main()

The prediction result is shown below:

3. Summary
1. This exercise mainly covered the following functions:

| Function | Purpose |
| --- | --- |
| transforms.Compose | Chains data transforms such as resizing and type conversion |
| datasets.ImageFolder | Builds a dataset from the class sub-folders, applying the transforms above |
| torch.utils.data.DataLoader | Wraps the dataset for batched loading |
That concludes this walkthrough of weather recognition with Python + PyTorch. For more material on weather recognition with Python and PyTorch, see the other related articles on 腳本之家.