
Keras classification: a binary classification example (Cat and dog)

Updated: 2020-07-09 09:32:06   Author: mr_liyonghong
This article walks through a binary classification example with Keras (cats vs. dogs). It should serve as a useful reference; I hope it helps you.

1. Data preparation

Under the data folder, create a training directory train, a validation directory validation, and a test directory test. Inside each of them, create two subdirectories, dogs and cats, and place your photos of dogs and cats in the corresponding subdirectory. The images do not need to be the same size. A minimal sketch for creating this layout is shown below.
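The following is a minimal sketch for creating the directory layout just described, assuming the same base path that is used later in this article (E:/python learn/dog_and_cat/data/); the path and the use of os.makedirs here are only illustrative.

import os

# Assumed base directory (same path used later in this article); change to your own
base_dir = 'E:/python learn/dog_and_cat/data/'

# Create train/validation/test directories, each with cats and dogs subdirectories
for split in ('train', 'validation', 'test'):
  for label in ('cats', 'dogs'):
    os.makedirs(os.path.join(base_dir, split, label), exist_ok=True)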

2. Reading the data

import os

# Base directory of the dataset
base_dir = 'E:/python learn/dog_and_cat/data/'
 
# Training, validation and test directories
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
 
# Directory with the cat training images
train_cats_dir = os.path.join(train_dir, 'cats')
 
# Directory with the dog training images
train_dogs_dir = os.path.join(train_dir, 'dogs')
 
# Directory with the cat validation images
validation_cats_dir = os.path.join(validation_dir, 'cats')
 
# Directory with the dog validation images
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
 
print('total training cat images:', len(os.listdir(train_cats_dir)))
print('total training dog images:', len(os.listdir(train_dogs_dir)))
print('total validation cat images:', len(os.listdir(validation_cats_dir)))
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))

3. Building the model

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import RMSprop
 
# Build the model
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu',
         input_shape=(150, 150, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
 
print(model.summary())
 
model.compile(loss='binary_crossentropy',
       optimizer=RMSprop(lr=1e-4),
       metrics=['acc'])

4. Training the model

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
 
train_generator = train_datagen.flow_from_directory(
  train_dir,         # target directory
  target_size=(150, 150), # resize images to 150x150
  batch_size=20,
  class_mode='binary'
)
 
validation_generator = test_datagen.flow_from_directory(
  validation_dir,
  target_size=(150, 150),
  batch_size=20,
  class_mode='binary'
)
 
# Inspect the shape of one batch
for data_batch, labels_batch in train_generator:
  print('data batch shape:', data_batch.shape)
  print('labels batch shape:', labels_batch.shape)
  break
 
hist = model.fit_generator(
  train_generator,
  steps_per_epoch=100,
  epochs=10,
  validation_data=validation_generator,
  validation_steps=50
)
 
model.save('cats_and_dogs_small_1.h5')

5. Evaluating the model

import matplotlib.pyplot as plt

acc = hist.history['acc']
val_acc = hist.history['val_acc']
loss = hist.history['loss']
val_loss = hist.history['val_loss']
 
epochs = range(len(acc))
 
# Accuracy curves
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
 
# Loss curves in a separate figure
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

6. Prediction

import numpy as np
from keras.preprocessing import image

imagename = 'E:/python learn/dog_and_cat/data/validation/dogs/dog.2026.jpg'
test_image = image.load_img(imagename, target_size=(150, 150))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
test_image = test_image / 255.  # apply the same rescaling used during training
result = model.predict(test_image)
 
# The sigmoid output is a probability (cats=0, dogs=1); threshold it at 0.5
if result[0][0] > 0.5:
  prediction = 'dog'
else:
  prediction = 'cat'
 
print(prediction)

The code runs correctly under Spyder. In general, it can be split into two files: Train.py, which builds, trains and saves the deep learning model, and Predict.py, which loads the saved model, evaluates it and makes predictions. A minimal sketch of such a Predict.py follows.
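The sketch below shows what such a Predict.py could look like, assuming the model was saved as cats_and_dogs_small_1.h5 as above; the example image path is hypothetical and only for illustration.

# Predict.py - load the saved model and classify a single image (illustrative sketch)
import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model('cats_and_dogs_small_1.h5')  # model saved by Train.py

# Hypothetical test image path; replace with one of your own images
img = image.load_img('E:/python learn/dog_and_cat/data/test/dogs/dog.2100.jpg',
           target_size=(150, 150))
x = image.img_to_array(img) / 255.  # same rescaling as during training
x = np.expand_dims(x, axis=0)

prob = model.predict(x)[0][0]
print('dog' if prob > 0.5 else 'cat')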

Supplementary: Keras cats vs. dogs with a hand-built network and with VGG16

Import modules

import os
import numpy as np
import tensorflow as tf
import random
import seaborn as sns
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input,BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import VGG16, preprocess_input
 
from sklearn.model_selection import train_test_split

Load the dataset

def read_and_process_image(data_dir,width=64, height=64, channels=3, preprocess=False):
  train_images= [data_dir + i for i in os.listdir(data_dir)]
  
  random.shuffle(train_images)
  
  def read_image(file_path, preprocess):
    img = image.load_img(file_path, target_size=(height, width))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    # if preprocess:
      # x = preprocess_input(x)
    return x
  
  def prep_data(images, preprocess):
    count = len(images)
    data = np.ndarray((count, height, width, channels), dtype = np.float32)
    
    for i, image_file in enumerate(images):
      img = read_image(image_file, preprocess)
      data[i] = img
    
    return data
  
  def read_labels(file_path):
    labels = []
    for i in file_path:
      label = 1 if 'dog' in i else 0
      labels.append(label)
    
    return labels
  
  X = prep_data(train_images, preprocess)
  labels = read_labels(train_images)
  
  assert X.shape[0] == len(labels)
  print("Train shape: {}".format(X.shape))
  return X, labels

Read the dataset

# Read the images
WIDTH = 150
HEIGHT = 150
CHANNELS = 3
X, y = read_and_process_image('D:\\Python_Project\\train\\',width=WIDTH, height=HEIGHT, channels=CHANNELS)

Inspect the dataset

# Plot the distribution of labels in y
sns.countplot(y)
 
# Display a few images
def show_cats_and_dogs(X, idx):
  plt.figure(figsize=(10,5), frameon=True)
  img = X[idx,:,:,::-1]
  img = img/255
  plt.imshow(img)
  plt.show()
 
 
for idx in range(0,3):
  show_cats_and_dogs(X, idx)
 
train_X = X[0:17500,:,:,:]
train_y = y[0:17500]
test_X = X[17500:25000,:,:,:]
test_y = y[17500:25000]
train_X.shape
test_X.shape

Build a custom network

input_layer = Input((WIDTH, HEIGHT, CHANNELS))
# First convolutional block
z = input_layer
z = Conv2D(64, (3,3))(z)
z = BatchNormalization()(z)
z = Activation('relu')(z)
z = MaxPooling2D(pool_size = (2,2))(z)
 
z = Conv2D(64, (3,3))(z)
z = BatchNormalization()(z)
z = Activation('relu')(z)
z = MaxPooling2D(pool_size = (2,2))(z)
 
z = Conv2D(128, (3,3))(z)
z = BatchNormalization()(z)
z = Activation('relu')(z)
z = MaxPooling2D(pool_size = (2,2))(z)
 
z = Conv2D(128, (3,3))(z)
z = BatchNormalization()(z)
z = Activation('relu')(z)
z = MaxPooling2D(pool_size = (2,2))(z)
 
z = Flatten()(z)
z = Dense(64)(z)
z = BatchNormalization()(z)
z = Activation('relu')(z)
z = Dropout(0.5)(z)
z = Dense(1)(z)
z = Activation('sigmoid')(z)
 
model = Model(input_layer, z)
 
model.compile(
  optimizer = keras.optimizers.RMSprop(),
  loss = keras.losses.binary_crossentropy,
  metrics = [keras.metrics.binary_accuracy]
)
 
model.summary()

Train the model

history = model.fit(train_X,train_y, validation_data=(test_X, test_y),epochs=10,batch_size=128,verbose=True)
score = model.evaluate(test_X, test_y, verbose=0)
print("Large CNN Error: %.2f%%" %(100-score[1]*100))

Reuse the VGG16 model

def vgg16_model(input_shape= (HEIGHT,WIDTH,CHANNELS)):
  vgg16 = VGG16(include_top=False, weights='imagenet',input_shape=input_shape)
  
  for layer in vgg16.layers:
    layer.trainable = False
  last = vgg16.output
  # Append our own classifier head on top
  x = Flatten()(last)
  x = Dense(256, activation='relu')(x)
  x = Dropout(0.5)(x)
  x = Dense(256, activation='relu')(x)
  x = Dropout(0.5)(x)
  x = Dense(1, activation='sigmoid')(x)
  
  model = Model(inputs=vgg16.input, outputs=x)
  
  return model

Compile the model

model_vgg16 = vgg16_model()
model_vgg16.summary()
model_vgg16.compile(loss='binary_crossentropy',optimizer = Adam(0.0001), metrics = ['accuracy'])

Train the VGG16-based model

# Train the VGG16-based model
history = model_vgg16.fit(train_X,train_y, validation_data=(test_X, test_y),epochs=5,batch_size=128,verbose=True)
score = model_vgg16.evaluate(test_X, test_y, verbose=0)
print("Large CNN Error: %.2f%%" %(100-score[1]*100))

That concludes this Keras binary classification example (Cat and dog). I hope it gives you a useful reference, and thank you for supporting 腳本之家.
