解讀tf.keras.layers模塊中的函數(shù)
tf.keras.layers模塊中的函數(shù)
from __future__ import print_function as _print_function import sys as _sys from . import experimental from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_layer import Input from tensorflow.python.keras.engine.input_layer import InputLayer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.feature_column.dense_features_v2 import DenseFeatures from tensorflow.python.keras.layers.advanced_activations import ELU from tensorflow.python.keras.layers.advanced_activations import LeakyReLU from tensorflow.python.keras.layers.advanced_activations import PReLU from tensorflow.python.keras.layers.advanced_activations import ReLU from tensorflow.python.keras.layers.advanced_activations import Softmax from tensorflow.python.keras.layers.advanced_activations import ThresholdedReLU from tensorflow.python.keras.layers.convolutional import Conv1D from tensorflow.python.keras.layers.convolutional import Conv1D as Convolution1D from tensorflow.python.keras.layers.convolutional import Conv1DTranspose from tensorflow.python.keras.layers.convolutional import Conv1DTranspose as Convolution1DTranspose from tensorflow.python.keras.layers.convolutional import Conv2D from tensorflow.python.keras.layers.convolutional import Conv2D as Convolution2D from tensorflow.python.keras.layers.convolutional import Conv2DTranspose from tensorflow.python.keras.layers.convolutional import Conv2DTranspose as Convolution2DTranspose from tensorflow.python.keras.layers.convolutional import Conv3D from tensorflow.python.keras.layers.convolutional import Conv3D as Convolution3D from tensorflow.python.keras.layers.convolutional import Conv3DTranspose from tensorflow.python.keras.layers.convolutional import Conv3DTranspose as Convolution3DTranspose from tensorflow.python.keras.layers.convolutional import Cropping1D from tensorflow.python.keras.layers.convolutional import Cropping2D from 
tensorflow.python.keras.layers.convolutional import Cropping3D from tensorflow.python.keras.layers.convolutional import DepthwiseConv2D from tensorflow.python.keras.layers.convolutional import SeparableConv1D from tensorflow.python.keras.layers.convolutional import SeparableConv1D as SeparableConvolution1D from tensorflow.python.keras.layers.convolutional import SeparableConv2D from tensorflow.python.keras.layers.convolutional import SeparableConv2D as SeparableConvolution2D from tensorflow.python.keras.layers.convolutional import UpSampling1D from tensorflow.python.keras.layers.convolutional import UpSampling2D from tensorflow.python.keras.layers.convolutional import UpSampling3D from tensorflow.python.keras.layers.convolutional import ZeroPadding1D from tensorflow.python.keras.layers.convolutional import ZeroPadding2D from tensorflow.python.keras.layers.convolutional import ZeroPadding3D from tensorflow.python.keras.layers.convolutional_recurrent import ConvLSTM2D from tensorflow.python.keras.layers.core import Activation from tensorflow.python.keras.layers.core import ActivityRegularization from tensorflow.python.keras.layers.core import Dense from tensorflow.python.keras.layers.core import Dropout from tensorflow.python.keras.layers.core import Flatten from tensorflow.python.keras.layers.core import Lambda from tensorflow.python.keras.layers.core import Masking from tensorflow.python.keras.layers.core import Permute from tensorflow.python.keras.layers.core import RepeatVector from tensorflow.python.keras.layers.core import Reshape from tensorflow.python.keras.layers.core import SpatialDropout1D from tensorflow.python.keras.layers.core import SpatialDropout2D from tensorflow.python.keras.layers.core import SpatialDropout3D from tensorflow.python.keras.layers.dense_attention import AdditiveAttention from tensorflow.python.keras.layers.dense_attention import Attention from tensorflow.python.keras.layers.embeddings import Embedding from 
tensorflow.python.keras.layers.local import LocallyConnected1D from tensorflow.python.keras.layers.local import LocallyConnected2D from tensorflow.python.keras.layers.merge import Add from tensorflow.python.keras.layers.merge import Average from tensorflow.python.keras.layers.merge import Concatenate from tensorflow.python.keras.layers.merge import Dot from tensorflow.python.keras.layers.merge import Maximum from tensorflow.python.keras.layers.merge import Minimum from tensorflow.python.keras.layers.merge import Multiply from tensorflow.python.keras.layers.merge import Subtract from tensorflow.python.keras.layers.merge import add from tensorflow.python.keras.layers.merge import average from tensorflow.python.keras.layers.merge import concatenate from tensorflow.python.keras.layers.merge import dot from tensorflow.python.keras.layers.merge import maximum from tensorflow.python.keras.layers.merge import minimum from tensorflow.python.keras.layers.merge import multiply from tensorflow.python.keras.layers.merge import subtract from tensorflow.python.keras.layers.noise import AlphaDropout from tensorflow.python.keras.layers.noise import GaussianDropout from tensorflow.python.keras.layers.noise import GaussianNoise from tensorflow.python.keras.layers.normalization import LayerNormalization from tensorflow.python.keras.layers.normalization_v2 import BatchNormalization from tensorflow.python.keras.layers.pooling import AveragePooling1D from tensorflow.python.keras.layers.pooling import AveragePooling1D as AvgPool1D from tensorflow.python.keras.layers.pooling import AveragePooling2D from tensorflow.python.keras.layers.pooling import AveragePooling2D as AvgPool2D from tensorflow.python.keras.layers.pooling import AveragePooling3D from tensorflow.python.keras.layers.pooling import AveragePooling3D as AvgPool3D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D as GlobalAvgPool1D 
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D as GlobalAvgPool2D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D as GlobalAvgPool3D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D as GlobalMaxPool1D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D as GlobalMaxPool2D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D as GlobalMaxPool3D from tensorflow.python.keras.layers.pooling import MaxPooling1D from tensorflow.python.keras.layers.pooling import MaxPooling1D as MaxPool1D from tensorflow.python.keras.layers.pooling import MaxPooling2D from tensorflow.python.keras.layers.pooling import MaxPooling2D as MaxPool2D from tensorflow.python.keras.layers.pooling import MaxPooling3D from tensorflow.python.keras.layers.pooling import MaxPooling3D as MaxPool3D from tensorflow.python.keras.layers.recurrent import AbstractRNNCell from tensorflow.python.keras.layers.recurrent import RNN from tensorflow.python.keras.layers.recurrent import SimpleRNN from tensorflow.python.keras.layers.recurrent import SimpleRNNCell from tensorflow.python.keras.layers.recurrent import StackedRNNCells from tensorflow.python.keras.layers.recurrent_v2 import GRU from tensorflow.python.keras.layers.recurrent_v2 import GRUCell from tensorflow.python.keras.layers.recurrent_v2 import LSTM from tensorflow.python.keras.layers.recurrent_v2 import LSTMCell from tensorflow.python.keras.layers.serialization import deserialize from tensorflow.python.keras.layers.serialization import serialize from tensorflow.python.keras.layers.wrappers import 
Bidirectional from tensorflow.python.keras.layers.wrappers import TimeDistributed from tensorflow.python.keras.layers.wrappers import Wrapper del _print_function
匯總tf.keras模型層layers
tf.keras.layers.Dense():密集連接層。參數(shù)個(gè)數(shù) = 輸入層特征數(shù)× 輸出層特征數(shù)(weight)+ 輸出層特征數(shù)(bias)
tf.keras.layers.Activation():激活函數(shù)層。一般放在Dense層后面,等價(jià)于在Dense層中指定activation。
tf.keras.layers.Dropout():隨機(jī)置零層。訓(xùn)練期間以一定幾率將輸入置0,一種正則化手段。
tf.keras.layers.BatchNormalization():批標(biāo)準(zhǔn)化層。通過線性變換將輸入批次縮放平移到穩(wěn)定的均值和標(biāo)準(zhǔn)差??梢栽鰪?qiáng)模型對(duì)輸入不同分布的適應(yīng)性,加快模型訓(xùn)練速度,有輕微正則化效果。一般在激活函數(shù)之前使用。
tf.keras.layers.SpatialDropout2D():空間隨機(jī)置零層。訓(xùn)練期間以一定幾率將整個(gè)特征圖置0,一種正則化手段,有利于避免特征圖之間過高的相關(guān)性。
tf.keras.layers.Input():輸入層。通常使用Functional API方式構(gòu)建模型時(shí)作為第一層。
tf.keras.layers.DenseFeatures():特征列接入層,用于接收一個(gè)特征列列表并產(chǎn)生一個(gè)密集連接層。
tf.keras.layers.Flatten():壓平層,用于將多維張量壓成一維。
tf.keras.layers.Reshape():形狀重塑層,改變輸入張量的形狀。
tf.keras.layers.Concatenate():拼接層,將多個(gè)張量在某個(gè)維度上拼接。
tf.keras.layers.Add():加法層。
tf.keras.layers.Subtract():減法層。
tf.keras.layers.Maximum():取最大值層。
tf.keras.layers.Minimum():取最小值層。
卷積網(wǎng)絡(luò)相關(guān)層
tf.keras.layers.Conv1D():普通一維卷積,常用于文本。參數(shù)個(gè)數(shù) = 輸入通道數(shù)×卷積核尺寸(如3)×卷積核個(gè)數(shù)
tf.keras.layers.Conv2D():普通二維卷積,常用于圖像。參數(shù)個(gè)數(shù) = 輸入通道數(shù)×卷積核尺寸(如3乘3)×卷積核個(gè)數(shù)
tf.keras.layers.Conv3D():普通三維卷積,常用于視頻。參數(shù)個(gè)數(shù) = 輸入通道數(shù)×卷積核尺寸(如3乘3乘3)×卷積核個(gè)數(shù)
tf.keras.layers.SeparableConv2D():二維深度可分離卷積層。不同于普通卷積同時(shí)對(duì)區(qū)域和通道操作,深度可分離卷積先操作區(qū)域,再操作通道。即先對(duì)每個(gè)通道做獨(dú)立卷積(即先操作區(qū)域),再用1乘1卷積跨通道組合(即再操作通道)。參數(shù)個(gè)數(shù) = 輸入通道數(shù)×卷積核尺寸 + 輸入通道數(shù)×1×1×輸出通道數(shù)。深度可分離卷積的參數(shù)數(shù)量一般遠(yuǎn)小于普通卷積,效果一般也更好。
tf.keras.layers.DepthwiseConv2D():二維深度卷積層。僅有SeparableConv2D前半部分操作,即只操作區(qū)域,不操作通道,一般輸出通道數(shù)和輸入通道數(shù)相同,但也可以通過設(shè)置depth_multiplier讓輸出通道為輸入通道的若干倍數(shù)。輸出通道數(shù) = 輸入通道數(shù) × depth_multiplier。參數(shù)個(gè)數(shù) = 輸入通道數(shù)×卷積核尺寸× depth_multiplier。
tf.keras.layers.Conv2DTranspose():二維卷積轉(zhuǎn)置層,俗稱反卷積層。并非卷積的逆操作,但在卷積核相同的情況下,當(dāng)其輸入尺寸是卷積操作輸出尺寸的情況下,卷積轉(zhuǎn)置的輸出尺寸恰好是卷積操作的輸入尺寸。
tf.keras.layers.LocallyConnected2D():二維局部連接層。類似Conv2D,唯一的差別是沒有空間上的權(quán)值共享,所以其參數(shù)個(gè)數(shù)遠(yuǎn)高于二維卷積。
tf.keras.layers.MaxPooling2D():二維最大池化層。也稱作下采樣層。池化層無(wú)參數(shù),主要作用是降維。
tf.keras.layers.AveragePooling2D():二維平均池化層。
tf.keras.layers.GlobalMaxPool2D():全局最大池化層。每個(gè)通道僅保留一個(gè)值。一般從卷積層過渡到全連接層時(shí)使用,是Flatten的替代方案。
tf.keras.layers.GlobalAvgPool2D():全局平均池化層。每個(gè)通道僅保留一個(gè)值。
示例代碼一、搭建LeNet-5神經(jīng)網(wǎng)絡(luò)
"""Build and train a LeNet-5 style CNN on MNIST with a manual GradientTape loop."""
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses

# 1. Dataset preparation.
# load_data() returns two (images, labels) tuples: the train and test splits.
(x, y), (x_val, y_val) = datasets.mnist.load_data()
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.  # scale pixels into [0, 1]
y = tf.convert_to_tensor(y, dtype=tf.int32)           # integer class labels
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))  # build a Dataset object
train_dataset = train_dataset.batch(32).repeat(10)  # batch size 32, iterate 10 epochs

# 2. Build the network (LeNet-5 layout).
network = Sequential([
    layers.Conv2D(6, kernel_size=3, strides=1),   # first conv layer: 6 filters of 3x3x1
    layers.MaxPooling2D(pool_size=2, strides=2),  # 2x2 pooling, stride 2
    layers.ReLU(),                                # activation
    layers.Conv2D(16, kernel_size=3, strides=1),  # second conv layer: 16 filters of 3x3x6
    layers.MaxPooling2D(pool_size=2, strides=2),  # pooling layer
    layers.ReLU(),                                # activation
    layers.Flatten(),                             # flatten for the dense layers
    layers.Dense(120, activation='relu'),         # fully connected, 120 units
    layers.Dense(84, activation='relu'),          # fully connected, 84 units
    layers.Dense(10),                             # output logits, 10 classes
])
network.build(input_shape=(None, 28, 28, 1))  # 28x28 grayscale input, any batch size
network.summary()  # print per-layer trainable parameter counts

# 3. Training loop: compute gradients and update the network parameters.
# FIX: the 'lr' keyword is deprecated in tf.keras optimizers; use 'learning_rate'.
optimizer = optimizers.SGD(learning_rate=0.01)  # plain mini-batch SGD
acc_meter = metrics.Accuracy()  # running accuracy meter

for step, (x, y) in enumerate(train_dataset):  # one batch per iteration
    with tf.GradientTape() as tape:  # record ops for differentiation
        # FIX: the original used tf.reshape(x, (32, 28, 28, 1)), which hard-codes
        # the batch size and crashes on the smaller final batch of each epoch.
        # Adding the channel axis works for any batch size.
        x = tf.expand_dims(x, axis=3)  # [b, 28, 28] -> [b, 28, 28, 1]
        out = network(x)  # logits, shape [b, 10]
        y_onehot = tf.one_hot(y, depth=10)  # one-hot encode the labels
        # Mean-squared-error style loss, averaged over the batch.
        # FIX: divide by the actual batch size instead of a hard-coded 32.
        batch_size = tf.cast(tf.shape(x)[0], tf.float32)
        loss = tf.reduce_sum(tf.square(out - y_onehot)) / batch_size
    # Gradients of the loss w.r.t. every trainable parameter.
    grads = tape.gradient(loss, network.trainable_variables)
    optimizer.apply_gradients(zip(grads, network.trainable_variables))  # update parameters
    # Compare predicted class (argmax of logits) against the labels.
    acc_meter.update_state(tf.argmax(out, axis=1), y)
    if step % 200 == 0:  # report every 200 steps
        print('Step', step, ': Loss is: ', float(loss), ' Accuracy: ', acc_meter.result().numpy())
        acc_meter.reset_states()  # restart the accuracy window
總結(jié)
以上為個(gè)人經(jīng)驗(yàn),希望能給大家一個(gè)參考,也希望大家多多支持腳本之家。
相關(guān)文章
Pandas 類型轉(zhuǎn)換astype()的實(shí)現(xiàn)
本文主要介紹了Pandas 類型轉(zhuǎn)換astype()的實(shí)現(xiàn),文中通過示例代碼介紹的非常詳細(xì),對(duì)大家的學(xué)習(xí)或者工作具有一定的參考學(xué)習(xí)價(jià)值,需要的朋友們下面隨著小編來一起學(xué)習(xí)學(xué)習(xí)吧 2022-07-07
3個(gè)Python?SQLAlchemy數(shù)據(jù)庫(kù)操作功能詳解
Python?SQLAlchemy?是一個(gè)強(qiáng)大且多功能的?Python?SQL?工具包和對(duì)象關(guān)系映射?(ORM)?系統(tǒng),提供了一整套眾所周知的企業(yè)級(jí)持久性模式,本文為大家整理了它必須了解的3個(gè)數(shù)據(jù)庫(kù)操作功能,希望對(duì)大家有所幫助 2023-09-09
關(guān)于Python中flask-httpauth庫(kù)用法詳解
這篇文章主要介紹了關(guān)于Python中flask-httpauth庫(kù)用法詳解,Flask-HTTPAuth是一個(gè)?Flask?擴(kuò)展,它簡(jiǎn)化了?HTTP?身份驗(yàn)證與?Flask?路由的使用,需要的朋友可以參考下 2023-04-04
利用python、tensorflow、opencv、pyqt5實(shí)現(xiàn)人臉實(shí)時(shí)簽到系統(tǒng)
這篇文章主要介紹了利用python、tensorflow、opencv、pyqt5實(shí)現(xiàn)人臉實(shí)時(shí)簽到系統(tǒng),本文給大家介紹的非常詳細(xì),具有一定的參考借鑒價(jià)值,需要的朋友可以參考下 2019-09-09
使用Python簡(jiǎn)單的實(shí)現(xiàn)樹莓派的WEB控制
這篇文章主要介紹了使用Python簡(jiǎn)單的實(shí)現(xiàn)樹莓派的WEB控制的相關(guān)資料,需要的朋友可以參考下 2016-02-02
python3 與python2 異常處理的區(qū)別與聯(lián)系
這篇文章主要介紹了python3 與python2 異常處理的區(qū)別與聯(lián)系的相關(guān)資料,需要的朋友可以參考下 2016-06-06
使用python怎樣產(chǎn)生10個(gè)不同的隨機(jī)數(shù)
這篇文章主要介紹了使用python實(shí)現(xiàn)產(chǎn)生10個(gè)不同的隨機(jī)數(shù)方式,具有很好的參考價(jià)值,希望對(duì)大家有所幫助。如有錯(cuò)誤或未考慮完全的地方,望不吝賜教2022-07-07