Resource Description
This package contains a complete handwritten-digit-recognition project built on a deep neural network, including the raw data, the training data, the trained model, and the test data. The project is implemented in three different ways in total; this is the second implementation, together with its model.
Code Snippet and File Information
# coding: utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Size of each batch
batch_size = 100
# Total number of batches in the training set
n_batch = mnist.train.num_examples // batch_size
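# For reference: the TF tutorial reader splits MNIST into 55,000 training,
# 5,000 validation, and 10,000 test images, so with batch_size = 100 this
# gives n_batch = 55000 // 100 = 550 batches per epoch.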
# Attach summary statistics to a variable (for TensorBoard)
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)  # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram
# Weight initialization helper
def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

# Bias initialization helper
def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)

# Convolution layer
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# Pooling layer
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
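# Shape bookkeeping for the 28x28 MNIST input: conv2d uses stride 1 with SAME
# padding, so it preserves the spatial size, while each max_pool_2x2 halves it.
# The feature maps therefore shrink 28x28 -> 14x14 (pool 1) -> 7x7 (pool 2).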
# Input layer
# Name scope
with tf.name_scope('input'):
    # Define two placeholders
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('x_image'):
        # Reshape x into a 4-D tensor: [batch, in_height, in_width, in_channels]
        x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')
# First convolution layer, activation, and pooling
with tf.name_scope('Conv1'):
    # Initialize the weights and bias of the first convolution layer
    with tf.name_scope('W_conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32], name='W_conv1')  # 5x5 window; 32 kernels over 1 input channel
    with tf.name_scope('b_conv1'):
        b_conv1 = bias_variable([32], name='b_conv1')  # one bias per kernel
    # Convolve x_image with the weights, add the bias, then apply ReLU
    with tf.name_scope('conv2d_1'):
        conv2d_1 = conv2d(x_image, W_conv1) + b_conv1
    with tf.name_scope('relu'):
        h_conv1 = tf.nn.relu(conv2d_1)
    with tf.name_scope('h_pool1'):
        h_pool1 = max_pool_2x2(h_conv1)  # max-pooling
# Second convolution layer, activation, and pooling
with tf.name_scope('Conv2'):
    # Initialize the weights and bias of the second convolution layer
    with tf.name_scope('W_conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')  # 5x5 window; 64 kernels over 32 input channels
    with tf.name_scope('b_conv2'):
        b_conv2 = bias_variable([64], name='b_conv2')  # one bias per kernel
    # Convolve h_pool1 with the weights, add the bias, then apply ReLU
    with tf.name_scope('conv2d_2'):
        conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2
    with tf.name_scope('relu'):
        h_conv2 = tf.nn.relu(conv2d_2)
    with tf.name_scope('h_pool2'):
        h_pool2 = max_pool_2x2(h_conv2)  # max-pooling
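# At this point h_pool2 has shape [batch, 7, 7, 64], i.e. 7*7*64 = 3136
# features per image once flattened, which fixes the input width of fc1 below.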
# First fully connected layer
with tf.name_scope('fc1'):
    # Initialize the weights of the first fully connected layer
    with tf.name_scope('W_fc1'):
        W_fc1 = wei
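The preview of mnist2.py breaks off mid-assignment at W_fc1 = wei. What follows is a minimal sketch, not recovered from the file, of how this kind of tutorial network typically continues: a fully connected layer over the flattened 7x7x64 features, a 10-way softmax output, and a cross-entropy training loop. The 1024-unit width, the 1e-4 learning rate, and the names b_fc1, h_pool2_flat, fc2, logits, cross_entropy, and train_step are all assumptions. The logs\train and logs\test event files in the listing below also suggest the real script adds TensorBoard summary writers, which this sketch omits.

# First fully connected layer (restating the block that was cut off above)
with tf.name_scope('fc1'):
    with tf.name_scope('W_fc1'):
        # 7*7*64 flattened inputs; the 1024-unit width is an assumed, typical choice
        W_fc1 = weight_variable([7 * 7 * 64, 1024], name='W_fc1')
    with tf.name_scope('b_fc1'):
        b_fc1 = bias_variable([1024], name='b_fc1')
    with tf.name_scope('h_pool2_flat'):
        # Flatten the pooled feature maps into one vector per image
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64], name='h_pool2_flat')
    with tf.name_scope('relu'):
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Output layer: 10 logits, one per digit class
with tf.name_scope('fc2'):
    W_fc2 = weight_variable([1024, 10], name='W_fc2')
    b_fc2 = bias_variable([10], name='b_fc2')
    logits = tf.matmul(h_fc1, W_fc2) + b_fc2

# Cross-entropy loss and Adam optimizer (the learning rate is an assumption)
with tf.name_scope('train'):
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Minimal training loop
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(1):  # one epoch, just to show the loop structure
        for _ in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})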
 Attribute       Size  Date        Time   Name
-----------  --------  ----------  -----  ----
 directory          0  2019-04-21  18:51  mnist2\
 directory          0  2019-04-21  18:51  mnist2\logs\
 directory          0  2019-04-21  18:51  mnist2\logs\test\
 file          262906  2019-04-21  18:23  mnist2\logs\test\events.out.tfevents.1555841857.XPS-15
 directory          0  2019-04-21  18:51  mnist2\logs\train\
 file          262906  2019-04-21  18:23  mnist2\logs\train\events.out.tfevents.1555841857.XPS-15
 file            7297  2019-04-21  18:17  mnist2\mnist2.py
 directory          0  2019-04-21  18:51  mnist2\MNIST_data\
 file         1648877  2019-04-21  18:17  mnist2\MNIST_data\t10k-images-idx3-ubyte.gz
 file            4542  2019-04-21  18:17  mnist2\MNIST_data\t10k-labels-idx1-ubyte.gz
 file         9912422  2019-04-21  18:17  mnist2\MNIST_data\train-images-idx3-ubyte.gz
 file           28881  2019-04-21  18:17  mnist2\MNIST_data\train-labels-idx1-ubyte.gz