91av视频/亚洲h视频/操亚洲美女/外国一级黄色毛片 - 国产三级三级三级三级

Resource description

Result screenshot:

Core code:

# Training function:
import os
import numpy as np
import tensorflow as tf
import input_data
import model

N_CLASSES = 2  # two output neurons: [1, 0] or [0, 1], the probabilities of cat vs. dog
IMG_W = 208  # resize images to this width; overly large images make training slow
IMG_H = 208  # resize images to this height
BATCH_SIZE = 32  # number of samples per batch
CAPACITY = 256  # maximum number of images held in the input queue
MAX_STEP = 1000  # number of training steps; should really be >= 10000, but 1000 is used here because training is slow
learning_rate = 0.0001  # learning rate; an initial learning_rate <= 0.0001 is recommended


def run_training():
    # dataset paths
    train_dir = 'd:/computer_sighting/try2_dogcat/train/'  # training set
    # logs_train_dir stores the data produced during training, for viewing in TensorBoard
    logs_train_dir = 'd:/computer_sighting/try2_dogcat/logs/'

    # get the lists of image paths and labels
    train, train_label = input_data.get_files(train_dir)
    # build shuffled batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # feed the batch into the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # compute the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # training op
    train_op = model.trainning(train_loss, learning_rate)
    # compute the accuracy
    train_acc = model.evaluation(train_logits, train_label_batch)
    # merge all summaries into a single op
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    # write summaries for TensorBoard
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # save the model every 2000 steps; checkpoints are written to checkpoint_path
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()


# train
run_training()
# The model and the data-input pipeline are in the attached model.py and input_data.py; rough sketches of both follow below
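
Since input_data.py is only provided in the attachment, here is a minimal sketch of what its get_files and get_batch helpers could look like, assuming Kaggle-style cat.*.jpg / dog.*.jpg file names and the TF 1.x queue-based input pipeline; the attached implementation may differ in its details.

# input_data.py -- minimal sketch under the assumptions above, not the attached file
import os
import numpy as np
import tensorflow as tf


def get_files(file_dir):
    # label 0 for cats, 1 for dogs, based on the file name prefix (assumed)
    images, labels = [], []
    for name in os.listdir(file_dir):
        images.append(os.path.join(file_dir, name))
        labels.append(0 if name.startswith('cat') else 1)
    # shuffle the image paths and labels together
    temp = np.array([images, labels]).transpose()
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = [int(l) for l in temp[:, 1]]
    return image_list, label_list


def get_batch(image_list, label_list, image_w, image_h, batch_size, capacity):
    image = tf.cast(image_list, tf.string)
    label = tf.cast(label_list, tf.int32)
    # queue of (file name, label) pairs
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # crop/pad to a fixed size and standardize each image
    image = tf.image.resize_image_with_crop_or_pad(image, image_h, image_w)
    image = tf.cast(image, tf.float32)
    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=4,
                                              capacity=capacity)
    label_batch = tf.reshape(label_batch, [batch_size])
    return image_batch, label_batch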
The overall process is: build the model, train it on a large number of images, and then use the trained model to classify new cat/dog pictures. The code is clearly written and commented, so it is easy to follow.
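
model.py is likewise only in the attachment. As a rough idea of its interface, the sketch below shows one plausible implementation of the four functions that training.py calls (inference, losses, trainning, evaluation); the network architecture and optimizer here are assumptions, not the attached code. The misspelled name trainning is kept on purpose so that it matches the call in the training script.

# model.py -- minimal sketch (assumed), kept small for readability
import tensorflow as tf


def inference(images, batch_size, n_classes):
    # a very small CNN: conv -> pool -> conv -> pool -> fc -> logits
    conv1 = tf.layers.conv2d(images, 16, 3, padding='same', activation=tf.nn.relu, name='conv1')
    pool1 = tf.layers.max_pooling2d(conv1, 2, 2, name='pool1')
    conv2 = tf.layers.conv2d(pool1, 16, 3, padding='same', activation=tf.nn.relu, name='conv2')
    pool2 = tf.layers.max_pooling2d(conv2, 2, 2, name='pool2')
    flat = tf.reshape(pool2, [batch_size, -1])
    fc1 = tf.layers.dense(flat, 128, activation=tf.nn.relu, name='fc1')
    logits = tf.layers.dense(fc1, n_classes, name='softmax_linear')
    return logits


def losses(logits, labels):
    # labels are class indices (0 = cat, 1 = dog)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entropy, name='loss')
    tf.summary.scalar('loss', loss)
    return loss


def trainning(loss, learning_rate):
    # one gradient-descent step per call of the returned op
    optimizer = tf.train.AdamOptimizer(learning_rate)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def evaluation(logits, labels):
    # fraction of samples whose top-1 prediction matches the label
    correct = tf.nn.in_top_k(logits, labels, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')
    tf.summary.scalar('accuracy', accuracy)
    return accuracy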

Resource screenshot

Code snippet and file information

# coding=utf-8
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import model
import os


# pick one image at random from the test set
def get_one_image(train):
    files = os.listdir(train)
    n = len(files)
    ind = np.random.randint(0, n)
    img_dir = os.path.join(train, files[ind])
    image = Image.open(img_dir)
    plt.imshow(image)
    plt.show()
    image = image.resize([208, 208])
    image = np.array(image)
    return image


def evaluate_one_image():
    test = 'd:/computer_sighting/try2_dogcat/test/'

    # get one image from the test directory
    image_array = get_one_image(test)

    with tf.Graph().as_default():
        BATCH_SIZE = 1  # only one image is read, so the batch size is set to 1
        N_CLASSES = 2  # two output neurons: [1, 0] or [0, 1], the probabilities of cat vs. dog
        # convert the image format
        image = tf  # (the snippet preview is cut off here)
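
The preview of evaluateCatOrDog.py stops at this point. Purely as an assumed continuation (not the attached code), the rest of evaluate_one_image() would typically standardize and reshape the image, run it through model.inference, restore the latest checkpoint written by run_training(), and print whichever class has the higher softmax probability:

        # assumed continuation -- not the attached code
        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 208, 208, 3])

        # rebuild the network for a single image and turn the logits into probabilities
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        logs_train_dir = 'd:/computer_sighting/try2_dogcat/logs/'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            # restore the most recent checkpoint saved by run_training()
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print('No checkpoint file found')
                return

            prediction = sess.run(logit)
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a cat with probability %.6f' % prediction[0][0])
            else:
                print('This is a dog with probability %.6f' % prediction[0][1])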

 Attribute      Size  Date        Time   Name
 ----------  -------  ----------  -----  -------------------
 File           2759  2019-09-13  14:08  evaluateCatOrDog.py
 File           4368  2019-09-13  13:44  input_data.py
 File           5425  2019-09-13  13:44  model.py
 File           2965  2019-09-13  14:08  training.py
 ----------  -------  ----------  -----  -------------------
 Total         15517                     4 files

