《Tensorflow实战》 Section 6.3: VGGNet study notes


This is my rewrite of the book's code. It runs, but it overfits badly, and I'm not sure how best to fix that.

# -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 14:45:35 2017

@author: Administrator
"""
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import matlab.engine

# Load the YaleB face data through a MATLAB helper; data_imread_MSE returns
# training/test matrices and one-hot label matrices.
data_name = 'YaleB_32x32.mat'
sele_num  = 10

eng = matlab.engine.start_matlab()
t = eng.data_imread_MSE(data_name, sele_num)
eng.quit()
#t = np.array(t)
Train_Ma  = np.array(t[0]).astype(np.float32)
Train_Lab = np.array(t[1]).astype(np.int8)
Test_Ma   = np.array(t[2]).astype(np.float32)
Test_Lab  = np.array(t[3]).astype(np.int8)
Num_fea   = Train_Ma.shape[1]
Num_Class = Train_Lab.shape[1]
image_row    = 32
image_column = 32

# convolution layer helper
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation

# fully connected layer helper (note: applies a ReLU to its output)
def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation

# max-pooling helper
def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)

sess = tf.InteractiveSession()

# ---------- inputs and outputs ----------
x = tf.placeholder(tf.float32, [None, Num_fea])
y_ = tf.placeholder(tf.float32, [None, Num_Class])
x_image = tf.reshape(x, [-1, image_row, image_column, 1])
keep_prob = tf.placeholder(tf.float32)

# The book's comments assume a 224x224x3 input; with the 32x32x1 faces here
# each pooling halves the map, so the block outputs are 16, 8, 4, 2, 1 pixels.
# block 1 -- outputs 16x16x64
p = []
conv1_1 = conv_op(x_image, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)
# block 2 -- outputs 8x8x128
conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)
# block 3 -- outputs 4x4x256
conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)
# block 4 -- outputs 2x2x512
conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)
# block 5 -- outputs 1x1x512
conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)

# flatten
shp = pool5.get_shape()
flattened_shape = shp[1].value * shp[2].value * shp[3].value
resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

# fully connected; note fc8 also passes through fc_op's ReLU, so the "logits"
# fed to the softmax are clipped at zero
fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
fc8 = fc_op(fc7_drop, name="fc8", n_out=Num_Class, p=p)
predictions = tf.nn.softmax(fc8)

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(predictions), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.global_variables_initializer().run()
for i in range(1000):
    train_accuracy = accuracy.eval(feed_dict={
        x: Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 0.8})
print("test accuracy %g" % accuracy.eval(feed_dict={
    x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))
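On the overfitting question: with sele_num = 10 the training set is only about ten images per class, while the full VGG-16 above has tens of millions of parameters, so near-perfect training accuracy paired with poor test accuracy is the expected outcome. Beyond lowering keep_prob (e.g. to 0.5) and shrinking the network (fewer blocks, much smaller fc6/fc7), one common remedy is L2 weight decay on the kernels. A minimal sketch, assuming the graph above is already built; the coefficient 5e-4 is an assumed starting value, not from the book:

# conv_op and fc_op append [kernel, biases] pairs to p, so the kernels are
# the even-indexed entries; biases are usually left out of the penalty.
weight_decay = 5e-4  # assumed value; tune on held-out data
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in p[::2]])
total_loss = cross_entropy + weight_decay * l2_loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)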

Another, more concise rewrite, with the whole network wrapped in an inference_op function:

# -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 15:40:44 2017

@author: Administrator
"""
# The license header, imports, MATLAB data loading, and the conv_op / fc_op /
# mpool_op helpers are identical to the first script above; only the parts
# that differ are shown here.

# The whole network is built inside one function, as in the book.
def inference_op(input_op, keep_prob):
    p = []
    # block 1 -- with the 32x32x1 input, outputs 16x16x64
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)
    # block 2 -- outputs 8x8x128
    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)
    # block 3 -- outputs 4x4x256
    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)
    # block 4 -- outputs 2x2x512
    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)
    # block 5 -- outputs 1x1x512
    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)
    # flatten
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")
    # fully connected
    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    fc8 = fc_op(fc7_drop, name="fc8", n_out=Num_Class, p=p)
    predictions = tf.nn.softmax(fc8)
    return predictions, fc8, p

# ---------- inputs and outputs ----------
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, Num_fea])
y_ = tf.placeholder(tf.float32, [None, Num_Class])
x_image = tf.reshape(x, [-1, image_row, image_column, 1])
keep_prob = tf.placeholder(tf.float32)

predictions, fc8, p = inference_op(x_image, keep_prob)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(predictions), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.global_variables_initializer().run()
for i in range(100):
    train_accuracy = accuracy.eval(feed_dict={
        x: Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 0.8})
print("test accuracy %g" % accuracy.eval(feed_dict={
    x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))
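Two further changes are worth trying on either version, sketched below under the same TF 1.x setup. First, y_ * tf.log(predictions) returns NaN as soon as any predicted probability hits exactly zero; tf.nn.softmax_cross_entropy_with_logits computes the same loss in a numerically stable way from the raw scores (one caveat: fc_op wraps its output in a ReLU, so fc8 is not a purely linear logit layer; a linear variant of fc_op for the last layer would pair better with this loss). Second, feeding the entire training matrix every step wastes memory and gives up the regularizing noise of mini-batch gradients; the batch size of 64 below is an assumption, not a value from the book:

# numerically stable loss computed from the raw fc8 scores
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=fc8))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

batch_size = 64  # assumed; adjust to the available GPU memory
num_train = Train_Ma.shape[0]
tf.global_variables_initializer().run()
for epoch in range(100):
    perm = np.random.permutation(num_train)  # reshuffle every epoch
    for start in range(0, num_train, batch_size):
        idx = perm[start:start + batch_size]
        train_step.run(feed_dict={x: Train_Ma[idx],
                                  y_: Train_Lab[idx],
                                  keep_prob: 0.5})
    train_accuracy = accuracy.eval(feed_dict={
        x: Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("epoch %d, training accuracy %g" % (epoch, train_accuracy))
print("test accuracy %g" % accuracy.eval(feed_dict={
    x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))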



Original article: http://www.cnblogs.com/Jerry-PR/p/8074076.html
