2.3 AutoEncoder


An AutoEncoder consists of a compression (encoding) step and a decompression (decoding) step; it is an unsupervised dimensionality-reduction technique.

A neural network can receive a very large amount of input data, sometimes tens of millions of values. By compressing the input we extract the most representative information from the original image and reduce the amount of data that has to be processed; feeding this reduced representation into the network makes the subsequent learning much easier.

This is where the autoencoder comes in. Because the reconstruction target is the input itself, no labels are required, so autoencoding counts as unsupervised learning. Much like PCA, an autoencoder can be used to reduce the dimensionality of the features.
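Before the full TensorFlow 1.x program below, the same compress-then-reconstruct idea can be written as a minimal tf.keras sketch (this sketch is an addition for illustration and is not part of the original article; the layer sizes and the random stand-in data are assumptions):

import numpy as np
import tensorflow as tf

# Encoder: compress the 784-dimensional input into a small code;
# Decoder: reconstruct the 784-dimensional input from that code.
inputs = tf.keras.Input(shape=(784,))
code = tf.keras.layers.Dense(32, activation="relu")(inputs)        # compression
outputs = tf.keras.layers.Dense(784, activation="sigmoid")(code)   # reconstruction

autoencoder = tf.keras.Model(inputs, outputs)
# The training target is the input itself, so no labels are needed (unsupervised).
autoencoder.compile(optimizer="adam", loss="mse")

x = np.random.rand(256, 784).astype("float32")  # stand-in for normalized image vectors
autoencoder.fit(x, x, epochs=1, batch_size=64, verbose=0)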

AutoEncoder code for MNIST handwritten-digit images

from __future__ import division, print_function, absolute_import

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)

# Visualize decoder setting
# Parameters
learning_rate = 0.01
training_epochs = 5
batch_size = 256
display_step = 1
examples_to_show = 10

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)

# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])

# hidden layer settings
n_hidden_1 = 256  # 1st layer num features
n_hidden_2 = 128  # 2nd layer num features

weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}

# Building the encoder
def encoder(x):
    # Encoder hidden layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    # Encoder hidden layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    return layer_2

# Building the decoder
def decoder(x):
    # Decoder hidden layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    # Decoder hidden layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    return layer_2

# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)

# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X

# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Launch the graph
with tf.Session() as sess:
    # tf.initialize_all_variables() is no longer valid from
    # 2017-03-02 if using tensorflow >= 0.12
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    total_batch = int(mnist.train.num_examples / batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)  # max(x) = 1, min(x) = 0
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    # Applying encode and decode over test set
    encode_decode = sess.run(
        y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    # Compare original images with their reconstructions
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    plt.show()
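Note that this program targets TensorFlow 1.x: tf.placeholder, tf.Session, and the tensorflow.examples.tutorials.mnist loader were removed in TensorFlow 2.x. If only TensorFlow 2.x is available, one possible workaround is to run the graph code through the tf.compat.v1 API and load MNIST from tf.keras.datasets instead; the following is a sketch based on these assumptions, not part of the original article:

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # allow the graph/session style code above to run via tf.compat.v1

# Replacement for input_data.read_data_sets(): flatten to 784-dim vectors scaled to [0, 1].
(x_train, _), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

def next_batch(data, batch_size):
    # Stand-in for mnist.train.next_batch(): sample a random mini-batch (labels unused here).
    idx = np.random.randint(0, len(data), size=batch_size)
    return data[idx], None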

Using an AutoEncoder for PCA-like dimensionality reduction

Code:

from __future__ import division, print_function, absolute_import

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)

# Visualize encoder setting
# Parameters
learning_rate = 0.01    # 0.01 this learning rate will be better! Tested
training_epochs = 10
batch_size = 256
display_step = 1

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)

# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])

# hidden layer settings
n_hidden_1 = 128
n_hidden_2 = 64
n_hidden_3 = 10
n_hidden_4 = 2  # 2D show

weights = {
    'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),
    'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),
    'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3])),
    'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2])),
    'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1])),
    'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}

def encoder(x):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
                                   biases['encoder_b3']))
    # The final coding layer is linear, so the 2-D codes are not squashed into (0, 1)
    layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),
                     biases['encoder_b4'])
    return layer_4

def decoder(x):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
                                   biases['decoder_b3']))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),
                                   biases['decoder_b4']))
    return layer_4

# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)

# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X

# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Launch the graph
with tf.Session() as sess:
    # tf.initialize_all_variables() is no longer valid from
    # 2017-03-02 if using tensorflow >= 0.12
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    total_batch = int(mnist.train.num_examples / batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)  # max(x) = 1, min(x) = 0
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    # Project the test set onto the 2-D coding layer and color the points by digit label
    encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})
    plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)
    plt.colorbar()
    plt.show()
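Since the article compares the autoencoder to PCA, the 2-D codes above can be put side by side with a plain PCA projection of the same test images. The following is a sketch using scikit-learn, added here for comparison and not part of the original article; PCA gives a purely linear projection, while the autoencoder's 2-D codes come from a nonlinear encoder:

import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.decomposition import PCA

# Load and flatten the MNIST test images (assumption: tf.keras.datasets is available).
(_, _), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# Linear projection onto the top two principal components, colored by digit label.
pca_result = PCA(n_components=2).fit_transform(x_test)
plt.scatter(pca_result[:, 0], pca_result[:, 1], c=y_test, s=2)
plt.colorbar()
plt.show()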

The output is shown below:

(Figure: the encoded MNIST test images plotted in 2-D, colored by digit label)

Original article: https://www.cnblogs.com/jackchen-Net/p/8125884.html
