# Hidden layer settings
n_hidden_1 = 256  # First layer: compress the input down to 256 features (2^8)
n_hidden_2 = 128  # Second layer: compress further down to 128 features (2^7)

# Define the weights
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    # Decoder: expand the 128 features back to 256 features
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    # Decoder: expand the 256 features back to the original 784 features
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}

# Define the biases
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
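The block above references several names defined earlier in the tutorial: the imports, the MNIST loader, the X placeholder, and the hyperparameters used during training. For readers jumping in here, a minimal sketch of that assumed preamble follows; n_input = 784 and examples_to_show = 10 are implied by the code itself (the "original 784 features" comment and the 2x10 plot grid), while the other hyperparameter values are assumptions, not taken from this section:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

# MNIST loader from the TF 1.x tutorials package; pixel values are scaled to [0, 1].
# Labels are unused by the autoencoder, so one_hot does not matter here.
mnist = input_data.read_data_sets("MNIST_data", one_hot=False)

learning_rate = 0.01    # assumed value, not given in this section
training_epochs = 5     # assumed value, not given in this section
batch_size = 256        # assumed value, not given in this section
display_step = 1        # assumed value, not given in this section
examples_to_show = 10   # matches the 2x10 plot grid at the end
n_input = 784           # 28*28 pixels, per the "original 784 features" comment

X = tf.placeholder(tf.float32, [None, n_input])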
# Building the encoder
def encoder(x):
    # Encoder hidden layer #1 with sigmoid activation
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    # Encoder hidden layer #2 with sigmoid activation
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    return layer_2

# Building the decoder
def decoder(x):
    # Decoder hidden layer #1 with sigmoid activation
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    # Decoder hidden layer #2 with sigmoid activation
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    return layer_2

# Construct the model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
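As a quick sanity check (not part of the original code), the static tensor shapes confirm the 784 → 256 → 128 → 256 → 784 bottleneck:

# Hypothetical sanity check: inspect the static shapes of the graph tensors.
print(encoder_op.get_shape())  # (?, 128) -- the compressed code
print(decoder_op.get_shape())  # (?, 784) -- back to the input size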
# Prediction
y_pred = decoder_op
# Targets (labels) are the input data itself: the autoencoder learns to reconstruct its input
y_true = X
# Define loss and optimizer; minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
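The cost is the mean squared error, averaged over every pixel of every image in the batch. A tiny NumPy sketch of the same computation, with toy values chosen purely for illustration:

y_true_toy = np.array([[0.0, 1.0], [1.0, 0.0]])
y_pred_toy = np.array([[0.1, 0.9], [0.8, 0.2]])
# Squared differences are 0.01, 0.01, 0.04, 0.04; their mean is 0.025
mse = np.mean((y_true_toy - y_pred_toy) ** 2)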
# Initializing the variables
# (tf.initialize_all_variables() is deprecated; use tf.global_variables_initializer())
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(mnist.train.num_examples / batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            # batch_xs is already normalized: max(x) = 1, min(x) = 0
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get the loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    # Apply encode and decode over the test set
    encode_decode = sess.run(
        y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    # Compare original images with their reconstructions
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        # Real data (top row)
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        # Reconstructed data (bottom row)
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    plt.show()
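A natural follow-up, not in the original code, is to look at the 128-dimensional codes themselves by running only the encoder half of the graph. The sketch below assumes it is placed inside the with tf.Session() block above, while the session is still open:

# Hypothetical addition: extract the compressed 128-dim codes for the test images.
# (Must run inside the tf.Session() block, since sess is closed afterwards.)
codes = sess.run(encoder_op, feed_dict={X: mnist.test.images[:examples_to_show]})
print(codes.shape)  # (10, 128)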