# hidden layer settings
n_hidden_1 = 128  # 1st layer num features: first hidden layer compresses the input to 128 features
n_hidden_2 = 64   # 2nd layer num features: next hidden layer compresses to 64 features
n_hidden_3 = 10   # 3rd layer num features: next hidden layer compresses to 10 features
n_hidden_4 = 2    # 4th layer num features: last hidden layer compresses to 2 features

# define the weights (the decoder weights mirror the encoder shapes in reverse)
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'encoder_h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_3])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_2])),
    'decoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h4': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
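# NOTE: the biases dictionary is not shown in the listing above; the definition below is an
# assumed sketch, inferred from the keys used in encoder()/decoder() (encoder_b1..b4,
# decoder_b1..b4) and from the output size of each matmul.
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}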
# building the encoder
def encoder(x):
    # Encoder hidden layers with sigmoid activation function
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
                                   biases['encoder_b3']))
    # no activation function on the final encoder layer, so the 2-D code is unbounded
    layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),
                     biases['encoder_b4'])
    return layer_4

# building the decoder
def decoder(x):
    # Decoder hidden layers with sigmoid activation function
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
                                   biases['decoder_b3']))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),
                                   biases['decoder_b4']))
    return layer_4

# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
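# Shape sanity check (optional sketch, not part of the original code): with MNIST's 28x28
# inputs (n_input = 784) the encoder maps [batch, 784] -> 128 -> 64 -> 10 -> 2 and the
# decoder mirrors it back to 784, so the two ops should report these static shapes:
print(encoder_op.get_shape())  # expected: (?, 2)
print(decoder_op.get_shape())  # expected: (?, 784)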
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()  # tf.initialize_all_variables() is deprecated
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(mnist.train.num_examples / batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            # batch_xs has already been normalized: max(x) = 1, min(x) = 0
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    # Project the test images into the 2-D code space and visualize them by label
    encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})
    plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)
    plt.show()
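# NOTE (assumed detail, not in the listing above): coloring the scatter with
# c=mnist.test.labels requires integer class ids, i.e. the dataset loaded with
# one_hot=False. If it was loaded with one_hot=True, reduce the labels first:
import numpy as np
test_labels = mnist.test.labels
if test_labels.ndim > 1:  # one-hot encoded -> convert to class indices
    test_labels = np.argmax(test_labels, axis=1)
plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=test_labels)
plt.show()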