""" @author: victor Convolutional Neural Network Example Build a convolutional neural network with Tensorflow This example is using TensorFlow layers API see 'convolutional_network_raw' example for a raw TensorFlow implementation with variables """

from __future__ import division, print_function, absolute_import

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# Import MNIST data (the data path is an assumption; one_hot=False because
# the loss below expects integer class labels)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/data/', one_hot=False)

# Training parameters (values not given above; typical defaults assumed)
learning_rate = 0.001
num_steps = 2000
batch_size = 128

# Network parameters
num_input = 784    # MNIST data input (img shape: 28*28)
num_classes = 10   # MNIST total classes (0-9 digits)
dropout = 0.25     # Dropout, probability to drop a unit


# Create the neural network
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
    # Define a scope for reusing the variables
    with tf.variable_scope('ConvNet', reuse=reuse):
        # TF Estimator input is a dict, in case of multiple inputs
        x = x_dict['images']
        # MNIST data input is a 1-D vector of 784 features (28*28 pixels).
        # Reshape to match picture format [Height x Width x Channel].
        # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel]
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
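        # Illustrative aside (not part of the model): -1 lets TensorFlow infer
        # the batch dimension, so e.g. a (128, 784) batch becomes (128, 28, 28, 1):
        #   tf.reshape(tf.zeros([128, 784]), [-1, 28, 28, 1])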
        # Convolution Layer with 32 filters and a kernel size of 5
        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
        # Convolution Layer with 64 filters and a kernel size of 3
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = tf.contrib.layers.flatten(conv2)
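        # Shape bookkeeping for the stack above (with 'valid' padding, the
        # tf.layers default): 28x28x1 -> conv 5x5 -> 24x24x32 -> pool ->
        # 12x12x32 -> conv 3x3 -> 10x10x64 -> pool -> 5x5x64, so the
        # flattened vector has 5*5*64 = 1600 features.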
        # Fully connected layer (in tf contrib folder for now)
        fc1 = tf.layers.dense(fc1, 1024)
        # Apply Dropout (if is_training is False, dropout is not applied)
        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)

        # Output layer, class prediction (one logit per class)
        out = tf.layers.dense(fc1, n_classes)

    return out
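
# Note (informational): because conv_net builds every layer inside
# tf.variable_scope('ConvNet', reuse=reuse), a second call with reuse=True
# binds to the variables created by the first call instead of allocating new
# ones; this is how the train and test graphs below share their weights.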


# Define the model function (following the TF Estimator template)
def model_fn(features, labels, mode):
    # Build the neural network.
    # Because Dropout has different behavior at training and prediction time,
    # we need to create 2 distinct computation graphs that still share the
    # same weights.
    logits_train = conv_net(features, num_classes, dropout, reuse=False,
                            is_training=True)
    logits_test = conv_net(features, num_classes, dropout, reuse=True,
                           is_training=False)

    # Predictions (taken from the test-time graph, where dropout is disabled)
    pred_classes = tf.argmax(logits_test, axis=1)
    # If prediction mode, early return
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
    # Define loss and optimizer
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
    # Optimizer choice is an assumption (Adam); train_op is required by the
    # EstimatorSpec below
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op,
                                  global_step=tf.train.get_global_step())
    # Evaluate the accuracy of the model
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
    # TF Estimators require an EstimatorSpec that specifies
    # the different ops for training, evaluating, ...
    estim_specs = tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=pred_classes,
        loss=loss_op,
        train_op=train_op,
        eval_metric_ops={'accuracy': acc_op})

    return estim_specs
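
# Note (informational): the Estimator calls model_fn once per mode (TRAIN,
# EVAL, PREDICT) and reads only the EstimatorSpec fields relevant to that
# mode, e.g. train_op is only used when training.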

# Build the Estimator
model = tf.estimator.Estimator(model_fn)
# Define the input function for training; num_epochs=None repeats the data
# indefinitely, so training length is controlled by 'steps' below
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.train.images}, y=mnist.train.labels,
    batch_size=batch_size, num_epochs=None, shuffle=True)
# Train the Model
model.train(input_fn, steps=num_steps)
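
# Optional sketch (not in the original flow above): evaluate on the held-out
# test set, reusing the 'accuracy' metric defined in model_fn.
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.test.images}, y=mnist.test.labels,
    batch_size=batch_size, shuffle=False)
e = model.evaluate(eval_input_fn)
print('Testing Accuracy:', e['accuracy'])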

# Predict single images
n_images = 10
# Get images from test set
test_images = mnist.test.images[:n_images]
# Prepare the input data
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False)
# Use the model to predict the image classes
preds = list(model.predict(input_fn))
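# Note (informational): model.predict returns a generator yielding one value
# per input row (here, the integer class ids from pred_classes), hence the
# list() wrapper before indexing.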

# Display the images together with the model's predictions
for i in range(n_images):
    print('Model prediction:', preds[i])
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
    # Label must be set before plt.show(), which blocks until the window closes
    plt.xlabel('Model prediction: ' + str(preds[i]), fontsize=14)
    plt.show()