Fitting sin x with Batch Gradient Descent

Posted on 2018-12-15 | Category: TensorFlow

The script below builds a small fully connected network (1 input, 10 hidden units, 1 output) in TensorFlow 1.x, trains it on 300 noisy samples of sin x by feeding the whole dataset at every step (batch gradient descent), and animates the fitted curve with matplotlib.

```python
# author: victor
# import modules
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# define an add_layer function
def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one fully connected layer and return its output
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]))
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# make up some real data: 300 samples of sin(x) plus Gaussian noise
x_data = np.linspace(-np.pi, np.pi, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.sin(x_data) + noise

# define placeholders for the inputs to the network
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer; tanh keeps the output in [-1, 1], matching sin(x)
prediction = add_layer(l1, 10, 1, activation_function=tf.nn.tanh)

# mean squared error, minimized with plain gradient descent
loss = tf.reduce_mean(tf.square(ys - prediction))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

# tf.initialize_all_variables() is deprecated; use the current initializer
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# plot the real data
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
# interactive mode on, so the figure can update during training
plt.ion()
plt.show()

for i in range(5000):
    # training: feed the entire dataset every step (batch gradient descent)
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # visualize the result, erasing the previous prediction line first
        try:
            # pause 0.1 s before erasing the line
            plt.pause(0.1)
            ax.lines.remove(lines[0])  # remove the previous line from the figure
        except Exception:
            pass  # first pass: no line has been drawn yet
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        # plot the current prediction
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        plt.pause(0.1)
```

Result: an animated plot in which the red prediction curve gradually converges to the scattered sin x training data.
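For reference, here is a minimal sketch of how the same full-batch loop could look under TensorFlow 2's eager API with `tf.GradientTape`. This is an assumption on my part, not part of the original TF 1.x post: the 1 → 10 → 1 architecture, learning rate 0.05, and 5000 steps mirror the script above, and the explicit `assign_sub` update spells out the batch-gradient-descent rule theta <- theta - lr * dL/dtheta that `GradientDescentOptimizer` performs internally.

```python
# A minimal TF 2 sketch of the same full-batch training loop
# (assumption: TensorFlow 2.x; this is not from the original post).
import numpy as np
import tensorflow as tf

# same made-up data as above: 300 noisy samples of sin(x)
x_data = np.linspace(-np.pi, np.pi, 300, dtype=np.float32)[:, np.newaxis]
y_data = np.sin(x_data) + np.random.normal(0, 0.05, x_data.shape).astype(np.float32)

# same 1 -> 10 -> 1 architecture, with explicit weight variables
W1 = tf.Variable(tf.random.normal([1, 10]))
b1 = tf.Variable(tf.zeros([1, 10]))
W2 = tf.Variable(tf.random.normal([10, 1]))
b2 = tf.Variable(tf.zeros([1, 1]))
params = [W1, b1, W2, b2]

def predict(x):
    hidden = tf.nn.relu(tf.matmul(x, W1) + b1)  # hidden layer
    return tf.tanh(tf.matmul(hidden, W2) + b2)  # output layer

lr = 0.05
for step in range(5000):
    with tf.GradientTape() as tape:
        # loss over the ENTIRE dataset: this is what makes it *batch* GD
        loss = tf.reduce_mean(tf.square(y_data - predict(x_data)))
    grads = tape.gradient(loss, params)
    # the gradient-descent update: theta <- theta - lr * dL/dtheta
    for p, g in zip(params, grads):
        p.assign_sub(lr * g)
    if step % 500 == 0:
        print(step, float(loss))
```

Because every update uses all 300 samples, each step follows the exact gradient of the full objective; mini-batch or stochastic gradient descent would instead sample a subset of the data at each step.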