Implementing an SVM (Support Vector Machine) with TensorFlow



一、Implementing an SVM with TensorFlow

# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets

# Load the data: sepal length and petal width from the iris dataset
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])

# Split into training and test sets (80% / 20%)
train_indices = np.random.choice(len(x_vals), int(len(x_vals) * 0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]

batch_size = 100

# Placeholders for feeding data
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create weight variables for the two models
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
A2 = tf.Variable(tf.random_normal(shape=[2, 1]))
b2 = tf.Variable(tf.random_normal(shape=[1, 1]))

# Define the linear models: output = Ax - b
model_output = tf.subtract(tf.matmul(x_data, A), b)
model_output2 = tf.subtract(tf.matmul(x_data, A2), b2)

# Squared L2 norm of the weight vector
l2_norm = tf.reduce_sum(tf.square(A))

# Loss = mean(max(0, 1 - pred * actual)) + alpha * L2_norm(A)^2
alpha = tf.constant([0.01])
classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
classification_term2 = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output2, y_target))))
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
loss2 = tf.add(classification_term2, [0.])  # same hinge loss, but without the L2 penalty

my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
my_opt2 = tf.train.GradientDescentOptimizer(0.01)
train_step2 = my_opt2.minimize(loss2)

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

    # Training loop
    loss_vec = []
    train_accuracy = []
    test_accuracy = []
    for i in range(20000):
        rand_index = np.random.choice(len(x_vals_train), size=batch_size)
        rand_x = x_vals_train[rand_index]
        rand_y = np.transpose([y_vals_train[rand_index]])
        sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
        sess.run(train_step2, feed_dict={x_data: rand_x, y_target: rand_y})

    # Extract the separating lines: a1 * sepal_length + a2 * petal_width - b = 0
    [[a1], [a2]] = sess.run(A)
    [[b_val]] = sess.run(b)
    slope = -a2 / a1
    y_intercept = b_val / a1
    best_fit = []

    [[a12], [a22]] = sess.run(A2)
    [[b2_val]] = sess.run(b2)
    slope2 = -a22 / a12
    y_intercept2 = b2_val / a12
    best_fit2 = []

    x1_vals = [d[1] for d in x_vals]
    for i in x1_vals:
        best_fit.append(slope * i + y_intercept)
        best_fit2.append(slope2 * i + y_intercept2)

    # Separate I. setosa from the other classes for plotting
    setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]
    setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]
    not_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]
    not_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]

    plt.plot(setosa_x, setosa_y, 'o', label='I. setosa')
    plt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa')
    plt.plot(x1_vals, best_fit, 'r-', label='Linear Separator + w', linewidth=3)
    plt.plot(x1_vals, best_fit2, 'b-', label='Linear Separator', linewidth=3)
    plt.ylim([0, 10])
    plt.legend(loc='lower right')
    plt.title('Sepal Length vs Petal Width')
    plt.xlabel('Petal Width')
    plt.ylabel('Sepal Length')
    plt.show()
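The lists loss_vec, train_accuracy, and test_accuracy above are declared but never filled. A minimal sketch of how they could be populated, assuming an extra prediction/accuracy op is added to the graph before the Session block (the names are illustrative, not part of the original script):

    # Added alongside the other graph ops, before tf.Session():
    prediction = tf.sign(model_output)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))

    # Added inside the training loop, after the two sess.run(train_step...) calls:
    loss_vec.append(sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}))
    train_accuracy.append(sess.run(accuracy, feed_dict={
        x_data: x_vals_train, y_target: np.transpose([y_vals_train])}))
    test_accuracy.append(sess.run(accuracy, feed_dict={
        x_data: x_vals_test, y_target: np.transpose([y_vals_test])}))

With these in place, plotting loss_vec against the iteration index shows whether all 20000 steps are actually needed.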
The code builds two linear models. When computing the loss, one adds the squared ‖w‖ regularization term and the other does not, so the plot shows two separating lines. The red line maximizes the distance from the support vectors to the linear separator, which lets the model generalize better to unseen data.
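For reference, here is a small, self-contained NumPy sketch of the two loss functions the script optimizes: the soft-margin loss (hinge term plus α‖A‖²) and the plain hinge loss. The function name and sample values are illustrative only:

    import numpy as np

    def svm_loss(A, b, x, y, alpha=0.01):
        # Soft-margin SVM loss: mean hinge term plus alpha * ||A||^2.
        # With alpha=0 this is the unregularized loss used for the second model.
        scores = x.dot(A) - b                      # model output, shape (n, 1)
        hinge = np.maximum(0.0, 1.0 - scores * y)  # per-sample hinge loss
        return hinge.mean() + alpha * np.sum(A ** 2)

    # Two points on either side of the line x0 - x1 = 0
    A = np.array([[1.0], [-1.0]])
    b = np.array([[0.0]])
    x = np.array([[2.0, 0.0], [0.0, 2.0]])
    y = np.array([[1.0], [-1.0]])
    print(svm_loss(A, b, x, y))           # hinge + L2 penalty
    print(svm_loss(A, b, x, y, alpha=0))  # hinge term only

Since the margin of the separator Ax − b = 0 has width 2/‖A‖, penalizing ‖A‖² is exactly what pushes the red line toward the maximum-margin solution.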

二、Implementing an SVM with TensorFlow, saving and reusing the model

Training code:

# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from sklearn import datasets

# Load the data: sepal length and petal width from the iris dataset
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])

# Split into training and test sets (80% / 20%)
train_indices = np.random.choice(len(x_vals), int(len(x_vals) * 0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]

batch_size = 100

# Placeholders for feeding data
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create weight variables
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Define the linear model: output = Ax - b
model_output = tf.subtract(tf.matmul(x_data, A), b)

# Squared L2 norm of the weight vector
l2_norm = tf.reduce_sum(tf.square(A))

# Loss = mean(max(0, 1 - pred * actual)) + alpha * L2_norm(A)^2
alpha = tf.constant([0.01])
classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))

my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)

# Saver for persisting the trained variables
saver = tf.train.Saver()

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

    # Training loop
    for i in range(20000):
        rand_index = np.random.choice(len(x_vals_train), size=batch_size)
        rand_x = x_vals_train[rand_index]
        rand_y = np.transpose([y_vals_train[rand_index]])
        sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})

    saver.save(sess, "./model/model.ckpt")
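After training, the checkpoint files land under ./model. A quick way to confirm what was saved, assuming TensorFlow 1.x (this inspection step is not part of the original script):

    import tensorflow as tf

    # Locate the latest checkpoint under ./model and list the variables it stores
    ckpt_path = tf.train.latest_checkpoint("./model")   # e.g. "./model/model.ckpt"
    reader = tf.train.NewCheckpointReader(ckpt_path)
    for name, shape in reader.get_variable_to_shape_map().items():
        print(name, shape, reader.get_tensor(name))

This should show the two trained variables (the 2x1 weight matrix and the 1x1 bias) that the evaluation script below restores.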

Evaluation code (restores and applies the saved model):

# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from sklearn import datasets

# Load the data: sepal length and petal width from the iris dataset
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])

# Pick a test set (80% of the data, chosen at random)
test_indices = np.random.choice(len(x_vals), int(len(x_vals) * 0.8), replace=False)
x_vals_test = x_vals[test_indices]
y_vals_test = y_vals[test_indices]

# Placeholders for feeding data
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Recreate the same variables so the checkpoint can be restored into them
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Define the linear model: output = Ax - b
model_output = tf.subtract(tf.matmul(x_data, A), b)

# A sample is classified correctly when model_output * y_target > 0,
# so this op is 0 exactly for the misclassified (or on-boundary) samples
result = tf.maximum(0., tf.multiply(model_output, y_target))

saver = tf.train.Saver()

with tf.Session() as sess:
    saver.restore(sess, "./model/model.ckpt")
    y_test = np.reshape(y_vals_test, (-1, 1))
    array = sess.run(result, feed_dict={x_data: x_vals_test, y_target: y_test})
    num = np.array(array)
    zero_num = np.sum(num == 0)  # number of misclassified test samples
    print(num)
    print(zero_num)
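Here zero_num counts the test samples for which model_output * y_target is not positive, i.e. the misclassified (or exactly on-boundary) points. To report accuracy directly, a short continuation of the with-block above could look like this (a sketch, not part of the original code):

    # Still inside the with-block, after zero_num is computed:
    prediction = tf.sign(model_output)   # predicted +1 / -1 labels
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
    acc_val = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: y_test})
    print("test accuracy: %.4f" % acc_val)
    print("fraction misclassified: %.4f" % (zero_num / float(len(y_vals_test))))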





