Perceptron Algorithm


**Perceptron algorithm: code for binary and multi-class classification using linear discriminant functions (or, more generally, linear discriminant surfaces), plus perceptron test code on iris_data. The repeated reward/penalty iterations lay the groundwork for the gradient-descent methods that come later.**
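The reward/penalty iteration mentioned above is the fixed-increment perceptron rule. With augmented pattern vectors x (and the second-class samples multiplied by -1, as in the code of section 1), a solution vector w must satisfy wᵀx > 0 for every sample; each misclassified sample is "penalized" into the weight vector with a fixed increment C:

$$
w(k+1) =
\begin{cases}
w(k) + C\,x_k, & w(k)^{\mathsf T} x_k \le 0 \quad \text{(penalty: misclassified)}\\[2pt]
w(k), & w(k)^{\mathsf T} x_k > 0 \quad \text{(reward: leave unchanged)}
\end{cases}
$$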

-

GitHub: https://github.com/cuixuage/Machine_Learning
Reference: https://ljalphabeta.gitbooks.io/python-/content/ch2section3.html

-

1. Binary classification: obtaining a discriminant function

Example: ω1 and ω2 are the two pattern classes.
Use the perceptron algorithm to find a solution vector w for the following classification problem:
ω1: {(0 0 0)T, (1 0 0)T, (1 0 1)T, (1 1 0)T}
ω2: {(0 0 1)T, (0 1 1)T, (0 1 0)T, (1 1 1)T}
Write a perceptron program that solves this problem.

```python
# coding:utf-8
# Binary classification (assumed linearly separable).
# Use the perceptron algorithm to find a solution vector w for:
#   ω1: {(0 0 0)T, (1 0 0)T, (1 0 1)T, (1 1 0)T}
#   ω2: {(0 0 1)T, (0 1 1)T, (0 1 0)T, (1 1 1)T}
import numpy as np

# Augmented pattern vectors: append 1 for the bias term
x1 = np.mat([0, 0, 0, 1])
x2 = np.mat([1, 0, 0, 1])
x3 = np.mat([1, 0, 1, 1])
x4 = np.mat([1, 1, 0, 1])
# Multiply the ω2 patterns by -1 so that a single inequality w.T * x > 0 covers both classes
x5 = np.mat([0, 0, -1, -1])
x6 = np.mat([0, -1, -1, -1])
x7 = np.mat([0, -1, 0, -1])
x8 = np.mat([-1, -1, -1, -1])
x = [x1.T, x2.T, x3.T, x4.T, x5.T, x6.T, x7.T, x8.T]

w = np.mat([0, 0, 0, 0]).T    # initialization: increment C = 1, w(1) = (0 0 0 0)T

updated = True
while updated:
    updated = False
    for i in range(len(x)):
        d = w.T * x[i]        # 1x1 matrix
        print(d.sum())
        if d <= 0:
            w = w + x[i]      # penalty: sample misclassified, correct w
            updated = True    # at least one penalty -> keep iterating
    print("***********")
print(w)
```
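As a small sanity check (an illustrative addition that reuses the `w` and `x` variables from the block above, not part of the original program), a valid solution vector must give wᵀx > 0 for every augmented sample:

```python
# Illustrative sanity check: reuses w and x from the training loop above.
# A correct solution vector classifies every augmented sample strictly positively.
for i, xi in enumerate(x):
    margin = (w.T * xi).item()
    assert margin > 0, "sample %d is still misclassified (margin %s)" % (i, margin)
print("all 8 augmented samples satisfy w.T * x > 0")
```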

-

2. Multi-class classification: a generalized linear decision surface

Use the multi-class perceptron algorithm to find discriminant functions for the following patterns (the decision rule and update rule are summarized after the list):
ω1: (-1 -1)T
ω2: (0 0)T
ω3: (1 1)T
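In the multi-class case, each class ω_i has its own augmented weight vector w_i, and a sample x is assigned to the class whose discriminant d_i(x) = w_iᵀx is strictly largest. When a training sample from ω_i does not win strictly, w_i is rewarded and all other weight vectors are penalized (increment C = 1 in the code below):

$$
x \in \omega_i \iff w_i^{\mathsf T} x > w_j^{\mathsf T} x \ \ \forall j \ne i,
\qquad
\text{on error: } w_i \leftarrow w_i + C\,x, \quad w_j \leftarrow w_j - C\,x \ \ (j \ne i)
$$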

```python
# coding:utf-8
# Multi-class problem: a generalized linear decision surface in 3-D augmented space.
# Use the multi-class perceptron algorithm to find discriminant functions for:
#   ω1: (-1 -1)T    ω2: (0 0)T    ω3: (1 1)T
import numpy as np

# Augmented pattern vectors: append 1 for the bias term
x1 = np.mat([-1, -1, 1])
x2 = np.mat([0, 0, 1])
x3 = np.mat([1, 1, 1])
x = [x1.T, x2.T, x3.T]

# One weight vector per class, all initialized to zero
w1 = np.mat([0, 0, 0])
w2 = np.mat([0, 0, 0])
w3 = np.mat([0, 0, 0])
w = [w1.T, w2.T, w3.T]

updated = True
count = 0
while updated:
    updated = False
    for i in range(len(x)):                           # sample x[i] belongs to class i
        count += 1
        d = [w[j].T * x[i] for j in range(len(w))]    # discriminant values d_j(x)
        print(d)
        if i == 0:                                    # sample from ω1
            if d[0] <= d[1] or d[0] <= d[2]:
                w[0] += x[0]                          # reward the correct class
                w[1] -= x[0]                          # penalize the others
                w[2] -= x[0]
                updated = True
        if i == 1:                                    # sample from ω2
            if d[1] <= d[0] or d[1] <= d[2]:
                w[0] -= x[1]
                w[1] += x[1]
                w[2] -= x[1]
                updated = True
        if i == 2:                                    # sample from ω3
            if d[2] <= d[0] or d[2] <= d[1]:
                w[0] -= x[2]
                w[1] -= x[2]
                w[2] += x[2]
                updated = True
        print(count, "**********")
print(w)
```
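As with the binary case, a quick check can confirm the result (an illustrative addition that reuses `w` and `x` from the block above): after the loop terminates, the argmax over the three discriminants should return each sample's own class index:

```python
# Illustrative check: reuses the w and x lists from the training loop above.
# After convergence, sample i should obtain its largest discriminant from w[i].
for i, xi in enumerate(x):
    scores = [(wj.T * xi).item() for wj in w]
    print("sample", i, "scores", scores, "-> assigned to class", int(np.argmax(scores)))
```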

-
3. Perceptron on the IRIS_DATA dataset

The perceptron classifier class is listed below.
Reference: python_machine_learning:
https://ljalphabeta.gitbooks.io/python-/content/ch2section3.html
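The class implements Rosenblatt's learning rule: for each training sample with label y in {-1, +1}, every weight (including the bias w₀) is updated by

$$
\Delta w_j = \eta \left( y^{(i)} - \hat{y}^{(i)} \right) x_j^{(i)}, \qquad w_j \leftarrow w_j + \Delta w_j,
$$

where η is the learning rate and ŷ⁽ⁱ⁾ is the unit-step prediction, so the update is zero whenever a sample is already classified correctly.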
-

```python
import numpy as np


class Perceptron(object):
    """Perceptron classifier.

    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.

    Attributes
    -------------
    w_ : 1d-array
        Weights after fitting.
    errors_ : list
        Number of misclassifications in every epoch.
    """

    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data.

        Parameters
        ------------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        ----------
        self : object
        """
        self.w_ = np.zeros(1 + X.shape[1])  # add w_0 for the bias
        self.errors_ = []

        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        """Calculate net input"""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Return class label after unit step"""
        return np.where(self.net_input(X) >= 0.0, 1, -1)  # analogous to ?: in C++
```
```python
# coding:utf-8
# Reference: https://ljalphabeta.gitbooks.io/python-/content/ch2section3.html
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.colors import ListedColormap
import Perceptron as perceptron_class

df = pd.read_csv('D:\Pycharm\Projects\Perceptron_1\iris.data', header=None)
# print(df.tail())

# Take the first 100 samples, which are exactly the Setosa and Versicolor samples;
# label Versicolor as class 1 and Setosa as class -1.
# As features, take the sepal length and petal length columns; the commented-out
# scatter plot below visualizes the raw data.
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values
# plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
# plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
# plt.xlabel('sepal length')
# plt.ylabel('petal length')
# plt.legend(loc='upper left')
# plt.show()

# Train the perceptron model
ppn = perceptron_class.Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)


# Plot the decision boundary
def plot_decision_regions(X, y, classifier, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)


plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.show()
```

The test result shows that the perceptron separates the two pattern classes well.
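Because the class stores the number of misclassifications per epoch in `errors_`, convergence can also be checked without the decision-region plot; a minimal sketch, assuming the fitted `ppn` object and the `plt` import from the script above are still in scope:

```python
# Illustrative addition: plot misclassifications per epoch to confirm the
# perceptron converges on the two selected iris classes.
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.show()
```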
