Add sklearn versions of decision tree, naive Bayes, and regression; sklearn version of logistic regression

chenyyx
2017-07-04 19:59:10 +08:00
parent 3a42d3a7d2
commit 8de61578d9
4 changed files with 381 additions and 97 deletions

View File

@@ -19,36 +19,38 @@ import matplotlib.pyplot as plt
# Create a random dataset
# See https://docs.scipy.org/doc/numpy-1.6.0/reference/generated/numpy.random.mtrand.RandomState.html
rng = np.random.RandomState(1)
-print 'lalalalala===', rng
+# print 'lalalalala===', rng
# rand() returns random values in the given shape; rng.rand(80, 1) is a matrix with 80 rows and 1 column
# sort()
X = np.sort(5 * rng.rand(80, 1), axis=0)
-print 'X=', X
+# print 'X=', X
y = np.sin(X).ravel()
-print 'y=', y
+# print 'y=', y
y[::5] += 3 * (0.5 - rng.rand(16))
-print 'yyy=', y
+# print 'yyy=', y
# Fit the regression models
-regr_1 = DecisionTreeRegressor(max_depth=2)
+# regr_1 = DecisionTreeRegressor(max_depth=2)
+# Keeping max_depth=5 unchanged and adding the min_samples_leaf=6 parameter improves the result further
-regr_2 = DecisionTreeRegressor(max_depth=5)
-regr_3 = DecisionTreeRegressor(max_depth=3)
-regr_1.fit(X, y)
+regr_2 = DecisionTreeRegressor(min_samples_leaf=6)
+# regr_3 = DecisionTreeRegressor(max_depth=4)
+# regr_1.fit(X, y)
regr_2.fit(X, y)
-regr_3.fit(X, y)
+# regr_3.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
-y_1 = regr_1.predict(X_test)
+# y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
-y_3 = regr_3.predict(X_test)
+# y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
-plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
+# plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
-plt.plot(X_test, y_3, color="red", label="max_depth=3", linewidth=2)
+# plt.plot(X_test, y_3, color="red", label="max_depth=3", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")

View File

@@ -8,73 +8,38 @@ NaiveBayes朴素贝叶斯
@author: 小瑶
"Machine Learning in Action" update repo: https://github.com/apachecn/MachineLearning
"""
+# GaussianNB (Gaussian naive Bayes)
import numpy as np
-import matplotlib.pyplot as plt
-from sklearn import svm
-print(__doc__)
-# Create 40 separable points
-np.random.seed(0)
-# X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
-# Y = [0] * 20 + [1] * 20
-def loadDataSet(fileName):
-    """
-    Parse the file line by line to get each line's class label and the whole data matrix
-    Args:
-        fileName   the file name
-    Returns:
-        dataMat    the data matrix
-        labelMat   the class labels
-    """
-    dataMat = []
-    labelMat = []
-    fr = open(fileName)
-    for line in fr.readlines():
-        lineArr = line.strip().split('\t')
-        dataMat.append([float(lineArr[0]), float(lineArr[1])])
-        labelMat.append(float(lineArr[2]))
-    return dataMat, labelMat
-X, Y = loadDataSet('input/6.SVM/testSet.txt')
-X = np.mat(X)
-print("X=", X)
-print("Y=", Y)
-# Fit an SVM model
-clf = svm.SVC(kernel='linear')
+X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
+Y = np.array([1, 1, 1, 2, 2, 2])
+from sklearn.naive_bayes import GaussianNB
+clf = GaussianNB()
+clf.fit(X, Y)
+print(clf.predict([[-0.8, -1]]))
+clf_pf = GaussianNB()
+# partial_fit trains incrementally; the full class list np.unique(Y) must be passed on the first call
+clf_pf.partial_fit(X, Y, np.unique(Y))
+print(clf_pf.predict([[-0.8, -1]]))
-# Get the separating hyperplane
-w = clf.coef_[0]
-# slope
-a = -w[0] / w[1]
-# Sample 50 evenly spaced points (num=50 is the default)
-# xx = np.linspace(-5, 5)  # , num=50)
-xx = np.linspace(-2, 10)  # , num=50)
-# Equation of the separating line in 2-D
-yy = a * xx - (clf.intercept_[0]) / w[1]
-print("yy=", yy)
+# MultinomialNB (multinomial naive Bayes)
+'''
+import numpy as np
+X = np.random.randint(5, size=(6, 100))
+y = np.array([1, 2, 3, 4, 5, 6])
+from sklearn.naive_bayes import MultinomialNB
+clf = MultinomialNB()
+clf.fit(X, y)
+print(clf.predict(X[2:3]))
+'''
-# plot the parallels to the separating hyperplane that pass through the support vectors
-print("support_vectors_=", clf.support_vectors_)
-b = clf.support_vectors_[0]
-yy_down = a * xx + (b[1] - a * b[0])
-b = clf.support_vectors_[-1]
-yy_up = a * xx + (b[1] - a * b[0])
-# plot the line, the points, and the nearest vectors to the plane
-plt.plot(xx, yy, 'k-')
-plt.plot(xx, yy_down, 'k--')
-plt.plot(xx, yy_up, 'k--')
-plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none')
-plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
-plt.axis('tight')
-plt.show()
+# BernoulliNB (Bernoulli naive Bayes)
+'''
+import numpy as np
+X = np.random.randint(2, size=(6, 100))
+Y = np.array([1, 2, 3, 4, 4, 5])
+from sklearn.naive_bayes import BernoulliNB
+clf = BernoulliNB()
+clf.fit(X, Y)
+print(clf.predict(X[2:3]))
+'''
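
The three variants differ mainly in what the features are assumed to be: GaussianNB models continuous values, MultinomialNB counts, and BernoulliNB binary indicators. A minimal self-contained sketch (the data and names here are illustrative, not from the commit):

    import numpy as np
    from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB

    rng = np.random.RandomState(0)
    y = np.array([0, 0, 0, 1, 1, 1])
    for name, clf, X in [("Gaussian", GaussianNB(), rng.randn(6, 3)),                    # real-valued features
                         ("Multinomial", MultinomialNB(), rng.randint(5, size=(6, 3))),  # count features
                         ("Bernoulli", BernoulliNB(), rng.randint(2, size=(6, 3)))]:     # binary features
        clf.fit(X, y)
        print(name, clf.predict_proba(X[:1]))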

View File

@@ -0,0 +1,280 @@
#!/usr/bin/python
# coding: utf8
'''
Created on Oct 27, 2010
Update on 2017-05-18
Logistic Regression Working Module
@author: 小瑶
"Machine Learning in Action" update repo: https://github.com/apachecn/MachineLearning
scikit-learn example: http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
'''
# L1 Penalty and Sparsity in Logistic Regression
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# Binarize the targets: digits > 4 are "large" (1), the rest "small" (0)
y = (y > 4).astype(np.int)
# Set the regularization parameter
for i, C in enumerate((100, 1, 0.01)):
    # Loosen the tolerance so training stays fast
    clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
    clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
    clf_l1_LR.fit(X, y)
    clf_l2_LR.fit(X, y)
    coef_l1_LR = clf_l1_LR.coef_.ravel()
    coef_l2_LR = clf_l2_LR.coef_.ravel()
    # coef_l1_LR contains zeros due to the L1 sparsity-inducing norm
    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
    print("C=%.2f" % C)
    print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
    print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
    print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
    print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
    l1_plot = plt.subplot(3, 2, 2 * i + 1)
    l2_plot = plt.subplot(3, 2, 2 * (i + 1))
    if i == 0:
        l1_plot.set_title("L1 penalty")
        l2_plot.set_title("L2 penalty")
    l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    plt.text(-8, 3, "C = %.2f" % C)
    l1_plot.set_xticks(())
    l1_plot.set_yticks(())
    l2_plot.set_xticks(())
    l2_plot.set_yticks(())
plt.show()
'''
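# A portability note on the block above, which assumes 2017-era scikit-learn where
# LogisticRegression defaults to the liblinear solver: from scikit-learn 0.22 on,
# the default solver is lbfgs, which rejects penalty='l1', so an explicit solver
# would be needed, e.g.:
#     clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01, solver='liblinear')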
# Regularization path of L1-penalized logistic regression
'''
print(__doc__)
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
'''
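# A note on the cs grid above: l1_min_c(X, y, loss='log') returns the smallest C
# at which an L1-penalized model can have any non-zero coefficient, so the sweep
# over np.logspace(0, 3) starts right at the edge of total sparsity, and the
# plotted path shows coefficients entering the model one by one as C grows.
# For instance, the first point of the path is essentially the all-zero model:
#     np.allclose(coefs_[0], 0, atol=1e-3)   # True, up to solver tolerance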
# Plot multinomial and One-vs-Rest Logistic Regression
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# Make a 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
    clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
                             multi_class=multi_class).fit(X, y)
    # Print the training score
    print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
    # Create a mesh to plot in
    h = .02  # step size in the mesh
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Plot the decision boundary by assigning a color to each point in the mesh [x_min, x_max]x[y_min, y_max]
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
    plt.axis('tight')
    # Plot the training points as well
    colors = "bry"
    for i, color in zip(clf.classes_, colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
    # Plot the three one-vs-rest classifiers
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    coef = clf.coef_
    intercept = clf.intercept_
    def plot_hyperplane(c, color):
        def line(x0):
            return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
        plt.plot([xmin, xmax], [line(xmin), line(xmax)],
                 ls="--", color=color)
    for i, color in zip(clf.classes_, colors):
        plot_hyperplane(i, color)
plt.show()
'''
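# Why plot_hyperplane's line() in the block above works: the OvR decision
# boundary for class c is the set of points where
#     coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0
# Solving for x1 gives
#     x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1]
# which is exactly what the nested line() helper evaluates at xmin and xmax
# before drawing the dashed boundary between those two points.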
# Logistic Regression 3-class Classifier 逻辑回归 3-类 分类器
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# Import some data to play with
iris = datasets.load_iris()
# We take only the first two features of the samples
X = iris.data[:, :2]
Y = iris.target
h = .02  # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the logistic regression classifier and fit the data
logreg.fit(X, Y)
# Plot the decision boundary by assigning a color to each point in the mesh [x_min, x_max]x[y_min, y_max]
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot the training points into the same color plot
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
'''
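# A side note on C=1e5, used in the block above and again below: in scikit-learn,
# C is the inverse of the regularization strength, so a very large C all but
# disables regularization and the fit approaches plain maximum-likelihood
# logistic regression.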
# Logistic function
# Like the sigmoid we discussed in the logistic regression chapter: a smooth approximation of a step function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# This is our test set: just a straight line with some Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# Fit the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
    return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
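
The hand-rolled sigmoid above can be cross-checked against the estimator itself: for a binary LogisticRegression, the second column of predict_proba is exactly sigmoid(w * x + b). A minimal sketch, not part of the upstream example:

    # should print True: predict_proba reproduces the manually computed curve
    proba = clf.predict_proba(X_test[:, np.newaxis])[:, 1]
    print(np.allclose(proba, loss))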

View File

@@ -4,7 +4,7 @@
'''
Created on Jan 8, 2011
Update on 2017-05-18
-@author: Peter Harrington/ApacheCN-小瑶
+@author: Peter Harrington/小瑶
《机器学习实战》更新地址https://github.com/apachecn/MachineLearning
'''
@@ -12,37 +12,74 @@ Update on 2017-05-18
from numpy import *
import matplotlib.pylab as plt
-def loadDataSet(fileName):  # Parse floats from a tab-delimited file
+def loadDataSet(fileName):
    """ Load the data
        Parse floats from a tab-delimited file
        Returns
            dataMat :  the dataset of the features
            labelMat : the class labels, i.e. the target values
    """
-    numFeat = len(open(fileName).readline().split('\t')) - 1  # fields in each input line; the last one is the true value
-    dataMat = []; labelMat = []
+    # Total number of sample features, not counting the final target variable
+    numFeat = len(open(fileName).readline().split('\t')) - 1
+    dataMat = []
+    labelMat = []
    fr = open(fileName)
-    for line in fr.readlines():  # read each line
+    for line in fr.readlines():
+        # Read each line
        lineArr = []
-        curLine = line.strip().split('\t')  # strip the whitespace around the tab-separated fields
-        for i in range(numFeat):  # i runs over the feature columns
-            lineArr.append(float(curLine[i]))  # collect this sample's features into a row vector
-        dataMat.append(lineArr)  # store the input part of the sample in the dataMat matrix
-        labelMat.append(float(curLine[-1]))  # store the last field of each line, the true target value, in the labelMat matrix
+        # Strip the whitespace around the tab-separated fields
+        curLine = line.strip().split('\t')
+        # i runs over the feature columns (0 up to numFeat, exclusive)
+        for i in range(numFeat):
+            # Collect this sample's features into a row vector
+            lineArr.append(float(curLine[i]))
+        # Store the input part of the sample in the dataMat list
+        dataMat.append(lineArr)
+        # Store the last field of each line -- the class, or target variable -- in the labelMat list
+        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
-def standRegres(xArr, yArr):  # linear regression
-    xMat = mat(xArr); yMat = mat(yArr).T  # mat() converts xArr and yArr to matrices
-    xTx = xMat.T * xMat  # matrix multiplication requires the left matrix's column count to equal the right matrix's row count
-    if linalg.det(xTx) == 0.0:  # xTx must be invertible, i.e. its determinant must be non-zero
-        print("This matrix is singular, cannot do inverse")
+def standRegres(xArr, yArr):
+    '''
+    Description:
+        Standard linear regression
+    Args:
+        xArr : the input samples, each holding the sample's features
+        yArr : the class labels, i.e. the target variable of each input sample
+    Returns:
+        ws : the regression weights
+    '''
+    # mat() converts xArr and yArr to matrices; mat().T transposes the matrix
+    xMat = mat(xArr)
+    yMat = mat(yArr).T
+    # Matrix multiplication requires the left matrix's column count to equal the right matrix's row count
+    xTx = xMat.T * xMat
+    # We need the inverse of xTx, so first check that xTx is invertible: its determinant must be non-zero
+    # linalg.det() computes the determinant; if it is 0 the matrix has no inverse and we cannot continue
+    if linalg.det(xTx) == 0.0:
+        print("This matrix is singular, cannot do inverse")
        return
+    # Ordinary least squares
+    # http://www.apache.wiki/pages/viewpage.action?pageId=5505133
-    ws = xTx.I * (xMat.T * yMat)  # the book's formula for the optimal w
+    # The book's closed-form formula for the optimal w
+    ws = xTx.I * (xMat.T * yMat)
    return ws
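
standRegres computes w = (X^T X)^-1 X^T y through an explicit matrix inverse, which is why it has to bail out when det(xTx) is 0. A minimal alternative sketch that solves the same least-squares problem without forming the inverse (standRegres_lstsq is an illustrative name, not part of the commit):

    def standRegres_lstsq(xArr, yArr):
        # lstsq minimizes ||Xw - y||^2 via SVD, so it also copes with a singular X^T X
        return linalg.lstsq(array(xArr), array(yArr))[0]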
-def lwlr(testPoint, xArr, yArr, k=1.0):  # locally weighted linear regression
+# Locally weighted linear regression
+def lwlr(testPoint, xArr, yArr, k=1.0):
+    '''
+    Description:
+        Locally weighted linear regression: fit a regression around the query point
+    Args:
+        testPoint : the sample point to predict for
+        xArr : the input samples, each holding the sample's features
+        yArr : the target variable of each input sample
+        k : the kernel width, controlling how fast the weights decay with distance
+    Returns:
+        the prediction for testPoint
+    '''
    xMat = mat(xArr); yMat = mat(yArr).T
    m = shape(xMat)[0]  # number of rows of xMat
    weights = mat(eye((m)))  # eye() returns a 2-D array with ones on the diagonal and zeros elsewhere -- the initial weight matrix
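
The hunk, and this excerpt, stop mid-function. For reference, in the book's standard version lwlr continues roughly as below, inside the function body; this is a sketch of that well-known code, not what the commit contains:

    for j in range(m):
        diffMat = testPoint - xMat[j, :]
        # Gaussian kernel: weights decay exponentially with distance from testPoint
        weights[j, j] = exp(diffMat * diffMat.T / (-2.0 * k ** 2))
    xTx = xMat.T * (weights * xMat)
    if linalg.det(xTx) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    ws = xTx.I * (xMat.T * (weights * yMat))
    return testPoint * ws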