The Pitfalls of Accuracy, and the Confusion Matrix
import numpy as np
def confusion_matrix(y_true, y_predict):
    '''
    Build the confusion matrix for binary classification and return it
    :param y_true: true labels, type ndarray
    :param y_predict: predicted labels, type ndarray
    :return: ndarray of shape (2, 2), laid out as [[TN, FP], [FN, TP]]
    '''
#********* Begin *********#
def TN(y_true, y_predict):
return np.sum((y_true == 0) & (y_predict == 0))
def FP(y_true, y_predict):
return np.sum((y_true == 0) & (y_predict == 1))
def FN(y_true, y_predict):
return np.sum((y_true == 1) & (y_predict == 0))
def TP(y_true, y_predict):
return np.sum((y_true == 1) & (y_predict == 1))
return np.array([
[TN(y_true, y_predict), FP(y_true, y_predict)],
[FN(y_true, y_predict), TP(y_true, y_predict)]
])
#********* End *********#
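A quick sanity check on made-up toy arrays (the values below are chosen only for illustration):
import numpy as np
y_true = np.array([1, 0, 1, 1, 0, 0])
y_pred = np.array([1, 0, 0, 1, 0, 1])
# Layout is [[TN, FP], [FN, TP]]; here that should be [[2, 1], [1, 2]].
print(confusion_matrix(y_true, y_pred))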
Level 7: Precision and Recall
import numpy as np
def precision_score(y_true, y_predict):
    '''
    Compute the precision and return it
    :param y_true: true labels, type ndarray
    :param y_predict: predicted labels, type ndarray
    :return: precision, type float
    '''
#********* Begin *********#
    def TP(y_true, y_predict):
        return np.sum((y_true == 1) & (y_predict == 1))
    def FP(y_true, y_predict):
        return np.sum((y_true == 0) & (y_predict == 1))
    tp = TP(y_true, y_predict)
    fp = FP(y_true, y_predict)
    # tp and fp are numpy integers, so a zero denominator yields nan with a
    # warning instead of raising ZeroDivisionError; guard it explicitly.
    if tp + fp == 0:
        return 0.0
    return tp / (tp + fp)
#********* End *********#
def recall_score(y_true, y_predict):
    '''
    Compute the recall and return it
    :param y_true: true labels, type ndarray
    :param y_predict: predicted labels, type ndarray
    :return: recall, type float
    '''
#********* Begin *********#
    def FN(y_true, y_predict):
        return np.sum((y_true == 1) & (y_predict == 0))
    def TP(y_true, y_predict):
        return np.sum((y_true == 1) & (y_predict == 1))
    fn = FN(y_true, y_predict)
    tp = TP(y_true, y_predict)
    # Same guard as above: numpy integer division by zero gives nan, not an exception.
    if tp + fn == 0:
        return 0.0
    return tp / (tp + fn)
#********* End *********#
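A small worked example with hypothetical labels: two of the three predicted positives are true positives, and two of the three actual positives are found.
import numpy as np
y_true = np.array([1, 1, 1, 0, 0, 0])
y_pred = np.array([1, 1, 0, 1, 0, 0])
print(precision_score(y_true, y_pred))  # 2 / 3
print(recall_score(y_true, y_pred))     # 2 / 3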
Level 8: F1 Score
import numpy as np
def f1_score(precision, recall):
    '''
    Compute the F1 score and return it
    :param precision: the model's precision, type float
    :param recall: the model's recall, type float
    :return: the model's F1 score, type float
    '''
#********* Begin *********#
    try:
        return 2 * precision * recall / (precision + recall)
    except ZeroDivisionError:
        # precision and recall are plain floats here, so 0/0 does raise.
        return 0.0
#********* End ***********#
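A worked example: the F1 score is the harmonic mean of precision and recall, so for a hypothetical precision of 0.75 and recall of 0.6 it is 2 * 0.75 * 0.6 / (0.75 + 0.6) = 0.9 / 1.35, about 0.667.
print(f1_score(0.75, 0.6))  # about 0.667
print(f1_score(0.0, 0.0))   # the guarded case returns 0.0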
Level 9: The ROC Curve and AUC
import numpy as np
def calAUC(prob, labels):
    '''
    Compute the AUC and return it
    :param prob: the model's predicted probabilities of being Positive, type ndarray
    :param labels: the true labels, where 1 means Positive and 0 means Negative, type ndarray
    :return: AUC, type float
    '''
#********* Begin *********#
    # Sort the samples by predicted probability and read off the labels.
    a = list(zip(prob, labels))
    rank = [label for _, label in sorted(a, key=lambda x: x[0])]
    # 1-based ranks of the positive samples in the sorted order.
    rankList = [i + 1 for i in range(len(rank)) if rank[i] == 1]
    posNum = 0
    negNum = 0
    for i in range(len(labels)):
        if labels[i] == 1:
            posNum += 1
        else:
            negNum += 1
    # Rank-statistic form: AUC = (sum of positive ranks - P(P+1)/2) / (P*N).
    auc = (sum(rankList) - (posNum * (posNum + 1)) / 2) / (posNum * negNum)
    return auc
#********* End *********#
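A toy check of the rank formula (probabilities invented for illustration): sorted by probability the labels read 0, 1, 0, 1, so the positive ranks are 2 and 4 and AUC = (6 - 3) / (2 * 2) = 0.75.
import numpy as np
prob = np.array([0.1, 0.4, 0.35, 0.8])
labels = np.array([0, 0, 1, 1])
print(calAUC(prob, labels))  # 0.75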
Level 10: Classification Metrics in sklearn
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
def classification_performance(y_true, y_pred, y_prob):
    '''
    Return the accuracy, precision, recall, F1 score and AUC
    :param y_true: true labels, type `ndarray`
    :param y_pred: labels predicted by the model, type `ndarray`
    :param y_prob: the model's predicted probabilities of being `Positive`, type `ndarray`
    :return: a tuple of five floats: accuracy, precision, recall, F1 score, AUC
    '''
#********* Begin *********#
    return (accuracy_score(y_true, y_pred),
            precision_score(y_true, y_pred),
            recall_score(y_true, y_pred),
            f1_score(y_true, y_pred),
            roc_auc_score(y_true, y_prob))
#********* End *********#
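A minimal usage sketch with made-up arrays:
import numpy as np
y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0])
y_prob = np.array([0.9, 0.3, 0.4, 0.8, 0.2])
print(classification_performance(y_true, y_pred, y_prob))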
Level 2: The Normal Equation Solution for Linear Regression
#encoding=utf8
import numpy as np
def mse_score(y_predict, y_test):
    '''
    input: y_predict(ndarray): predicted values
           y_test(ndarray): true values
    output: mse(float): value of the MSE loss
    '''
#********* Begin *********#
mse = np.mean((y_predict-y_test)**2)
#********* End *********#
return mse
class LinearRegression:
    def __init__(self):
        '''Initialize the linear regression model'''
        self.theta = None
    def fit_normal(self, train_data, train_label):
        '''
        input: train_data(ndarray): training samples
               train_label(ndarray): training labels
        '''
#********* Begin *********#
        # Prepend a column of ones for the intercept term.
        x = np.hstack([np.ones((len(train_data), 1)), train_data])
        # Normal equation: theta = (X^T X)^(-1) X^T y
        self.theta = np.linalg.inv(x.T.dot(x)).dot(x.T).dot(train_label)
#********* End *********#
return self.theta
    def predict(self, test_data):
        '''
        input: test_data(ndarray): test samples
        '''
#********* Begin *********#
        # Same bias column as in training, then apply theta.
        x = np.hstack([np.ones((len(test_data), 1)), test_data])
        return x.dot(self.theta)
#********* End *********#
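A minimal sketch on exactly linear toy data, where the normal equation should recover the intercept and slope:
import numpy as np
X = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([3.0, 5.0, 7.0, 9.0])  # y = 2x + 1
lr = LinearRegression()
lr.fit_normal(X, y)
print(lr.theta)                       # approximately [1. 2.]
print(lr.predict(np.array([[5.0]])))  # approximately [11.]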
Level 3: Measuring the Performance of Linear Regression
#encoding=utf8
import numpy as np
#mse
def mse_score(y_predict,y_test):
mse = np.mean((y_predict-y_test)**2)
return mse
#r2
def r2_score(y_predict,y_test):
    '''
    input: y_predict(ndarray): predicted values
           y_test(ndarray): true values
    output: r2(float): the R^2 value
    '''
#********* Begin *********#
    # R^2 = 1 - MSE / Var(y): one minus the residual variance over the total variance.
    r2 = 1 - mse_score(y_predict, y_test) / np.var(y_test)
#********* End *********#
return r2
class LinearRegression:
    def __init__(self):
        '''Initialize the linear regression model'''
        self.theta = None
    def fit_normal(self, train_data, train_label):
        '''
        input: train_data(ndarray): training samples
               train_label(ndarray): training labels
        '''
#********* Begin *********#
        x = np.hstack([np.ones((len(train_data), 1)), train_data])
        self.theta = np.linalg.inv(x.T.dot(x)).dot(x.T).dot(train_label)
#********* End *********#
return self
    def predict(self, test_data):
        '''
        input: test_data(ndarray): test samples
        '''
#********* Begin *********#
x = np.hstack([np.ones((len(test_data),1)),test_data])
return x.dot(self.theta)
#********* End *********#
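A quick check of both metrics on small made-up vectors:
import numpy as np
y_test = np.array([3.0, -0.5, 2.0, 7.0])
y_predict = np.array([2.5, 0.0, 2.0, 8.0])
print(mse_score(y_predict, y_test))  # 0.375
print(r2_score(y_predict, y_test))   # about 0.9486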
Level 4: scikit-learn Linear Regression in Practice - Boston House Price Prediction
#encoding=utf8
#********* Begin *********#
import pandas as pd
from sklearn.linear_model import LinearRegression
# Load the training features/labels and the test features for this task.
train_data = pd.read_csv('./step3/train_data.csv')
train_label = pd.read_csv('./step3/train_label.csv')
train_label = train_label['target']
test_data = pd.read_csv('./step3/test_data.csv')
# Fit an ordinary least-squares model and predict on the test set.
lr = LinearRegression()
lr.fit(train_data, train_label)
predict = lr.predict(test_data)
# Save the predictions in the expected format.
df = pd.DataFrame({'result': predict})
df.to_csv('./step3/result.csv', index=False)
#********* End *********#
Level 1: Implementing the kNN Algorithm
#encoding=utf8
import numpy as np
class kNNClassifier(object):
def __init__(self, k):
        '''
        Constructor
        :param k: the k in the kNN algorithm
        '''
self.k = k
        # Holds the training data, type ndarray
        self.train_feature = None
        # Holds the training labels, type ndarray
self.train_label = None
def fit(self, feature, label):
        '''
        Training step of kNN: simply memorize the data
        :param feature: training data, type ndarray
        :param label: training labels, type ndarray
        :return: nothing
        '''
#********* Begin *********#
self.train_feature = np.array(feature)
self.train_label = np.array(label)
#********* End *********#
def predict(self, feature):
        '''
        Prediction step of kNN
        :param feature: test data, type ndarray
        :return: predictions, type ndarray or list
        '''
        #********* Begin *********#
        def _predict(test_data):
            # Euclidean distance to every training sample.
            distances = [np.sqrt(np.sum((test_data - vec) ** 2)) for vec in self.train_feature]
            nearest = np.argsort(distances)
            topK = [self.train_label[i] for i in nearest[:self.k]]
            # Majority vote among the k nearest neighbors.
            votes = {}
            result = None
            max_count = 0
            for label in topK:
                votes[label] = votes.get(label, 0) + 1
                if votes[label] > max_count:
                    max_count = votes[label]
                    result = label
            return result
predict_result = [_predict(test_data) for test_data in feature]
return predict_result
#********* End *********#
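A minimal sketch with two well-separated toy clusters; with k=3 the votes should be unanimous:
import numpy as np
train_x = np.array([[0, 0], [0, 1], [1, 0], [5, 5], [5, 6], [6, 5]])
train_y = np.array([0, 0, 0, 1, 1, 1])
clf = kNNClassifier(k=3)
clf.fit(train_x, train_y)
print(clf.predict(np.array([[0.5, 0.5], [5.5, 5.5]])))  # expect [0, 1]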
Level 2: Red Wine Classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
def classification(train_feature, train_label, test_feature):
    '''
    Classify the red wines in test_feature
    :param train_feature: training data, type ndarray
    :param train_label: training labels, type ndarray
    :param test_feature: test data, type ndarray
    :return: predicted classes for the test data
    '''
#********* Begin *********#
    # Standardize the features; fit the scaler on the training data only,
    # then apply the same transform to the test data.
    scaler = StandardScaler()
    train_feature = scaler.fit_transform(train_feature)
    test_feature = scaler.transform(test_feature)
    clf = KNeighborsClassifier()
    clf.fit(train_feature, train_label)
    return clf.predict(test_feature)
#********* End **********#
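A usage sketch against sklearn's bundled wine dataset (assuming sklearn.datasets is available in the environment):
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
X, y = load_wine(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(X, y, random_state=0)
pred = classification(train_x, train_y, test_x)
print((pred == test_y).mean())  # accuracy on the held-out split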
Level 3: The Naive Bayes Classification Algorithm
import numpy as np
class NaiveBayesClassifier(object):
def __init__(self):
        '''
        self.label_prob holds the probability of each class in the data.
        For example, {0: 0.333, 1: 0.667} means class 0 appears with
        probability 0.333 and class 1 with probability 0.667.
        '''
self.label_prob = {}
        '''
        self.condition_prob holds, for each class, the probability of each
        feature value given that class.
        For example, if the training features are
            [[2, 1, 1],
             [1, 2, 2],
             [2, 2, 2],
             [2, 1, 2],
             [1, 2, 3]]
        and the labels are [1, 0, 1, 0, 1], then:
        given label 0, column 0 takes value 1 with probability 0.5 and value 2 with probability 0.5;
        given label 0, column 1 takes value 1 with probability 0.5 and value 2 with probability 0.5;
        given label 0, column 2 takes value 1 with probability 0, value 2 with probability 1, and value 3 with probability 0;
        given label 1, column 0 takes value 1 with probability 0.333 and value 2 with probability 0.666;
        given label 1, column 1 takes value 1 with probability 0.333 and value 2 with probability 0.666;
        given label 1, column 2 takes values 1, 2 and 3 each with probability 0.333;
        so self.condition_prob ends up as:
        {
            0: {
                0: {1: 0.5, 2: 0.5},
                1: {1: 0.5, 2: 0.5},
                2: {1: 0, 2: 1, 3: 0}
            },
            1: {
                0: {1: 0.333, 2: 0.666},
                1: {1: 0.333, 2: 0.666},
                2: {1: 0.333, 2: 0.333, 3: 0.333}
            }
        }
        '''
self.condition_prob = {}
def fit(self, feature, label):
        '''
        Train the model, storing the probabilities in self.label_prob and self.condition_prob
        :param feature: ndarray of all training features
        :param label: ndarray of all training labels
        :return: nothing
        '''
#********* Begin *********#
        row_num = len(feature)
        col_num = len(feature[0])
        # Count each class, then normalize into class priors.
        for c in label:
            if c in self.label_prob:
                self.label_prob[c] += 1
            else:
                self.label_prob[c] = 1
        for key in self.label_prob.keys():
            self.label_prob[key] /= row_num
            # Initialize a zero count for every value of every feature column.
            self.condition_prob[key] = {}
            for i in range(col_num):
                self.condition_prob[key][i] = {}
                for k in np.unique(feature[:, i], axis=0):
                    self.condition_prob[key][i][k] = 0
        # Count occurrences of each feature value per class; every possible
        # value was initialized above, so just accumulate.
        for i in range(len(feature)):
            for j in range(len(feature[i])):
                self.condition_prob[label[i]][j][feature[i][j]] += 1
        # Normalize the counts into conditional probabilities.
        for label_key in self.condition_prob.keys():
            for k in self.condition_prob[label_key].keys():
                total = 0
                for v in self.condition_prob[label_key][k].values():
                    total += v
                for kk in self.condition_prob[label_key][k].keys():
                    self.condition_prob[label_key][k][kk] /= total
#********* End *********#
def predict(self, feature):
        '''
        Predict the given samples and return the results
        :param feature: ndarray of all test features
        :return: predictions, type ndarray
        '''
# ********* Begin *********#
        result = []
        for i, f in enumerate(feature):
            # Posterior (up to a constant) for each candidate class.
            prob = np.zeros(len(self.label_prob.keys()))
            ii = 0
            for label, label_prob in self.label_prob.items():
                # Prior times the product of the conditional probabilities.
                prob[ii] = label_prob
                for j in range(len(feature[0])):
                    prob[ii] *= self.condition_prob[label][j][f[j]]
                ii += 1
            # Take the most probable class.
            result.append(list(self.label_prob.keys())[np.argmax(prob)])
        return np.array(result)
#********* End *********#
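Running the classifier on the small dataset from the docstring above; it should reproduce the training labels, since label 0 assigns probability 0 to col2 = 1:
import numpy as np
clf = NaiveBayesClassifier()
feature = np.array([[2, 1, 1], [1, 2, 2], [2, 2, 2], [2, 1, 2], [1, 2, 3]])
label = np.array([1, 0, 1, 0, 1])
clf.fit(feature, label)
print(clf.predict(np.array([[2, 1, 1], [1, 2, 2]])))  # expect [1 0]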
Level 4: Laplace Smoothing
import numpy as np
class NaiveBayesClassifier(object):
def __init__(self):
        '''
        self.label_prob holds the probability of each class in the data.
        For example, {0: 0.333, 1: 0.667} means class 0 appears with
        probability 0.333 and class 1 with probability 0.667.
        '''
self.label_prob = {}
        '''
        self.condition_prob holds, for each class, the probability of each
        feature value given that class.
        For example, if the training features are
            [[2, 1, 1],
             [1, 2, 2],
             [2, 2, 2],
             [2, 1, 2],
             [1, 2, 3]]
        and the labels are [1, 0, 1, 0, 1], then:
        given label 0, column 0 takes value 1 with probability 0.5 and value 2 with probability 0.5;
        given label 0, column 1 takes value 1 with probability 0.5 and value 2 with probability 0.5;
        given label 0, column 2 takes value 1 with probability 0, value 2 with probability 1, and value 3 with probability 0;
        given label 1, column 0 takes value 1 with probability 0.333 and value 2 with probability 0.666;
        given label 1, column 1 takes value 1 with probability 0.333 and value 2 with probability 0.666;
        given label 1, column 2 takes values 1, 2 and 3 each with probability 0.333;
        so self.condition_prob ends up as:
        {
            0: {
                0: {1: 0.5, 2: 0.5},
                1: {1: 0.5, 2: 0.5},
                2: {1: 0, 2: 1, 3: 0}
            },
            1: {
                0: {1: 0.333, 2: 0.666},
                1: {1: 0.333, 2: 0.666},
                2: {1: 0.333, 2: 0.333, 3: 0.333}
            }
        }
        '''
self.condition_prob = {}
def fit(self, feature, label):
        '''
        Train the model, storing the probabilities in self.label_prob and self.condition_prob
        :param feature: ndarray of all training features
        :param label: ndarray of all training labels
        :return: nothing
        '''
        #********* Begin *********#
        row_num = len(feature)
        col_num = len(feature[0])
        unique_label_count = len(set(label))
        # Count each class, then apply Laplace smoothing to the priors:
        # P(c) = (count(c) + 1) / (row_num + number of classes).
        for c in label:
            if c in self.label_prob:
                self.label_prob[c] += 1
            else:
                self.label_prob[c] = 1
        for key in self.label_prob.keys():
            self.label_prob[key] += 1
            self.label_prob[key] /= (unique_label_count + row_num)
            # Start every value count at 1 instead of 0 (Laplace smoothing).
            self.condition_prob[key] = {}
            for i in range(col_num):
                self.condition_prob[key][i] = {}
                for k in np.unique(feature[:, i], axis=0):
                    self.condition_prob[key][i][k] = 1
        # Accumulate the observed counts on top of the initial 1s.
        for i in range(len(feature)):
            for j in range(len(feature[i])):
                self.condition_prob[label[i]][j][feature[i][j]] += 1
        # Normalize. Because the counts started at 1, the sum over all values
        # of feature k already equals (class count + number of distinct values),
        # which is exactly the smoothed denominator.
        for label_key in self.condition_prob.keys():
            for k in self.condition_prob[label_key].keys():
                total = 0
                for v in self.condition_prob[label_key][k].values():
                    total += v
                for kk in self.condition_prob[label_key][k].keys():
                    self.condition_prob[label_key][k][kk] /= total
#********* End *********#
def predict(self, feature):
        '''
        Predict the given samples and return the results
        :param feature: ndarray of all test features
        :return: predictions, type ndarray
        '''
result = []
# 对每条测试数据都进行预测
for i, f in enumerate(feature):
# 可能的类别的概率
prob = np.zeros(len(self.label_prob.keys()))
ii = 0
for label, label_prob in self.label_prob.items():
# 计算概率
prob[ii] = label_prob
for j in range(len(feature[0])):
prob[ii] *= self.condition_prob[label][j][f[j]]
ii += 1
# 取概率最大的类别作为结果
result.append(list(self.label_prob.keys())[np.argmax(prob)])
return np.array(result)
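With the same toy data, smoothing turns the zero probabilities from the previous level into small positive ones, e.g. P(col2 = 1 | label 0) becomes (0 + 1) / (2 + 3) = 0.2:
import numpy as np
clf = NaiveBayesClassifier()
feature = np.array([[2, 1, 1], [1, 2, 2], [2, 2, 2], [2, 1, 2], [1, 2, 3]])
label = np.array([1, 0, 1, 0, 1])
clf.fit(feature, label)
print(clf.condition_prob[0][2])  # {1: 0.2, 2: 0.6, 3: 0.2}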
Level 5: News Topic Classification
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
def news_predict(train_sample, train_label, test_sample):
    '''
    Train a model and return its predictions
    :param train_sample: raw news texts of the training set, type ndarray
    :param train_label: topic labels of the training texts, type ndarray
    :param test_sample: raw news texts of the test set, type ndarray
    :return: predictions, type ndarray
    '''
#********* Begin *********#
    # Bag-of-words counts; fit the vocabulary on the training texts only.
    vec = CountVectorizer()
    train_sample = vec.fit_transform(train_sample)
    test_sample = vec.transform(test_sample)
    # Reweight the counts with TF-IDF.
    tfidf = TfidfTransformer()
    train_sample = tfidf.fit_transform(train_sample)
    test_sample = tfidf.transform(test_sample)
    # Multinomial naive Bayes with light smoothing.
    mnb = MultinomialNB(alpha=0.01)
    mnb.fit(train_sample, train_label)
    predict = mnb.predict(test_sample)
    return predict
#********* End *********#
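A toy illustration with three invented one-line "news" texts (real runs use the task's dataset):
import numpy as np
train = np.array(["stock market rises today", "the team wins the match", "new vaccine approved"])
labels = np.array([0, 1, 2])
print(news_predict(train, labels, np.array(["market falls", "match tonight"])))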
Level 2: The AdaBoost Algorithm
# encoding=utf8
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
# AdaBoost algorithm
class AdaBoost:
    '''
    input: n_estimators(int): number of boosting rounds
           learning_rate(float): shrinkage coefficient applied to the weak classifiers' weights
    '''
def __init__(self, n_estimators=50, learning_rate=1.0):
self.clf_num = n_estimators
self.learning_rate = learning_rate
    def init_args(self, datasets, labels):
        self.X = datasets
        self.Y = labels
        self.M, self.N = datasets.shape
        # Collection of weak classifiers
        self.clf_sets = []
        # Initialize the sample weights (as an ndarray so .shape works below)
        self.weights = np.ones(self.M) / self.M
        # Coefficients alpha of the weak classifiers
        self.alpha = []
# ********* Begin *********#
    def _G(self, features, labels, weights):
        '''
        input: features(ndarray): sample features
               labels(ndarray): sample labels
               weights(ndarray): sample weights
        '''
        # Weighted error: sum the weights of the misclassified samples.
        e = 0
        for i in range(weights.shape[0]):
            if labels[i] != np.sign(self.G(self.X[i], self.clf_sets, self.alpha)):
                e += weights[i]
        return e
    # Compute the classifier coefficient alpha from the weighted error
    def _alpha(self, error):
        return 0.5 * np.log((1 - error) / error)
    # Normalization factor Z
    def _Z(self, weights, a, clf):
        return np.sum([weights[i] * np.exp(-a * self.Y[i] * self.G(self.X[i], clf, self.alpha))
                       for i in range(self.M)])
    # Weight update
    def _w(self, a, clf, Z):
        w = np.zeros(self.weights.shape)
        for i in range(self.M):
            w[i] = self.weights[i] * np.exp(-a * self.Y[i] * self.G(self.X[i], clf, self.alpha)) / Z
        self.weights = w
    # Linear combination of the weak classifiers G(x)
    def G(self, x, v, direct):
        result = 0
        x = x.reshape(1, -1)
        for i in range(len(v)):
            result += v[i].predict(x)[0] * direct[i]
        return result
    def fit(self, X, y):
        '''
        X(ndarray): training data
        y(ndarray): training labels
        '''
        self.init_args(X, y)
        # A hand-rolled boosting loop (record the classifier, compute alpha,
        # the normalization factor and the weight update) is sketched below
        # but left disabled; predict() delegates to sklearn's AdaBoostClassifier.
        '''
        for i in range(100):
            classifier = DecisionTreeClassifier(max_depth=3)
            classifier.fit(X, y)
            self.clf_sets.append(classifier)
            e = 0
            for i in range(len(self.weights)):
                temp = -1
                if classifier.predict(X[i].reshape(1, -1)) > 0:
                    temp = 1
                if self.Y[i] != temp:
                    e += self.weights[i]
            a = self._alpha(e)
            self.alpha.append(a)
            z = self._Z(self.weights, a, self.clf_sets)
            self._w(a, self.clf_sets, z)
        '''
    def predict(self, data):
        '''
        input: data(ndarray): a single sample
        output: +1 if predicted positive, -1 if negative
        '''
        # The submitted answer simply trains sklearn's AdaBoostClassifier on
        # the stored data and classifies the sample with it.
        ada = AdaBoostClassifier(n_estimators=100, learning_rate=0.1)
        ada.fit(self.X, self.Y)
        data = data.reshape(1, -1)
        predict = ada.predict(data)
        return predict[0]
# ********* End *********#
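Since predict() delegates to sklearn, a quick smoke test only needs fit() to store the data (the two clusters below are synthetic):
import numpy as np
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2) + 2, rng.randn(20, 2) - 2])
y = np.array([1] * 20 + [-1] * 20)
model = AdaBoost()
model.fit(X, y)
print(model.predict(X[0]))   # expect 1
print(model.predict(X[-1]))  # expect -1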
Level 3: AdaBoost in sklearn
#encoding=utf8
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
def ada_classifier(train_data,train_label,test_data):
    '''
    input: train_data(ndarray): training data
           train_label(ndarray): training labels
           test_data(ndarray): test data
    output: predict(ndarray): predictions
    '''
#********* Begin *********#
    ada = AdaBoostClassifier(n_estimators=100, learning_rate=1.0)
    ada.fit(train_data, train_label)
    predict = ada.predict(test_data)
#********* End *********#
return predict
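A usage sketch on synthetic data from sklearn's make_classification helper:
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=200, random_state=0)
print(ada_classifier(X[:150], y[:150], X[150:])[:10])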
Level 2: Linear Support Vector Machines
#encoding=utf8
from sklearn.svm import LinearSVC
def linearsvc_predict(train_data,train_label,test_data):
    '''
    input: train_data(ndarray): training data
           train_label(ndarray): training labels
           test_data(ndarray): test data
    output: predict(ndarray): predicted labels for the test set
    '''
#********* Begin *********#
    # dual=False is preferred when n_samples > n_features.
    clf = LinearSVC(dual=False)
    clf.fit(train_data, train_label)
    predict = clf.predict(test_data)
#********* End *********#
return predict
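A usage sketch on two synthetic blobs, which a linear SVM separates easily:
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=100, centers=2, random_state=0)
print(linearsvc_predict(X[:80], y[:80], X[80:]))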
Level 3: Nonlinear Support Vector Machines
#encoding=utf8
from sklearn.svm import SVC
def svc_predict(train_data,train_label,test_data,kernel):
    '''
    input: train_data(ndarray): training data
           train_label(ndarray): training labels
           test_data(ndarray): test data
           kernel(str): which kernel to use:
               'linear': linear kernel
               'poly': polynomial kernel
               'rbf': radial basis function (Gaussian) kernel
    output: predict(ndarray): predicted labels for the test set
    '''
#********* Begin *********#
    clf = SVC(kernel=kernel)
    clf.fit(train_data, train_label)
    predict = clf.predict(test_data)
#********* End *********#
return predict
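XOR-style data is the classic case where a linear kernel fails but 'rbf' works; the points below are made up for illustration:
import numpy as np
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]] * 10, dtype=float)
y = np.array([0, 1, 1, 0] * 10)
print(svc_predict(X, y, X[:4], 'rbf'))  # should recover [0 1 1 0]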
Level 4: The Sequential Minimal Optimization (SMO) Algorithm
#encoding=utf8
import numpy as np
class smo:
def __init__(self, max_iter=100, kernel='linear'):
        '''
        input: max_iter(int): maximum number of training iterations
               kernel(str): kernel type, 'linear' for linear or 'poly' for polynomial
        '''
        self.max_iter = max_iter
        self._kernel = kernel
    # Initialize the model
    def init_args(self, features, labels):
        self.m, self.n = features.shape
        self.X = features
        self.Y = labels
        self.b = 0.0
        self.alpha = np.ones(self.m)
        # Cache every E_i in a list
        self.E = [self._E(i) for i in range(self.m)]
        # Penalty parameter for misclassification
        self.C = 1.0
#********* Begin *********#
    # KKT conditions
def _KKT(self, i):
y_g = self._g(i)*self.Y[i]
if self.alpha[i] == 0:
return y_g >= 1
elif 0 < self.alpha[i] < self.C:
return y_g == 1
else:
return y_g <= 1
    # g(x): the model's prediction for sample X[i]
def _g(self, i):
r = self.b
for j in range(self.m):
r += self.alpha[j]*self.Y[j]*self.kernel(self.X[i], self.X[j])
return r
    # Kernel function; the polynomial kernel just adds a squared term
def kernel(self, x1, x2):
if self._kernel == 'linear':
return sum([x1[k]*x2[k] for k in range(self.n)])
elif self._kernel == 'poly':
return (sum([x1[k]*x2[k] for k in range(self.n)]) + 1)**2
return 0
    # E(i) is the difference between the prediction g(i) and the label Y[i]
def _E(self, i):
return self._g(i) - self.Y[i]
    # Choose the pair of alpha variables to optimize
    def _init_alpha(self):
        # The outer loop first scans samples with 0 < alpha < C,
        # checking whether they violate the KKT conditions,
        index_list = [i for i in range(self.m) if 0 < self.alpha[i] < self.C]
        # and then falls back to the rest of the training set.
        non_satisfy_list = [i for i in range(self.m) if i not in index_list]
        index_list.extend(non_satisfy_list)
        for i in index_list:
            if self._KKT(i):
                continue
            E1 = self.E[i]
            # If E1 is positive, pick the j with the smallest E;
            # if E1 is negative, pick the j with the largest E.
            if E1 >= 0:
                j = min(range(self.m), key=lambda x: self.E[x])
            else:
                j = max(range(self.m), key=lambda x: self.E[x])
            return i, j
        # Every sample satisfies the KKT conditions: nothing left to optimize.
        return None
    # Clip alpha to the box [L, H]
    def _compare(self, _alpha, L, H):
        if _alpha > H:
            return H
        elif _alpha < L:
            return L
        else:
            return _alpha
    # Training
    def fit(self, features, labels):
        '''
        input: features(ndarray): features
               labels(ndarray): labels
        '''
        self.init_args(features, labels)
        for t in range(self.max_iter):
            pair = self._init_alpha()
            if pair is None:
                break
            i1, i2 = pair
            # Bounds L and H on alpha2
if self.Y[i1] == self.Y[i2]:
L = max(0, self.alpha[i1]+self.alpha[i2]-self.C)
H = min(self.C, self.alpha[i1]+self.alpha[i2])
else:
L = max(0, self.alpha[i2]-self.alpha[i1])
H = min(self.C, self.C+self.alpha[i2]-self.alpha[i1])
E1 = self.E[i1]
E2 = self.E[i2]
# eta=K11+K22-2K12
eta = self.kernel(self.X[i1], self.X[i1]) + self.kernel(self.X[i2], self.X[i2]) - 2*self.kernel(self.X[i1], self.X[i2])
if eta <= 0:
continue
alpha2_new_unc = self.alpha[i2] + self.Y[i2] * (E2 - E1) / eta
alpha2_new = self._compare(alpha2_new_unc, L, H)
alpha1_new = self.alpha[i1] + self.Y[i1] * self.Y[i2] * (self.alpha[i2] - alpha2_new)
b1_new = -E1 - self.Y[i1] * self.kernel(self.X[i1], self.X[i1]) * (alpha1_new-self.alpha[i1]) - self.Y[i2] * self.kernel(self.X[i2], self.X[i1]) * (alpha2_new-self.alpha[i2])+ self.b
b2_new = -E2 - self.Y[i1] * self.kernel(self.X[i1], self.X[i2]) * (alpha1_new-self.alpha[i1]) - self.Y[i2] * self.kernel(self.X[i2], self.X[i2]) * (alpha2_new-self.alpha[i2])+ self.b
if 0 < alpha1_new < self.C:
b_new = b1_new
elif 0 < alpha2_new < self.C:
b_new = b2_new
else:
                # fall back to the midpoint
b_new = (b1_new + b2_new) / 2
            # Update the parameters
self.alpha[i1] = alpha1_new
self.alpha[i2] = alpha2_new
self.b = b_new
self.E[i1] = self._E(i1)
self.E[i2] = self._E(i2)
    def predict(self, data):
        '''
        input: data(ndarray): a single sample
        output: +1 if predicted positive, -1 if negative
        '''
r = self.b
for i in range(self.m):
r += self.alpha[i] * self.Y[i] * self.kernel(data, self.X[i])
return 1 if r > 0 else -1
#********* End *********#
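A smoke test on the classic three-point example (positive points (3,3) and (4,3), negative point (1,1)); a query far inside the positive region should come back +1:
import numpy as np
X = np.array([[3.0, 3.0], [4.0, 3.0], [1.0, 1.0]])
y = np.array([1, 1, -1])
model = smo(max_iter=100)
model.fit(X, y)
print(model.predict(np.array([4.0, 4.0])))  # expect 1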
Level 5: Support Vector Regression
#encoding=utf8
from sklearn.svm import SVR
def svr_predict(train_data,train_label,test_data):
    '''
    input: train_data(ndarray): training data
           train_label(ndarray): training labels
           test_data(ndarray): test data
    output: predict(ndarray): predicted values for the test set
    '''
#********* Begin *********#
    # An RBF kernel with a large C and a small gamma works well for this task.
    svr = SVR(kernel='rbf', C=100, gamma=0.001, epsilon=0.1)
    svr.fit(train_data, train_label)
    predict = svr.predict(test_data)
#********* End *********#
return predict
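A usage sketch on a noiseless sine curve (synthetic data, just to show the call pattern):
import numpy as np
rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(40, 1), axis=0)
y = np.sin(X).ravel()
print(svr_predict(X[:30], y[:30], X[30:]))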
Level 2: How the Decision Tree Algorithm Works
#encoding=utf8
import numpy as np
def mse_score(y_predict, y_test):
    '''
    input: y_predict(ndarray): predicted values
           y_test(ndarray): true values
    output: mse(float): value of the MSE loss
    '''
#********* Begin *********#
mse = np.mean((y_predict-y_test)**2)
#********* End *********#
return mse
class LinearRegression:
    def __init__(self):
        '''Initialize the linear regression model'''
        self.theta = None
    def fit_normal(self, train_data, train_label):
        '''
        input: train_data(ndarray): training samples
               train_label(ndarray): training labels
        '''
#********* Begin *********#
        x = np.hstack([np.ones((len(train_data), 1)), train_data])
        self.theta = np.linalg.inv(x.T.dot(x)).dot(x.T).dot(train_label)
#********* End *********#
return self.theta
    def predict(self, test_data):
        '''
        input: test_data(ndarray): test samples
        '''
'''
#********* Begin *********#
x = np.hstack([np.ones((len(test_data),1)),test_data])
return x.dot(self.theta)
#********* End *********#
Level 3: Implementing an ID3 Decision Tree by Hand
import numpy as np
# Compute the entropy
def calcInfoEntropy(label):
    '''
    input:
        label(ndarray): sample labels
    output:
        InfoEntropy(float): entropy
    '''
label_set = set(label)
InfoEntropy = 0
for l in label_set:
count = 0
for j in range(len(label)):
if label[j] == l:
count += 1
        # Probability of this label in the dataset
        p = count / len(label)
        # Entropy: H(D) = -sum over labels of p * log2(p)
        InfoEntropy -= p * np.log2(p)
return InfoEntropy
# Compute one term of the conditional entropy
def calcHDA(feature, label, index, value):
    '''
    input:
        feature(ndarray): sample features
        label(ndarray): sample labels
        index(int): index of the feature column to use
        value(int): the value of that feature column to condition on
    output:
        HDA(float): the weighted entropy of the subset where feature[:, index] == value
    '''
count = 0
    # sub_feature and sub_label are the subset obtained by filtering on the feature column and value
sub_feature = []
sub_label = []
for i in range(len(feature)):
if feature[i][index] == value:
count += 1
sub_feature.append(feature[i])
sub_label.append(label[i])
pHA = count / len(feature)
e = calcInfoEntropy(sub_label)
HDA = pHA * e
return HDA
# Compute the information gain
def calcInfoGain(feature, label, index):
    '''
    input:
        feature(ndarray): the feature part of the test case
        label(ndarray): the label part of the test case
        index(int): the index of the feature column, i.e. which feature the
                    gain is computed for; index=0 means the first feature
    output:
        InfoGain(float): information gain
    '''
base_e = calcInfoEntropy(label)
f = np.array(feature)
    # Set of values taken by the chosen feature column
f_set = set(f[:, index])
sum_HDA = 0
    # Conditional entropy H(D|A)
for value in f_set:
sum_HDA += calcHDA(feature, label, index, value)
    # Information gain = H(D) - H(D|A)
InfoGain = base_e - sum_HDA
return InfoGain
# Find the feature with the highest information gain
def getBestFeature(feature, label):
    '''
    input:
        feature(ndarray): sample features
        label(ndarray): sample labels
    output:
        best_feature(int): index of the feature with the highest information gain
    '''
#*********Begin*********#
max_infogain = 0
best_feature = 0
for i in range(len(feature[0])):
infogain = calcInfoGain(feature, label, i)
if infogain > max_infogain:
max_infogain = infogain
best_feature = i
#*********End*********#
return best_feature
# Build the decision tree
def createTree(feature, label):
    '''
    input:
        feature(ndarray): training sample features
        label(ndarray): training sample labels
    output:
        tree(dict): the decision tree model
    '''
#*********Begin*********#
    # All samples share the same label, so stop splitting
if len(set(label)) == 1:
return label[0]
    # With only one feature left, or all samples identical, fall back to a majority vote
if len(feature[0]) == 1 or len(np.unique(feature, axis=0)) == 1:
vote = {}
for l in label:
if l in vote.keys():
vote[l] += 1
else:
vote[l] = 1
max_count = 0
vote_label = None
for k, v in vote.items():
if v > max_count:
max_count = v
vote_label = k
return vote_label
    # Pick the split feature by information gain
best_feature = getBestFeature(feature, label)
tree = {best_feature: {}}
f = np.array(feature)
    # All values taken by best_feature
f_set = set(f[:, best_feature])
    # Build the subset (sub_feature, sub_label) for each value of the split feature
for v in f_set:
sub_feature = []
sub_label = []
for i in range(len(feature)):
if feature[i][best_feature] == v:
sub_feature.append(feature[i])
sub_label.append(label[i])
            # Recursively build the subtree
tree[best_feature][v] = createTree(sub_feature, sub_label)
#*********End*********#
return tree
# Classify with the decision tree
def dt_clf(train_feature, train_label, test_feature):
    '''
    input:
        train_feature(ndarray): training sample features
        train_label(ndarray): training sample labels
        test_feature(ndarray): test sample features
    output:
        predict(ndarray): predicted labels for the test samples
    '''
#*********Begin*********#
result = []
tree = createTree(train_feature,train_label)
def classify(tree, feature):
if not isinstance(tree, dict):
return tree
t_index, t_value = list(tree.items())[0]
f_value = feature[t_index]
if isinstance(t_value, dict):
classLabel = classify(tree[t_index][f_value], feature)
return classLabel
else:
return t_value
for feature in test_feature:
result.append(classify(tree, feature))
predict = np.array(result)
#*********End*********#
return predict
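A minimal sketch on a toy table whose first column fully determines the label, so the learned tree should be {0: {0: 0, 1: 1}}:
import numpy as np
train_x = np.array([[0, 1], [0, 0], [1, 1], [1, 0]])
train_y = np.array([0, 0, 1, 1])
print(dt_clf(train_x, train_y, np.array([[0, 1], [1, 0]])))  # expect [0 1]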