Decision tree learning consists of three parts: feature selection, tree generation, and tree pruning. The most common algorithms are ID3, C4.5, and CART.
The goal of feature selection is to pick out features that have the power to classify the training data. The key to feature selection is the criterion used. Common criteria are the following:
(1) Information gain of feature $A$ for training set $D$ (ID3):

$$g(D, A)=H(D)-H(D|A)$$

where

$$H(D)=-\sum_{k=1}^{K} \frac{|C_{k}|}{|D|} \log_{2} \frac{|C_{k}|}{|D|}, \qquad H(D|A)=\sum_{i=1}^{n} \frac{|D_{i}|}{|D|} H(D_{i})$$

Here $H(D)$ is the entropy of dataset $D$, $H(D_i)$ is the entropy of subset $D_i$, and $H(D|A)$ is the conditional entropy of $D$ given feature $A$. $D_i$ is the subset of samples in $D$ on which feature $A$ takes its $i$-th value, and $C_k$ is the subset of samples in $D$ belonging to class $k$; $n$ is the number of distinct values of $A$ and $K$ is the number of classes.
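A quick sanity check of these formulas on the loan-applicant dataset used in the code below (15 samples, 9 labeled 是 and 6 labeled 否):

$$H(D)=-\frac{9}{15}\log_{2}\frac{9}{15}-\frac{6}{15}\log_{2}\frac{6}{15}\approx 0.971$$

The feature 年龄 splits $D$ into three subsets of 5 samples each, containing 2, 3, and 4 positive labels respectively, so

$$H(D|\text{年龄})=\frac{5}{15}H(D_1)+\frac{5}{15}H(D_2)+\frac{5}{15}H(D_3)\approx 0.888, \qquad g(D,\text{年龄})\approx 0.971-0.888=0.083$$

which matches the info_gain output printed further down.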
(2) Information gain ratio of feature $A$ for training set $D$ (C4.5):

$$g_{R}(D, A)=\frac{g(D, A)}{H_{A}(D)}$$
where

$$H_{A}(D)=-\sum_{i=1}^{n}\frac{|D_{i}|}{|D|}\log_{2}\frac{|D_{i}|}{|D|}$$

$g(D,A)$ is the information gain, $H_{A}(D)$ is the entropy of training set $D$ with respect to the values of feature $A$, and $n$ is the number of distinct values of $A$.
(3) Gini index of sample set $D$ (CART):

$$\operatorname{Gini}(D)=1-\sum_{k=1}^{K}\left(\frac{|C_{k}|}{|D|}\right)^{2}$$

The Gini index of $D$ under a binary split on feature $A$:

$$\operatorname{Gini}(D, A)=\frac{|D_{1}|}{|D|} \operatorname{Gini}\left(D_{1}\right)+\frac{|D_{2}|}{|D|} \operatorname{Gini}\left(D_{2}\right)$$
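To make the Gini criterion concrete, here is a minimal sketch (not part of the original example code) that computes $\operatorname{Gini}(D)$ and $\operatorname{Gini}(D,A)$ for the 有自己的房子 column of the loan dataset used below:

from collections import Counter

def gini(labels):
    # Gini(D) = 1 - sum_k (|C_k| / |D|)^2 over the class counts
    n = len(labels)
    return 1 - sum((c / n) ** 2 for c in Counter(labels).values())

def gini_index(feature_values, labels, split_value):
    # Gini(D, A): binary split into D1 = {A == split_value} and D2 = the rest
    d1 = [l for v, l in zip(feature_values, labels) if v == split_value]
    d2 = [l for v, l in zip(feature_values, labels) if v != split_value]
    n = len(labels)
    return len(d1) / n * gini(d1) + len(d2) / n * gini(d2)

# The 有自己的房子 column and the class column of the 15-sample loan dataset below
house = ['否', '否', '否', '是', '否', '否', '否', '是', '是', '是', '是', '是', '否', '否', '否']
label = ['否', '否', '是', '是', '否', '否', '否', '是', '是', '是', '是', '是', '是', '是', '否']
print(gini_index(house, label, '是'))  # 0.2666..., i.e. about 0.27

This is the smallest Gini index among the four features of this dataset, so CART would also split on 有自己的房子 first, agreeing with the ID3 choice derived below.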
Tree generation: repeatedly choose the locally optimal feature, using maximum information gain, maximum information gain ratio, or minimum Gini index as the selection criterion.
Tree pruning: a fully grown decision tree tends to overfit, so it is pruned to simplify the learned tree. Pruning removes some leaf nodes, or subtrees above them, from the generated tree and turns their parent node (or the root) into a new leaf; a runnable sketch of one common pruning procedure follows.
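The book prunes by minimizing the regularized cost $C_{\alpha}(T)=\sum_{t=1}^{|T|} N_{t}H_{t}(T)+\alpha|T|$. Below is a minimal sketch of the same idea, using scikit-learn's built-in cost-complexity pruning rather than the book's exact algorithm (assumes scikit-learn >= 0.22):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Candidate alpha values for the fully grown tree; larger alpha prunes harder
path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X_train, y_train)
for alpha in path.ccp_alphas:
    clf = DecisionTreeClassifier(random_state=0, ccp_alpha=alpha).fit(X_train, y_train)
    print('alpha={:.4f}  leaves={}  test acc={:.3f}'.format(
        alpha, clf.get_n_leaves(), clf.score(X_test, y_test)))

Each pruning step trades a little training accuracy for a smaller tree; the alpha with the best held-out score gives the pruned tree.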
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from collections import Counter
import math
from math import log
import pprint
Problem 5.1 from the book
# Build the dataset (Table 5.1 in the book)
def create_data():
    datasets = [
        ['青年', '否', '否', '一般', '否'],
        ['青年', '否', '否', '好', '否'],
        ['青年', '是', '否', '好', '是'],
        ['青年', '是', '是', '一般', '是'],
        ['青年', '否', '否', '一般', '否'],
        ['中年', '否', '否', '一般', '否'],
        ['中年', '否', '否', '好', '否'],
        ['中年', '是', '是', '好', '是'],
        ['中年', '否', '是', '非常好', '是'],
        ['中年', '否', '是', '非常好', '是'],
        ['老年', '否', '是', '非常好', '是'],
        ['老年', '否', '是', '好', '是'],
        ['老年', '是', '否', '好', '是'],
        ['老年', '是', '否', '非常好', '是'],
        ['老年', '否', '否', '一般', '否'],
    ]
    columns = ['年龄', '有工作', '有自己的房子', '信贷情况', '类别']
    # Return the dataset and the name of each column
    return datasets, columns

datasets, columns = create_data()
train_data = pd.DataFrame(data=datasets, columns=columns)
# Empirical entropy H(D)
def calc_ent(datasets):
    data_length = len(datasets)
    label_count = {}
    for i in range(data_length):
        label = datasets[i][-1]
        if label not in label_count:
            label_count[label] = 0
        label_count[label] += 1
    ent = -sum([(p / data_length) * log(p / data_length, 2)
                for p in label_count.values()])
    return ent
# def entropy(y):
#     """
#     Entropy of a label sequence
#     """
#     hist = np.bincount(y)
#     ps = hist / np.sum(hist)
#     return -np.sum([p * np.log2(p) for p in ps if p > 0])
# Empirical conditional entropy H(D|A)
def cond_ent(datasets, axis=0):  # axis selects which feature column to condition on
    data_length = len(datasets)
    feature_sets = {}
    for i in range(data_length):
        feature = datasets[i][axis]
        if feature not in feature_sets:
            feature_sets[feature] = []
        feature_sets[feature].append(datasets[i])
    cond_ent = sum([(len(p) / data_length) * calc_ent(p)
                    for p in feature_sets.values()])
    return cond_ent
# Information gain g(D, A) = H(D) - H(D|A)
def info_gain(ent, cond_ent):
    return ent - cond_ent

def info_gain_train(datasets):
    count = len(datasets[0]) - 1
    ent = calc_ent(datasets)
    # ent = entropy(datasets)
    best_feature = []
    for c in range(count):
        c_info_gain = info_gain(ent, cond_ent(datasets, axis=c))
        best_feature.append((c, c_info_gain))
        print('特征({}) - info_gain - {:.3f}'.format(columns[c], c_info_gain))
    # key=lambda x: x[-1] compares the tuples by their last element (the gain value)
    best_ = max(best_feature, key=lambda x: x[-1])
    # print(best_)  # (2, 0.4199730940219749)
    return '特征({})的信息增益最大,选择为根节点特征'.format(columns[best_[0]])

info_gain_train(np.array(datasets))
特征(年龄) - info_gain - 0.083
特征(有工作) - info_gain - 0.324
特征(有自己的房子) - info_gain - 0.420
特征(信贷情况) - info_gain - 0.363
'特征(有自己的房子)的信息增益最大,选择为根节点特征'
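The notebook only ranks features by ID3's information gain. As a small, hypothetical extension (not in the original post), C4.5's gain ratio $g_R(D,A)=g(D,A)/H_A(D)$ from criterion (2) can be computed by reusing calc_ent, cond_ent, and info_gain above; feature_ent below is an assumed helper for the $H_A(D)$ term:

# Gain ratio (C4.5), reusing calc_ent / cond_ent / info_gain defined above
def feature_ent(datasets, axis=0):
    # H_A(D): entropy of the value distribution of feature `axis`
    data_length = len(datasets)
    value_count = {}
    for row in datasets:
        value_count[row[axis]] = value_count.get(row[axis], 0) + 1
    return -sum((c / data_length) * log(c / data_length, 2)
                for c in value_count.values())

def info_gain_ratio(datasets, axis=0):
    g = info_gain(calc_ent(datasets), cond_ent(datasets, axis=axis))
    h = feature_ent(datasets, axis=axis)
    return g / h if h > 0 else 0.0  # guard against single-valued features

for c in range(len(datasets[0]) - 1):
    print('特征({}) - gain_ratio - {:.3f}'.format(
        columns[c], info_gain_ratio(datasets, axis=c)))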
Generate a decision tree from the training set above using the ID3 algorithm (Example 5.3 in the book).
# Node class of the tree (one child per feature value, so it is multi-way)
class Node:
    def __init__(self, root=True, label=None, feature_name=None, feature=None):
        self.root = root          # True marks a leaf node
        self.label = label        # class label (leaves only)
        self.feature_name = feature_name
        self.feature = feature    # index of the splitting feature
        self.tree = {}            # feature value -> child Node
        self.result = {
            'label': self.label,
            'feature': self.feature,
            'tree': self.tree
        }

    def __repr__(self):
        return '{}'.format(self.result)

    def add_node(self, val, node):
        self.tree[val] = node

    def predict(self, features):
        if self.root is True:
            return self.label
        return self.tree[features[self.feature]].predict(features)
class DTree:
    def __init__(self, epsilon=0.1):
        self.epsilon = epsilon  # information-gain threshold
        self._tree = {}

    # Empirical entropy H(D)
    @staticmethod
    def calc_ent(datasets):
        data_length = len(datasets)
        label_count = {}
        for i in range(data_length):
            label = datasets[i][-1]
            if label not in label_count:
                label_count[label] = 0
            label_count[label] += 1
        ent = -sum([(p / data_length) * log(p / data_length, 2)
                    for p in label_count.values()])
        return ent

    # Empirical conditional entropy H(D|A)
    def cond_ent(self, datasets, axis=0):
        data_length = len(datasets)
        feature_sets = {}
        for i in range(data_length):
            feature = datasets[i][axis]
            if feature not in feature_sets:
                feature_sets[feature] = []
            feature_sets[feature].append(datasets[i])
        cond_ent = sum([(len(p) / data_length) * self.calc_ent(p)
                        for p in feature_sets.values()])
        return cond_ent

    # Information gain
    @staticmethod
    def info_gain(ent, cond_ent):
        return ent - cond_ent

    def info_gain_train(self, datasets):
        count = len(datasets[0]) - 1
        ent = self.calc_ent(datasets)
        best_feature = []
        for c in range(count):
            c_info_gain = self.info_gain(ent, self.cond_ent(datasets, axis=c))
            best_feature.append((c, c_info_gain))
        # Pick the feature with the largest information gain
        best_ = max(best_feature, key=lambda x: x[-1])
        return best_

    def train(self, train_data):
        """
        input: dataset D (a DataFrame), feature set A, threshold eta
        output: decision tree T
        """
        _, y_train, features = (train_data.iloc[:, :-1],
                                train_data.iloc[:, -1],
                                train_data.columns[:-1])
        # 1. If every instance in D belongs to the same class Ck, T is a
        #    single-node tree labeled Ck; return T
        if len(y_train.value_counts()) == 1:
            return Node(root=True, label=y_train.iloc[0])
        # 2. If the feature set A is empty, T is a single-node tree labeled
        #    with the majority class Ck of D; return T
        if len(features) == 0:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])
        # 3. As in 5.1, compute each feature's information gain; Ag is the
        #    feature with the largest gain
        max_feature, max_info_gain = self.info_gain_train(np.array(train_data))
        max_feature_name = features[max_feature]
        # 4. If Ag's gain is below the threshold eta, T is a single-node tree
        #    labeled with the majority class Ck of D; return T
        if max_info_gain < self.epsilon:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])
        # 5. Otherwise, partition D into subsets by the values of Ag
        node_tree = Node(
            root=False, feature_name=max_feature_name, feature=max_feature)
        # All distinct values of the chosen feature column
        feature_list = train_data[max_feature_name].value_counts().index
        for f in feature_list:
            sub_train_df = train_data.loc[train_data[max_feature_name] ==
                                          f].drop([max_feature_name], axis=1)
            # 6. Recursively build a subtree for each subset
            sub_tree = self.train(sub_train_df)
            node_tree.add_node(f, sub_tree)
        # pprint.pprint(node_tree.tree)
        return node_tree

    def fit(self, train_data):
        self._tree = self.train(train_data)
        return self._tree

    def predict(self, X_test):
        return self._tree.predict(X_test)
datasets, columns = create_data()
data_df = pd.DataFrame(datasets, columns=columns)
dt = DTree()
tree = dt.fit(data_df)
tree
{'label': None, 'feature': 2, 'tree': {'否': {'label': None, 'feature': 1, 'tree': {'否': {'label': '否', 'feature': None, 'tree': {}}, '是': {'label': '是', 'feature': None, 'tree': {}}}}, '是': {'label': '是', 'feature': None, 'tree': {}}}}
dt.predict(['老年', '否', '否', '一般'])
'否'
Using scikit-learn
from sklearn.tree import DecisionTreeClassifier,export_graphviz
from IPython.display import Image
import graphviz
import pydotplus
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['label'] = iris.target
df.columns = [
    'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
]
# Keep the first 100 samples (two classes) and the first two features
data = np.array(df.iloc[:100, [0, 1, -1]])
# print(data)
X, y = data[:, :-1], data[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
# Export the tree in DOT format
dot_data = export_graphviz(
    clf,
    out_file=None,
    feature_names=['sepal length', 'sepal width'],
    class_names=iris.target_names,
    filled=True,              # color nodes by majority class
    rounded=True,             # draw nodes as rounded rectangles
    special_characters=True   # allow special (e.g. Unicode) characters in names
)
# Convert the DOT data to an image with pydotplus
graph = pydotplus.graph_from_dot_data(dot_data)
# Save as PDF
# graph.write_pdf("mytree.pdf")
# Save as PNG
# graph.write_png("mytree.png")
Image(graph.create_png())
0.8333333333333334
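If graphviz or pydotplus is not installed, scikit-learn's export_text (available since version 0.21) prints the same fitted tree as plain text:

from sklearn.tree import export_text
print(export_text(clf, feature_names=['sepal length', 'sepal width']))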
References:
https://github.com/fengdu78/lihang-code
(1) Decision tree case study: predicting Titanic survivors