EX 5: Linear and Quadratic Discriminant Analysis with confidence ellipsoid

Classification / Example 5: Linear and Quadratic Discriminant Analysis with confidence ellipsoid

A comparison of linear and quadratic discriminant analysis.

(1) Data generation functions

The packages this example introduces are:

1. scipy.linalg: linear-algebra routines; here we mainly use linalg.eigh for eigenvalue problems
2. matplotlib.colors: handles the color mapping used when plotting
3. LinearDiscriminantAnalysis: the linear discriminant algorithm
4. QuadraticDiscriminantAnalysis: the quadratic discriminant algorithm
```python
%matplotlib inline
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
```
Next we set up a linearly varying colormap. LinearSegmentedColormap(name, segmentdata) returns, by default, a mapping from 256 values to colors, configured via a dict segmentdata with three entries. Taking 'red': [(0, 1, 1), (1, 0.7, 0.7)] as an example: as the input value runs from 0 to 1, the red channel varies linearly from 1 down to 0.7.
```python
cmap = colors.LinearSegmentedColormap(
    'red_blue_classes',
    {'red': [(0, 1, 1), (1, 0.7, 0.7)],
     'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
     'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
```
We can observe the resulting RGB values with the code below, feeding in np.arange(0, 1.1, 0.1), i.e. 0, 0.1, ..., 1.0.
```python
values = np.arange(0, 1.1, 0.1)
cmap_values = mpl.cm.get_cmap('red_blue_classes')(values)
import pandas as pd
pd.set_option('precision', 2)
df = pd.DataFrame(np.hstack((values.reshape(11, 1), cmap_values)))
df.columns = ['Value', 'R', 'G', 'B', 'Alpha']
print(df)
```
```
    Value    R    G    B  Alpha
0     0.0  1.0  0.7  0.7      1
1     0.1  1.0  0.7  0.7      1
2     0.2  0.9  0.7  0.8      1
3     0.3  0.9  0.7  0.8      1
4     0.4  0.9  0.7  0.8      1
5     0.5  0.8  0.7  0.9      1
6     0.6  0.8  0.7  0.9      1
7     0.7  0.8  0.7  0.9      1
8     0.8  0.8  0.7  0.9      1
9     0.9  0.7  0.7  1.0      1
10    1.0  0.7  0.7  1.0      1
```
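As a cross-check, here is a minimal sketch (not part of the original example) showing that the same gradient can be built with LinearSegmentedColormap.from_list, which interpolates linearly between the two endpoint colors implied by the segmentdata above:

```python
from matplotlib.colors import LinearSegmentedColormap

# endpoints match the segmentdata above: value 0 -> (1, 0.7, 0.7), value 1 -> (0.7, 0.7, 1)
cmap2 = LinearSegmentedColormap.from_list('red_blue_classes_alt',
                                          [(1, 0.7, 0.7), (0.7, 0.7, 1)])
print(cmap2(0.0), cmap2(1.0))  # RGBA tuples at the two ends of the map
```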
Next we generate two datasets. Each contains 600 samples with 2 features (X: 600x2) and 2 classes (y: 600, where the first 300 elements are 0 and the rest are 1):

1. dataset_fixed_cov(): the features of the two classes share the same covariance
2. dataset_cov(): the features of the two classes have different covariances

The difference lies in how X is generated: np.dot(np.random.randn(n, dim), C) versus np.dot(np.random.randn(n, dim), C.T). np.dot(np.random.randn(n, dim), C) produces a 300x2 matrix whose random spread is controlled by the matrix C. In dataset_fixed_cov(), both batches of 300 samples are shaped by the same C. In the result figures at the bottom of this page, the top row (same covariance) shows that the red points (first class) and blue points (second class) have similarly shaped distributions, while in the bottom row (different covariances) the shapes differ. In each plot, the horizontal and vertical axes are the first and second feature. Readers can try decreasing the value 0.83, or changing C.T to C, and see how the final figures change. (A numerical sanity check of the covariance this transform produces follows the code block below.)
```python
def dataset_fixed_cov():
    '''Generate 2 Gaussians samples with the same covariance matrix'''
    n, dim = 300, 2
    np.random.seed(0)
    C = np.array([[0., -0.23], [0.83, .23]])
    X = np.r_[np.dot(np.random.randn(n, dim), C),
              np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]  # + np.array([1, 1]) shifts the second class away from the first
    y = np.hstack((np.zeros(n), np.ones(n)))  # 300 zeros followed by 300 ones
    return X, y


def dataset_cov():
    '''Generate 2 Gaussians samples with different covariance matrices'''
    n, dim = 300, 2
    np.random.seed(0)
    C = np.array([[0., -1.], [2.5, .7]]) * 2.
    X = np.r_[np.dot(np.random.randn(n, dim), C),
              np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
    y = np.hstack((np.zeros(n), np.ones(n)))
    return X, y
```
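Since np.random.randn draws samples with identity covariance, multiplying by C yields data whose covariance is approximately C.T @ C. A minimal sketch (not part of the original example) verifying this numerically with a large sample:

```python
import numpy as np

np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
Z = np.random.randn(100000, 2)     # samples with identity covariance
X = Z.dot(C)                       # same transform as in dataset_fixed_cov()
print(np.cov(X, rowvar=False))     # empirical covariance of X
print(C.T.dot(C))                  # theoretical covariance of Z C
```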

(2) Plotting functions

1. Identify the correctly classified points and the misclassified points
2. Plot points classified as 0 in red and as 1 in blue, using dark red and dark blue for misclassified points
3. Use lda.predict_proba() to draw the classification probability distribution (see Example 3); a quick check of predict_proba's output follows the function below

(The function below has been slightly adjusted to display well in the IPython notebook environment.)
```python
def plot_data(lda, X, y, y_pred, fig_index):
    splot = plt.subplot(2, 2, fig_index)
    if fig_index == 1:
        plt.title('Linear Discriminant Analysis', fontsize=28)
        plt.ylabel('Data with fixed covariance', fontsize=28)
    elif fig_index == 2:
        plt.title('Quadratic Discriminant Analysis', fontsize=28)
    elif fig_index == 3:
        plt.ylabel('Data with varying covariances', fontsize=28)

    # Step 1: split correctly classified and misclassified points
    tp = (y == y_pred)  # True where the prediction matches the label
    tp0, tp1 = tp[y == 0], tp[y == 1]  # tp0: class-0 samples classified correctly
    X0, X1 = X[y == 0], X[y == 1]
    X0_tp, X0_fp = X0[tp0], X0[~tp0]
    X1_tp, X1_fp = X1[tp1], X1[~tp1]

    # Step 2: plot the classes in red and blue; misclassified points in dark red / dark blue
    # class 0: dots
    plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
    plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000')  # dark red

    # class 1: dots
    plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
    plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099')  # dark blue

    # Step 3: draw the classification probability distribution (see Example 3)
    # class 0 and 1 : areas
    nx, ny = 200, 100
    x_min, x_max = plt.xlim()
    y_min, y_max = plt.ylim()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                   norm=colors.Normalize(0., 1.))
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')

    # means
    plt.plot(lda.means_[0][0], lda.means_[0][1],
             'o', color='black', markersize=10)
    plt.plot(lda.means_[1][0], lda.means_[1][1],
             'o', color='black', markersize=10)

    return splot
```
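For reference, a minimal sketch (assuming the dataset_fixed_cov() defined above is in scope) showing the shape of the lda.predict_proba output that the grid evaluation in Step 3 relies on:

```python
X, y = dataset_fixed_cov()
lda = LinearDiscriminantAnalysis().fit(X, y)
proba = lda.predict_proba(X[:3])
print(proba.shape)  # (3, 2): one column per class
print(proba)        # each row sums to 1; column 1 is P(class 1), used as Z above
```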
```python
def plot_ellipse(splot, mean, cov, color):
    v, w = linalg.eigh(cov)
    u = w[0] / linalg.norm(w[0])
    angle = np.arctan(u[1] / u[0])
    angle = 180 * angle / np.pi  # convert to degrees
    # filled Gaussian at 2 standard deviation
    ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
                              180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(0.5)
    splot.add_artist(ell)
    splot.set_xticks(())
    splot.set_yticks(())
```
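The ellipse geometry comes from an eigendecomposition of the covariance matrix: linalg.eigh returns the eigenvalues in ascending order, and the ellipse's axis lengths are proportional to the square roots of those eigenvalues. A minimal sketch (with a made-up covariance matrix) of the computation plot_ellipse performs:

```python
import numpy as np
from scipy import linalg

cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])               # hypothetical covariance matrix
v, w = linalg.eigh(cov)                    # eigenvalues ascending; eigenvectors in columns of w
print(v ** 0.5)                            # axis lengths are proportional to sqrt(eigenvalues)
u = w[0] / linalg.norm(w[0])               # same row-based convention as plot_ellipse above
print(np.degrees(np.arctan(u[1] / u[0])))  # orientation angle in degrees
```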

(3) Test the data and plot

```python
def plot_lda_cov(lda, splot):
    plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
    plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')


def plot_qda_cov(qda, splot):
    plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
    plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')

###############################################################################
figure = plt.figure(figsize=(30, 20), dpi=300)
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # Linear Discriminant Analysis
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    y_pred = lda.fit(X, y).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    plt.axis('tight')

    # Quadratic Discriminant Analysis
    # (note: newer scikit-learn versions rename this parameter to
    #  store_covariance and the attribute to covariance_)
    qda = QuadraticDiscriminantAnalysis(store_covariances=True)
    y_pred = qda.fit(X, y).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis', fontsize=28)
plt.show()
```
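Beyond the visual comparison, a quick numeric sketch (not part of the original example; it assumes the functions and imports above are in scope) makes the LDA/QDA difference concrete by printing training accuracy on both datasets:

```python
from sklearn.metrics import accuracy_score

for name, (X, y) in [('fixed covariance', dataset_fixed_cov()),
                     ('varying covariance', dataset_cov())]:
    lda = LinearDiscriminantAnalysis().fit(X, y)
    qda = QuadraticDiscriminantAnalysis().fit(X, y)
    print(name,
          'LDA:', accuracy_score(y, lda.predict(X)),
          'QDA:', accuracy_score(y, qda.predict(X)))
```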
(Figure: top row, data with fixed covariance; bottom row, data with varying covariances. Left column: Linear Discriminant Analysis; right column: Quadratic Discriminant Analysis.)
Python source code: plot_lda_qda.py
```python
print(__doc__)

from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
    'red_blue_classes',
    {'red': [(0, 1, 1), (1, 0.7, 0.7)],
     'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
     'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)


###############################################################################
# generate datasets
def dataset_fixed_cov():
    '''Generate 2 Gaussians samples with the same covariance matrix'''
    n, dim = 300, 2
    np.random.seed(0)
    C = np.array([[0., -0.23], [0.83, .23]])
    X = np.r_[np.dot(np.random.randn(n, dim), C),
              np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
    y = np.hstack((np.zeros(n), np.ones(n)))
    return X, y


def dataset_cov():
    '''Generate 2 Gaussians samples with different covariance matrices'''
    n, dim = 300, 2
    np.random.seed(0)
    C = np.array([[0., -1.], [2.5, .7]]) * 2.
    X = np.r_[np.dot(np.random.randn(n, dim), C),
              np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
    y = np.hstack((np.zeros(n), np.ones(n)))
    return X, y


###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
    splot = plt.subplot(2, 2, fig_index)
    if fig_index == 1:
        plt.title('Linear Discriminant Analysis')
        plt.ylabel('Data with fixed covariance')
    elif fig_index == 2:
        plt.title('Quadratic Discriminant Analysis')
    elif fig_index == 3:
        plt.ylabel('Data with varying covariances')

    tp = (y == y_pred)  # True Positive
    tp0, tp1 = tp[y == 0], tp[y == 1]
    X0, X1 = X[y == 0], X[y == 1]
    X0_tp, X0_fp = X0[tp0], X0[~tp0]
    X1_tp, X1_fp = X1[tp1], X1[~tp1]

    # class 0: dots
    plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
    plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000')  # dark red

    # class 1: dots
    plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
    plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099')  # dark blue

    # class 0 and 1 : areas
    nx, ny = 200, 100
    x_min, x_max = plt.xlim()
    y_min, y_max = plt.ylim()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                   norm=colors.Normalize(0., 1.))
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')

    # means
    plt.plot(lda.means_[0][0], lda.means_[0][1],
             'o', color='black', markersize=10)
    plt.plot(lda.means_[1][0], lda.means_[1][1],
             'o', color='black', markersize=10)

    return splot


def plot_ellipse(splot, mean, cov, color):
    v, w = linalg.eigh(cov)
    u = w[0] / linalg.norm(w[0])
    angle = np.arctan(u[1] / u[0])
    angle = 180 * angle / np.pi  # convert to degrees
    # filled Gaussian at 2 standard deviation
    ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
                              180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(0.5)
    splot.add_artist(ell)
    splot.set_xticks(())
    splot.set_yticks(())


def plot_lda_cov(lda, splot):
    plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
    plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')


def plot_qda_cov(qda, splot):
    plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
    plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')

###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # Linear Discriminant Analysis
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    y_pred = lda.fit(X, y).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    plt.axis('tight')

    # Quadratic Discriminant Analysis
    qda = QuadraticDiscriminantAnalysis(store_covariances=True)
    y_pred = qda.fit(X, y).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
```
