一. K-means实现
- 读取数据
# Load the beer dataset from a space-separated text file.
import pandas as pd

beer = pd.read_csv('data.txt', delimiter=' ')
beer
- 取出其中四列
# Keep only the four numeric feature columns used for clustering.
feature_cols = ["calories", "sodium", "alcohol", "cost"]
X = beer[feature_cols]
X
- 使用K-means对X进行分类
from sklearn.cluster import KMeans

# Fit K-means with 3 centroids and with 2 centroids.
# n_init is passed explicitly (its default changed in scikit-learn 1.4 and
# emits a FutureWarning before that), and random_state fixes the seed so the
# cluster labels are reproducible between runs.
km = KMeans(n_clusters=3, n_init=10, random_state=42).fit(X)
km2 = KMeans(n_clusters=2, n_init=10, random_state=42).fit(X)
km.labels_  # cluster index assigned to each sample
- 根据分类进行排序
# Attach both label sets as new columns, then view the rows ordered by the
# 3-cluster assignment.
beer['cluster'], beer['cluster2'] = km.labels_, km2.labels_
beer.sort_values(by='cluster')
- 计算cluster=3时的平均值
# Per-cluster feature means for the 3-cluster solution.
beer.groupby(by='cluster').mean()
- 计算cluster=2时的平均值
# Per-cluster feature means for the 2-cluster solution.
beer.groupby(by='cluster2').mean()
- 利用reset_index重置索引
# Cluster means with 'cluster' turned back into an ordinary column so it can
# be used directly when plotting the centres.
grouped_means = beer.groupby('cluster').mean()
centers = grouped_means.reset_index()
centers
- 分类效果的可视化
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 14
colors = np.array(['red', 'green', 'blue', 'yellow'])
# 数据分布三点图
plt.scatter(beer['calories'], beer['alcohol'], c=colors[beer['cluster']])
# 关于 calories alcohol 的三个类的平均值分布
plt.scatter(centers.calories, centers.alcohol, linewidth=3, marker='+', s=300, c='black')
plt.xlabel('Calories')
plt.ylabel('Alcohol')
- 计算不同cluster每个分类不同特征的centers值
from pandas.tools.plotting import scatter_matrix
%matplotlib inline
cluster_centers = km.cluster_centers_
cluster_centers_2 = km2.cluster_centers_
print (cluster_centers)
print (cluster_centers_2)
- 基于步骤9,cluster=3 进行可视化
# Pairwise scatter matrix of the four features, coloured by the 3-cluster labels.
scatter_matrix(beer[["calories", "sodium", "alcohol", "cost"]],
               c=colors[beer["cluster"]], alpha=1, s=100, figsize=(10, 10))
plt.suptitle("With 3 centroids initialized")
- 基于步骤9,cluster=2 进行可视化
# Same scatter matrix, coloured by the 2-cluster labels instead.
scatter_matrix(beer[["calories", "sodium", "alcohol", "cost"]],
               c=colors[beer["cluster2"]], alpha=1, s=100, figsize=(10, 10))
plt.suptitle("With 2 centroids initialized")
二. 数据标准化
- 数据标准化
from sklearn.preprocessing import StandardScaler

# Standardize every feature to zero mean and unit variance.
X_scaled = StandardScaler().fit_transform(X)
X_scaled
- 使用标准化后的数据生成新的cluster=3,且应用于原始数据进行分类
# Re-fit K-means (k=3) on the standardized features and label the original rows.
km = KMeans(n_clusters=3).fit(X_scaled)
beer["scaled_cluster"] = km.labels_
beer.sort_values(by="scaled_cluster")
- 求mean值
# Per-cluster feature means for the standardized-data clustering.
beer.groupby(by="scaled_cluster").mean()
- 可视化
# Fix: pd.scatter_matrix was removed in pandas 0.25; the function now lives
# under pd.plotting.
pd.plotting.scatter_matrix(X, c=colors[beer.scaled_cluster], alpha=1,
                           figsize=(10, 10), s=100)
三. 聚类评估：轮廓系数（Silhouette Coefficient）
- 计算当前分类下的轮廓系数
from sklearn import metrics

# Silhouette coefficient of the clustering fit on standardized features...
score_scaled = metrics.silhouette_score(X, beer.scaled_cluster)
# ...and of the clustering fit on the raw features.
score = metrics.silhouette_score(X, beer.cluster)
print(score_scaled, score)
- 计算不同cluster值下的轮廓系数
# Silhouette score for every candidate k in [2, 20) on the raw features.
scores = [
    metrics.silhouette_score(X, KMeans(n_clusters=k).fit(X).labels_)
    for k in range(2, 20)
]
scores
- 基于上一步计算的scores进行可视化，观察走向（cluster=2最合适）
# Visualize silhouette score vs. number of clusters; the curve peaks at k=2
# for this dataset.
plt.plot(list(range(2, 20)), scores)
plt.xlabel("Number of Clusters Initialized")
plt.ylabel("Silhouette Score")  # fixed axis-label typo ("Sihouette")
四.DBSCAN Clustering
- 数据初始化,使用DBSCAN进行分类
from sklearn.cluster import DBSCAN

# DBSCAN: eps is the neighbourhood radius, min_samples the density threshold.
db = DBSCAN(eps=10, min_samples=2).fit(X)
labels = db.labels_

beer['cluster_db'] = labels  # label -1 marks noise points
beer.sort_values(by='cluster_db')
- 求每个特征的均值
# Per-cluster feature means for the DBSCAN assignment (including noise, -1).
beer.groupby(by='cluster_db').mean()
- 可视化
# Fix: pd.scatter_matrix was removed in pandas 0.25; use pd.plotting.scatter_matrix.
# Note: noise points (label -1) index colors[-1], i.e. 'yellow'.
pd.plotting.scatter_matrix(X, c=colors[beer.cluster_db], figsize=(10, 10), s=100)
- 数据标准化后进行DBSCAN分类
# Bug fix: this step is described as running DBSCAN on the *standardized*
# data, but the original code fit the raw X again. Fit X_scaled instead.
# NOTE(review): eps=10 was tuned for the raw feature scale; on standardized
# data it is likely far too large (everything may collapse into one cluster)
# -- retune eps for the new scale if the result looks degenerate.
db = DBSCAN(eps=10, min_samples=2).fit(X_scaled)
labels = db.labels_
beer['cluster_db'] = labels
beer.sort_values('cluster_db')
- 通过改变eps和min_samples数值,计算轮廓系数并可视化
# Sweep DBSCAN hyper-parameters and record the silhouette score of each fit.
scores = []
counter = 0
for eps in range(10, 20):
    for min_samples in range(2, 10):
        # Fix: min_samples is keyword-only in modern scikit-learn, so
        # DBSCAN(eps, min_samples) raises TypeError -- pass both by name.
        labels = DBSCAN(eps=eps, min_samples=min_samples).fit(X).labels_
        # silhouette_score is undefined with fewer than 2 clusters; record
        # NaN instead of crashing so scores stays aligned with the x-axis.
        if len(set(labels)) > 1:
            score = metrics.silhouette_score(X, labels)
        else:
            score = float('nan')
        scores.append(score)
        counter += 1
# scores
counter
# One point per (eps, min_samples) combination: 10 * 8 = 80 fits.
plt.plot(range(0, 80), scores)
plt.xlabel("value of eps Initialized")
plt.ylabel("Silhouette Score")  # fixed axis-label typo ("Sihouette")