kNN (k-nearest neighbors) is a supervised classification algorithm (not a clustering one). The idea behind the Python classifier function:
The input to classify is a vector inX = [x, y, z].
Repeat inX (with tile) into a matrix of the same shape as the data set: [[x, y, z], [x, y, z], ...].
Subtract the training matrix from it.
Square the differences, sum each row, and take the square root: the Euclidean distance.
This gives the distance from inX to every point in the data set.
Sort the distances.
Take the k smallest.
Count how often each label occurs among those k neighbors.
The most frequent label becomes the label of inX, as sketched in the snippet below.
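A minimal NumPy sketch of these distance steps, using a made-up query point and a two-row data set (the names and values here are illustrative only, not part of KNN.py):

from numpy import array, tile

inX = array([1.0, 1.2])
dataSet = array([[1.0, 1.1], [0.0, 0.0]])

diffMat = tile(inX, (dataSet.shape[0], 1)) - dataSet  # repeat inX row-wise, then subtract
sqDistances = (diffMat ** 2).sum(axis=1)              # sum of squared differences per row
distances = sqDistances ** 0.5                        # Euclidean distances
print(distances.argsort())                            # indices ordered nearest to farthest: [0 1]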
Birds of a feather flock together; like attracts like.
- KNN.py
from numpy import *
import operator

def createDataSet():
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels
def classify0(inX, dataSet, labels, k):  # inX: input vector to classify; dataSet: training samples; labels: label vector; k: number of nearest neighbors to use
    dataSetSize = dataSet.shape[0]  # number of rows in the training matrix
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet  # tile repeats inX into dataSetSize rows, then subtract the training matrix to get the differences
    sqDiffMat = diffMat**2  # squared differences (element-wise)
    sqDistances = sqDiffMat.sum(axis=1)  # sum each row into a single value
    distances = sqDistances**0.5  # Euclidean distances
    sortedDistIndicies = distances.argsort()  # indices that sort the distances from smallest to largest
    classCount = {}  # dictionary: label -> vote count
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]  # labels of the k nearest points, taken via the sorted indices (labels come with the data)
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1  # count how often each label appears
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    # In Python 3, classCount.iteritems() becomes classCount.items()
    # sorted(iterable, key=None, reverse=False) --> new sorted list
    # reverse=False (the default) sorts ascending; itemgetter(1) sorts by index 1 of each (label, count) pair, i.e. the vote count
    return sortedClassCount[0][0]
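# Quick sanity check (a sketch, not in the original file): with the toy data from
# createDataSet, the two closest neighbors of [0, 0] are labelled 'B', so a 3-NN
# vote should return 'B'.
# >>> group, labels = createDataSet()
# >>> classify0([0, 0], group, labels, 3)
# 'B'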
def file2matrix(filename):
    fr = open(filename)
    arrayOLines = fr.readlines()  # every line of the file
    numberOfLines = len(arrayOLines)  # number of data rows
    returnMat = zeros((numberOfLines, 3))  # matrix to return
    classLabelVector = []
    index = 0
    for line in arrayOLines:
        line = line.strip()  # strip surrounding whitespace
        listFromLine = line.split('\t')  # split the line on tabs
        returnMat[index, :] = listFromLine[0:3]  # store the first three elements in the return matrix
        classLabelVector.append(int(listFromLine[-1]))  # the last element is the label; collect it in a list
        index += 1
    return returnMat, classLabelVector
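# file2matrix therefore expects tab-separated lines with three numeric features
# followed by an integer label, e.g. (values are illustrative only):
# 40920\t8.326976\t0.953952\t3
# >>> mat, labs = file2matrix('datingTestSet2.txt')
# mat has shape (number of lines, 3); labs is a list of the integer labels.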
def autoNorm(dataSet):  # min-max normalization, column by column
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    normDataSet = zeros(shape(dataSet))
    m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))
    return normDataSet, ranges, minVals
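# Illustration of the min-max scaling (a sketch with made-up values):
# >>> normed, rng, mn = autoNorm(array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]]))
# each column is rescaled to [0, 1]: the rows become [0, 0], [0.5, 0.5], [1, 1];
# rng is [2, 20] and mn is [1, 10].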
def datingClassTest():  # hold-out test of the classifier
    hoRatio = 0.10
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')  # the data set used in the book has string labels that file2matrix cannot parse; datingTestSet2.txt uses integer labels
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        print('the classifier came back with: %d, the real answer is: %d' % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]: errorCount += 1.0
    print('the total error rate is: %f' % (errorCount / float(numTestVecs)))
def classifyPerson():  # predict how much you will like a person
    resultList = ['not at all', 'in small doses', 'in large doses']  # the three possible outcomes, i.e. the answers to the question
    percentTats = float(input('percentage of time spent playing video games?'))
    ffMiles = float(input('frequent flier miles earned per year?'))
    iceCream = float(input('liters of ice cream consumed per year?'))
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTats, iceCream])
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    print("You will probably like this person:", resultList[classifierResult - 1])
- KNN_Run.py
import KNN
import matplotlib
import matplotlib.pyplot as plt
# Add the import below so the labels can be used to vary point color/size; without it, 'array' is undefined
from numpy import *
datingDataMat, datingLabels = KNN.file2matrix('datingTestSet2.txt')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2], 15.0*array(datingLabels), 15.0*array(datingLabels))
#ax.scatter(datingDataMat[:, 0], datingDataMat[:, 1], 15.0*array(datingLabels), 15.0*array(datingLabels))
plt.show()  # show the figure
The active scatter call plots columns 2-3 (video-game time % vs. liters of ice cream).
The commented-out call plots columns 1-2 (flier miles vs. video-game time %) instead.
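A possible variant of this plot (an assumption on my part, not from the original post): passing the labels through the c= argument with a colormap usually reads more clearly than scaling marker sizes. Column 1 is the video-game time percentage and column 2 the liters of ice cream, following the feature order used in classifyPerson.

import KNN
import matplotlib.pyplot as plt
from numpy import array

datingDataMat, datingLabels = KNN.file2matrix('datingTestSet2.txt')
fig, ax = plt.subplots()
sc = ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2],
                c=array(datingLabels), cmap='viridis', s=15)
ax.set_xlabel('percentage of time spent playing video games')
ax.set_ylabel('liters of ice cream consumed per year')
fig.colorbar(sc, ax=ax, label='label (1-3)')
plt.show()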