Top 5 upcoming movies on Douban: bar and column charts
from xpinyin import Pinyin
import requests
from lxml import html
from matplotlib import pyplot as plt
# Enable a font that supports Chinese characters
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import pandas as pd
# pip install xpinyin
def spider(city):
    # splitter is the character inserted between pinyin syllables; the default is '-'
city_pinyin = Pinyin().get_pinyin(city,splitter='')
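    # e.g. Pinyin().get_pinyin('沈阳', splitter='') -> 'shenyang'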
url = 'https://movie.douban.com/cinema/later/{}/'.format(city_pinyin)
    print('The target site to crawl is', url)
    print('Crawling, please wait...')
    # Request headers: disguise the crawler as a regular browser
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0'}
    # Fetch the page source
    response = requests.get(url, headers=headers)
    html_data = response.text
    # print(html_data)
    # Extract the content we want
selector = html.fromstring(html_data)
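    # Each child div under id="showing-soon" corresponds to one movie card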
div_list = selector.xpath('//div[@id="showing-soon"]/div')
    print('Hello, {}: found {} upcoming movie(s)'.format(city, len(div_list)))
movie_info_list = []
for div in div_list:
        # Get the movie title
        movie_name = div.xpath('div[1]/h3/a/text()')
        # if len(movie_name) == 0:
        #     movie_name = 'No data found'
        # else:
        #     movie_name = movie_name[0]
        movie_name = 'No data found' if len(movie_name) == 0 else movie_name[0]
# print(movie_name)
        # "Want to see" count: strip the Chinese suffix '人想看' and convert to int
want_see = div.xpath('div[1]/ul/li[4]/span/text()')[0]
want_see = int(want_see.replace('人想看', ''))
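        # e.g. '12345人想看' -> 12345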
# print(want_see)
movie_info_list.append({
"movie_name":movie_name,
"want_see":want_see,
})
movie_info_list.sort(key=lambda x: x['want_see'],reverse=True)
    # Keep the five movies with the highest "want to see" counts
    x = [movie['movie_name'] for movie in movie_info_list[:5]]
    y = [movie['want_see'] for movie in movie_info_list[:5]]
    print(x)
    # plt.barh(x, y)
    # plt.ylabel('Movie title')
    # plt.xlabel('Want-to-see count')
    # plt.show()
    # labels = ['Movie {}'.format(i) for i in range(1, 9)]
colors = ['red', 'blue', 'green', 'yellow', 'pink']
    # Offset of each wedge from the center; valid values are 0~1
explode = [0, 0, 0, 0, 0.2]
plt.pie(x=y,
labels=x,
colors=colors,
shadow=True,
startangle=90,
explode=explode,
autopct='%1.1f%%'
)
    plt.axis('equal')  # make the pie a perfect circle
    plt.legend(loc=2)  # place the legend in the upper-left corner (quadrant 2)
    plt.title('Movies')
plt.show()
# Pass the Chinese city name; spider() converts it to pinyin ('沈阳' -> 'shenyang')
city = '沈阳'
# Call the function
spider(city)
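An aside (my addition, not from the notes): pandas is imported above but never used. A minimal sketch of how the scraped results could be tabulated with it, using hypothetical sample data in place of the list built inside spider():

import pandas as pd

sample_movies = [
    {"movie_name": "电影A", "want_see": 1200},
    {"movie_name": "电影B", "want_see": 800},
]  # hypothetical stand-in for movie_info_list
df = pd.DataFrame(sample_movies)
# sort by want_see descending and keep the top 5, mirroring the code above
print(df.sort_values('want_see', ascending=False).head(5))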
Top 10 Three Kingdoms characters: bar and column charts
import jieba
from wordcloud import WordCloud
import imageio
from matplotlib import pyplot as plt
# Enable a font that supports Chinese characters
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Read the file
# mask = imageio.imread('china.jpg')
with open('./threekingdom.txt','r',encoding='UTF-8') as f:
data = f.read()
# Tokenize with jieba
words_list = jieba.lcut(data)
print(words_list)
# Build a set of irrelevant words to exclude
excludes = {"将军","却说","二人","不可","荆州","不能","如此","丞相","商议",
"如何","主公","军士","军马","左右","次日","引兵","大喜","天下","东吴","于是","今日"
,"不敢","魏兵","陛下","都督","人马","不知","孔明曰","玄德曰","云长","刘备"}
# Build a container to hold the data we want,
# e.g. {"夏侯渊": 34, "害怕": 33, ...}
counts = {}
# Iterate over words_list; the goal is to pick out character names
for word in words_list:
#print(word)
if len(word)<= 1:
        # skip single-character tokens; this filters out most function words
continue
else:
        # Update the value in the counts dict:
        # counts[word] = previous count of this word + 1
        # counts[word] = counts[word] + 1
        # counts["正文"] = counts["正文"] + 1
counts[word] = counts.get(word,0) + 1
# print(counts)
# Merge words that refer to the same person
counts['孔明'] = counts['孔明'] + counts['孔明曰']
counts['玄德'] = counts['玄德'] + counts['玄德曰'] + counts['刘备']
counts['关公'] = counts['关公'] + counts['云长']
# Delete the irrelevant words
for word in excludes:
    counts.pop(word, None)  # pop with a default avoids a KeyError if a word never appeared
# Sort and filter
# Convert the dict into a list of (word, count) tuples: [(), ()]
items = list(counts.items())
# Sort by frequency, descending
items.sort(key=lambda x: x[1], reverse=True)
# Show the 10 most frequent words
# role_list = ["孔明", "孔明", ...]
role_list = []
x = []
y = []
for i in range(10):
    # Unpack the returned (role, count) tuple
    # print(items[i])
    role, count = items[i]
    print(role, count)
    # # A throwaway loop variable can be named _ instead of i;
    # # this signals to readers that the index is not used here
    # for _ in range(count):
    #     role_list.append(role)
    # print(role_list)
x.append(role)
y.append(count)
# plt.bar(x, y)
# plt.grid()  # grid lines
# plt.xlabel('Character')
# plt.ylabel('Occurrences')
# plt.show()
colors = ['red', 'blue', 'green', 'yellow', 'pink', 'purple', 'gray', 'orange']
# Offset of each wedge from the center; valid values are 0~1
explode = [0, 0, 0, 0, 0.2,0,0,0,0,0]
plt.pie(x=y,
labels=x,
colors=colors,
shadow=True,
startangle=90,
explode=explode,
autopct='%1.1f%%'
)
plt.axis('equal')  # make the pie a perfect circle
plt.legend(loc=2)  # place the legend in the upper-left corner (quadrant 2)
plt.title('Three Kingdoms characters')
plt.show()
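An aside (my addition, not part of the course notes): the tally-sort-slice pattern above is exactly what collections.Counter provides out of the box. A minimal sketch, assuming the same threekingdom.txt file and ignoring the exclude/merge steps for brevity:

from collections import Counter
import jieba

with open('./threekingdom.txt', 'r', encoding='UTF-8') as f:
    words = [w for w in jieba.lcut(f.read()) if len(w) > 1]
# most_common(10) returns the ten highest-count (word, count) pairs, already sorted
for role, count in Counter(words).most_common(10):
    print(role, count)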
# Teacher's in-class version: Three Kingdoms characters
import jieba
from wordcloud import WordCloud
import imageio
from matplotlib import pyplot as plt
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Read the file
# mask = imageio.imread('china.jpg')
with open('./threekingdom.txt', 'r', encoding='UTF-8') as f:
data = f.read()
# Tokenize with jieba
words_list = jieba.lcut(data)
print(words_list)
# Build a set of irrelevant words to exclude
excludes = {"将军","却说","二人","不可","荆州","不能","如此","丞相",
"商议","如何","主公","军士","军马","左右","次日","引兵",
"大喜","天下","东吴","于是","今日","不敢","魏兵","陛下",
"都督","人马","不知","孔明曰","玄德曰","玄德","云长"}
#,"","","","","","","","","","",
# Build a container to hold the data we want,
# e.g. {"夏侯渊": 34, "害怕": 33, ...}
counts = {}
# Iterate over words_list; the goal is to pick out character names
for word in words_list:
# print(word)
if len(word) <= 1:
        # skip single-character tokens; this filters out most function words
continue
else:
        # Update the value in the counts dict:
        # counts[word] = previous count of this word + 1
        # counts[word] = counts[word] + 1
        # counts["正文"] = counts["正文"] + 1
counts[word] = counts.get(word, 0) + 1
# print(counts)
# Merge words that refer to the same person
counts['孔明'] = counts['孔明'] + counts['孔明曰']
counts['刘备'] = counts['玄德'] + counts['玄德曰'] + counts['刘备']
counts['关公'] = counts['关公'] + counts['云长']
# Delete the irrelevant words
for word in excludes:
    counts.pop(word, None)  # pop with a default avoids a KeyError if a word never appeared
# Sort and filter
# Convert the dict into a list of (word, count) tuples: [(), ()]
items = list(counts.items())
# Sort by frequency, descending
items.sort(key=lambda x: x[1], reverse=True)
# Show the 10 most frequent words
# role_list = ["孔明", "孔明", "孔明", ...]
role_list = []
role_l = [] # x
role_c = [] # y
for i in range(10):
    # Unpack the returned (role, count) tuple
# print(items[i])
role, count = items[i]
print(role, count)
role_l.append(role)
role_c.append(count)
    # A throwaway loop variable can be named _ instead of i;
    # this signals to readers that the index is not used here
for _ in range(count):
role_list.append(role)
print(role_l,role_c)
plt.bar(role_l, role_c)
plt.show()
plt.pie(role_c,labels=role_l,autopct='%1.1f%%')
plt.show()
# print(role_list)
# # Join the list into a single string:
# # text = "孔明 孔明 孔明 孔明......刘备 刘备 刘备 曹操 曹操 曹操"
# text = " ".join(role_list)
# print(text)
# Render the word cloud
# WordCloud(
#     background_color='white',
#     mask=mask,
#     font_path='msyh.ttc',
#     # collocations: whether to count two-word pairings; False avoids duplicated words
#     collocations=False
# ).generate(text).to_file('三国人物前十展示.png')
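An aside (my addition): the commented-out version repeats each name count times just so generate() can re-count them. WordCloud can also consume the tally directly via generate_from_frequencies. A minimal sketch, assuming a counts dict like the one built above and a local msyh.ttc font file:

from wordcloud import WordCloud

counts = {'孔明': 100, '刘备': 90, '关公': 60}  # illustrative stand-in for the real tally
WordCloud(
    background_color='white',
    font_path='msyh.ttc',  # any font file that can render Chinese
).generate_from_frequencies(counts).to_file('三国人物前十展示.png')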
Teacher's version: movies
from xpinyin import Pinyin
import requests
from lxml import html
from matplotlib import pyplot as plt
# Enable a font that supports Chinese characters
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import pandas as pd
# pip install xpinyin
def spider(city):
    # splitter is the character inserted between pinyin syllables; the default is '-'
city_pinyin = Pinyin().get_pinyin(city,splitter='')
url = 'https://movie.douban.com/cinema/later/{}/'.format(city_pinyin)
    print('The target site to crawl is', url)
    print('Crawling, please wait...')
    # Request headers: disguise the crawler as a regular browser
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0'}
    # Fetch the page source
response = requests.get(url, headers = headers)
html_data = response.text
# print(html_data)
    # Extract the content we want
selector = html.fromstring(html_data)
div_list = selector.xpath('//div[@id="showing-soon"]/div')
    print('Hello, {}: found {} upcoming movie(s)'.format(city, len(div_list)))
movie_info_list = []
for div in div_list:
        # Get the movie title
        movie_name = div.xpath('div[1]/h3/a/text()')
        # if len(movie_name) == 0:
        #     movie_name = 'No data found'
        # else:
        #     movie_name = movie_name[0]
        movie_name = 'No data found' if len(movie_name) == 0 else movie_name[0]
# print(movie_name)
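        # Production country/region: the third <li> in the movie card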
country = div.xpath('div[1]/ul/li[3]/text()')[0]
        # "Want to see" count: strip the Chinese suffix '人想看' and convert to int,
        # e.g. '12345人想看' -> 12345
        want_see = div.xpath('div[1]/ul/li[4]/span/text()')[0]
        want_see = int(want_see.replace('人想看', ''))
# print(want_see)
movie_info_list.append({
"movie_name":movie_name,
"want_see":want_see,
"country": country,
})
movie_info_list.sort(key=lambda x: x['want_see'],reverse=True)
    # Plot the Top 5 as a horizontal bar chart
movie_name_top5=[movie['movie_name'] for movie in movie_info_list[:5]]
want_see_top5=[movie['want_see'] for movie in movie_info_list[:5]]
plt.barh(movie_name_top5,want_see_top5)
    plt.ylabel('Movie title')
    plt.xlabel('Want-to-see count')
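    # Optional tweak (my addition): barh draws the first list element at the bottom,
    # so the most-wanted movie ends up lowest; uncomment to flip the order:
    # plt.gca().invert_yaxis()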
plt.show()
    # Share of movies by production country
country_list = [movie['country'] for movie in movie_info_list]
counts ={}
for x in country_list:
counts[x] = counts.get(x,0) + 1
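    # e.g. ['中国大陆', '美国', '中国大陆'] -> {'中国大陆': 2, '美国': 1}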
country_x=list(counts.values())
labels=list(counts.keys())
colors = ['red', 'blue', 'green', 'yellow', 'pink']
plt.pie(x=country_x,
labels=labels,
colors=colors,
shadow=True,
startangle=90,
autopct='%1.1f%%'
)
    plt.axis('equal')  # make the pie a perfect circle
    plt.legend(loc=2)  # place the legend in the upper-left corner (quadrant 2)
    plt.title('Movies')
plt.show()
# Pass the Chinese city name; spider() converts it to pinyin ('沈阳' -> 'shenyang')
city = '沈阳'
# Call the function
spider(city)