Summary
- 1. response = requests.get(url, headers=header)
- 2. soup = BeautifulSoup(response.text, 'html.parser')
- 3. guoguo = soup.select('...')
- 2.2 pattern = re.compile('...', re.S)
- 3.2 guoguo = re.findall(pattern, response.text)
1. Observe how the URL changes from page to page, using the Maoyan movie chart as an example
- https://maoyan.com/board/4?
- https://maoyan.com/board/4?offset=10
- https://maoyan.com/board/4?offset=20

The above are the first three pages of the Maoyan chart: the offset parameter grows by 10 per page.

- https://maoyan.com/board/4?offset=0

Appending offset=0 to the first page's URL still returns the first page, so offset can simply start at 0.
import requests
import time
from bs4 import BeautifulSoup

for i in range(10):
    offset = i * 10
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    time.sleep(2)  # pause 2 s between requests
    # this loop only builds the page URLs; fetching them is covered in step 2
2. Fetch the text content of a page
def get_one_page(url):
    header = {'user-agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'}
    response = requests.get(url, headers=header)
    if response.status_code == 200:
        return response.text
    return None
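A quick usage check of get_one_page with the first-page URL from step 1:

html = get_one_page('https://maoyan.com/board/4?offset=0')
print(html is not None)  # True when the request came back with status 200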
3. Parse the page text with a regular expression
import re

def parse_one_page(html):
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>',
        re.S)
    items = re.findall(pattern, html)
    return items
See also: meanings of regular-expression characters
Common characters:
- \s whitespace
- \d digit
- \w word character (letter, digit, or underscore)
Common syntax (a short demo follows this list):
- re.compile('...', re.S)  # re.S lets . match newlines as well
- re.findall(pattern, html)  # return every match of pattern in the page text
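A tiny self-contained demo of what re.S changes; the HTML snippet here is made up purely for illustration:

import re

html = '<dd>\n<i class="board-index">1</i>\n</dd>'

# Without re.S the dot cannot cross the newline, so nothing matches.
print(re.findall('<dd>.*?board-index.*?>(.*?)</i>', html))        # []
# With re.S the dot also matches newlines, so the rank is captured.
print(re.findall('<dd>.*?board-index.*?>(.*?)</i>', html, re.S))  # ['1']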
3.2 Find elements by attribute
s = requests.Session()  # keep one session alive across requests
response = s.get(url, headers=header)
soup = BeautifulSoup(response.text, 'html.parser')
index = soup.find_all(attrs={'class': 'board-index'})  # attribute search also allows fuzzy matching
title = soup.find_all(attrs={'class': 'name'})
time = soup.find_all(attrs={'class': 'releasetime'})  # note: this name shadows the time module if it was imported
for i in index:
    print(i.get_text())  # prints the text of each matched element
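To read the three result lists side by side, zip lines them up row by row; a small sketch using the variables collected above:

# Print rank, movie title, and release time on one line per movie.
for idx, name, release in zip(index, title, time):
    print(idx.get_text(), name.get_text().strip(), release.get_text())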
3.3 Find elements with a CSS selector or XPath
Right-click the element, choose Inspect, and copy its selector path.
response = requests.get(url,headers=header)
soup = BeautifulSoup(response.text,'html.parser')
post=soup.select('body > div.con > div.leftCon > div.item_con.pos_info > div.pos_base_info > span.pos_title')
address=soup.select('body > div.con > div.leftCon > div.item_con.pos_info > div.pos-area > span')
wage=soup.select('body > div.con > div.leftCon > div.item_con.pos_info > div.pos_base_info > span.pos_salary')
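The lines above use CSS selectors; BeautifulSoup's html.parser does not evaluate XPath, so for XPath the lxml library can be used instead. A minimal sketch, assuming the same response object; the XPath expressions are illustrative, not copied from the real page:

from lxml import etree

tree = etree.HTML(response.text)
# Rough XPath equivalents of the selectors above; adjust the paths to the actual page.
post = tree.xpath('//span[@class="pos_title"]/text()')
wage = tree.xpath('//span[@class="pos_salary"]/text()')
print(post, wage)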
4. Store the data
- Store as dictionaries in a txt file
import json
import re

# dicts are serialized with json before being written to the file
def parse_one_page(html):
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>',
        re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2].strip(),
            'actor': item[3].strip()[3:] if len(item[3]) > 3 else '',
            'time': item[4].strip()[5:] if len(item[4]) > 5 else '',
            'score': item[5].strip() + item[6].strip()
        }
def write_to_file(content):
    with open('result.txt', 'a', encoding='utf-8') as f:  # 'a' opens result.txt in append mode, creating it if missing
        f.write(json.dumps(content, ensure_ascii=False) + '\n')  # json.dumps turns the dict into a JSON string
        f.write('===========================================\n')
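A minimal sketch tying the three functions above together; the main name and the 10-page range are assumptions that follow the pagination observed in step 1 (requests, time, and the functions are assumed to be imported/defined as above):

def main():
    for i in range(10):
        url = 'http://maoyan.com/board/4?offset=' + str(i * 10)
        html = get_one_page(url)
        if html is None:
            continue
        for item in parse_one_page(html):  # the generator version that yields dicts
            write_to_file(item)
        time.sleep(2)  # pause between pages

if __name__ == '__main__':
    main()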
- Store as a DataFrame
import pandas as pd

test = pd.DataFrame()
temp = pd.DataFrame()
for url in urls:
    links = get_url(url)  # collect the sub-links on each listing page
    time.sleep(2)
    for link in links:
        href = link.get('href')  # URL of each sub-link on the page
        wb_data = requests.get(href, headers=header)  # fetch the content of each detail page
        soup = BeautifulSoup(wb_data.text, 'lxml')
        post = soup.select('body > div.con > div.leftCon > div.item_con.pos_info > div.pos_base_info > span.pos_title')
        address = soup.select('body > div.con > div.leftCon > div.item_con.pos_info > div.pos-area > span')
        wage = soup.select('body > div.con > div.leftCon > div.item_con.pos_info > div.pos_base_info > span.pos_salary')
        requirement = soup.select('body > div.con > div.leftCon > div.item_con.pos_info > span')
        corp = soup.select('body > div.con > div.rightCon > div > div > div.comp_baseInfo_title > div.baseInfo_link > a')
        for i in range(len(post)):
            temp.loc[i, '职位'] = post[i].get_text()
            temp.loc[i, '地址'] = address[i].get_text()
            temp.loc[i, '工资'] = wage[i].get_text()
            temp.loc[i, '公司'] = corp[i].get_text()
            temp.loc[i, '要求'] = requirement[i].get_text()
        test = pd.concat([test, temp], ignore_index=True)  # DataFrame.append was removed in recent pandas versions
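Once the loop finishes, the collected DataFrame can be written out, for example to CSV; the file name here is hypothetical:

test.to_csv('jobs.csv', index=False, encoding='utf-8-sig')  # 'jobs.csv' is a placeholder name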
- Import into a database
See also: importing scraped data into a database
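For reference, a minimal sketch using Python's built-in sqlite3 module, assuming the dicts produced by parse_one_page above; the database and table names are illustrative:

import sqlite3

conn = sqlite3.connect('movies.db')  # 'movies.db' is a placeholder name
conn.execute('CREATE TABLE IF NOT EXISTS movies (idx TEXT, title TEXT, actor TEXT, time TEXT, score TEXT)')
for item in parse_one_page(html):  # html is a page fetched with get_one_page
    conn.execute('INSERT INTO movies VALUES (?, ?, ?, ?, ?)',
                 (item['index'], item['title'], item['actor'], item['time'], item['score']))
conn.commit()
conn.close()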