Scraping Beauty Pictures with Python
from bs4 import BeautifulSoup
import requests
import urllib.request
import random
import time
import os

x = 0

def getDbImage(page=1):
    global x
    response = requests.get('https://www.dbmeinv.com/?pager_offset={}'.format(page))
    html = response.text
    soup = BeautifulSoup(html, 'lxml')
    girl = soup.select('img')
    for img in girl:
        link = img.get('src')
        if not link:  # skip <img> tags that have no src attribute
            continue
        urllib.request.urlretrieve(link, 'images/{}.jpg'.format(x))
        x += 1
        print('Downloading image {}'.format(x))

os.makedirs('images', exist_ok=True)  # urlretrieve fails if the directory is missing
for i in range(1, 50):
    print("Downloading page {}".format(i))
    time.sleep(1 + random.random())  # random delay so we do not hammer the server
    getDbImage(i)
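urlretrieve offers little control over headers or error handling, and many image hosts reject requests that lack a browser-like User-Agent. Below is a minimal sketch of a more defensive download helper built on requests; the function name download_image and the header value are illustrative assumptions, not part of the original script:

import os
import requests

def download_image(url, path, timeout=10):
    """Fetch one image and write it to disk; return True on success."""
    headers = {'User-Agent': 'Mozilla/5.0'}  # illustrative browser-like header
    try:
        resp = requests.get(url, headers=headers, timeout=timeout)
        resp.raise_for_status()
    except requests.RequestException as e:
        print('Failed to fetch {}: {}'.format(url, e))
        return False
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    with open(path, 'wb') as f:
        f.write(resp.content)
    return True

Inside the loop, urllib.request.urlretrieve(link, ...) could then be swapped for download_image(link, 'images/{}.jpg'.format(x)).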
Scraping Job Listings with Python
import requests
from bs4 import BeautifulSoup
import time
import random

# Request headers
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
    'Connection': 'keep-alive'
}

def get_pages_link():
    for page in range(1, 25):
        url = "https://www.liepin.com/zhaopin/?init=-1&headckid=8b8a11cc05e34e78&fromSearchBtn=2&ckid=8b8a11cc05e34e78&degradeFlag=0&key=python&siTag=I-7rQ0e90mv8a37po7dV3Q~fA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_industry&d_ckId=ac6bf5fc984fbad69fd09f0270ca7f1f&d_curPage=0&d_pageSize=40&d_headId=ac6bf5fc984fbad69fd09f0270ca7f1f&curPage={}".format(page)
        web_data = requests.get(url, headers=header)
        time.sleep(1 + random.random())  # random delay between requests
        soup = BeautifulSoup(web_data.text, 'lxml')
        for item in soup.select('.sojob-list li'):
            job = item.select('.job-info h3 a')[0].get_text().strip()
            condition = item.select('.condition')[0].get_text()
            (salary, addr, edu, experience) = condition.split()
            print(job)
            print(salary)
            print(addr)
            print(edu)
            print(experience)
            date = item.select('time')[0].attrs['title']
            print(date)
            company = item.select('.company-name')[0].get_text().strip()
            print(company)
            field = item.select('.field-financing')[0].get_text().strip()
            print(field)
            node = item.select('.temptation')
            if node:  # not every listing has a perks ("temptation") field
                temptation = node[0].get_text().strip()
                print(temptation)
            print('-' * 20)
        print('\n' + ' - ' * 50 + '\n')

if __name__ == '__main__':
    get_pages_link()
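One fragile spot: condition.split() is unpacked into exactly four names, so any listing whose condition text has a different number of fields raises ValueError and kills the whole run. A minimal defensive sketch (parse_condition is a hypothetical helper and the sample string is invented):

def parse_condition(condition):
    """Split the condition text into (salary, addr, edu, experience), padding with None."""
    parts = condition.split()
    parts = (parts + [None] * 4)[:4]  # tolerate missing or extra fields
    return tuple(parts)

salary, addr, edu, experience = parse_condition('15-25k 北京 本科 3年以上')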
Scraping the Douban Movie Top 250 with Python
1. Examine the page structure
First, open the Douban page at https://movie.douban.com/top250 . Inspecting the elements shows that each movie sits neatly inside its own li tag, which makes the data very convenient to scrape.
Expanding the first li reveals all the fields we need.
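A quick interactive check can confirm this before writing the full scraper. Here is a minimal sketch using the same #wrapper li selector the script below relies on; note the match count may include a few non-movie li elements, so treat 25 as approximate:

import requests
from bs4 import BeautifulSoup

html = requests.get('https://movie.douban.com/top250',
                    headers={'User-Agent': 'Mozilla/5.0'}).text
soup = BeautifulSoup(html, 'lxml')
items = soup.select('#wrapper li')
print(len(items))  # expect roughly 25 movie entries on the first page
print(items[0].select('.hd > a > span')[0].text)  # title of the first movie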
2. Observe how the URL changes
Clicking through the page numbers shows that the URL changes in a very regular pattern; it almost feels as if this site were made for crawlers.
The URL for page 2 is: https://movie.douban.com/top250?start=25&filter=
The URL for page 3 is: https://movie.douban.com/top250?start=50&filter=
Each page holds exactly 25 movies, so the pattern is clear. The &filter= parameter can be dropped without affecting anything, and setting start=0 still returns the first page, as the sketch below shows.
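Since start simply steps through 0, 25, ..., 225, all ten page URLs can be generated in one line:

urls = ['https://movie.douban.com/top250?start={}'.format(n) for n in range(0, 250, 25)]
print(urls[0])   # https://movie.douban.com/top250?start=0
print(urls[-1])  # https://movie.douban.com/top250?start=225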
3. Writing the code
pip install lxml
pip install beautifulsoup4
import requests
from bs4 import BeautifulSoup
import time
import random

# Request headers
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
    'Connection': 'keep-alive'
}
movie_list = []

def get_pages_link():
    # pages live at https://movie.douban.com/top250?start=0, 25, ..., 225
    for start in range(0, 250, 25):
        url = "https://movie.douban.com/top250?start={}".format(start)
        web_data = requests.get(url, headers=header)
        time.sleep(1 + random.random())  # random delay between requests
        soup = BeautifulSoup(web_data.text, 'lxml')
        for movie in soup.select('#wrapper li'):
            href = movie.select('.hd > a')[0]['href']
            name = movie.select('.hd > a > span')[0].text
            star = movie.select('.rating_num')[0].text
            people = movie.select('.star > span')[3].text
            try:
                quote = movie.select('.inq')[0].text
            except IndexError:  # some movies have no one-line quote
                print("No quote for this movie")
                quote = None
            data = {
                'url': href,
                'votes': people,
                'title': name,
                'rating': star,
                'quote': quote
            }
            movie_list.append(data)
            print(data)
        print('\n' + ' - ' * 50 + '\n')

if __name__ == '__main__':
    get_pages_link()
The script kept throwing errors during runs. It turned out that one movie on page 9 has no quote, so the try statement is needed to keep the crawl from stopping halfway.
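The same crash can also be avoided without try/except by checking the selector result before indexing. A minimal sketch (select_text is a hypothetical helper, not part of the script above):

def select_text(node, selector, default=None):
    """Return the text of the first match for selector, or default when nothing matches."""
    found = node.select(selector)
    return found[0].text if found else default

# quote = select_text(movie, '.inq')  # None when a movie has no one-line quote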