爬取小猪短租上300条租房信息,包括标题、地址、日租金、房东、房东性别、房东头像、房屋图片等,如下图所示:
最终程序运行截图:
完整代码:
from bs4 import BeautifulSoup
import requests
import time
def get_detail_info(url, data=None):
    """Scrape one rental-listing detail page and print its fields.

    Extracts title, address, daily rent, cover image, host avatar, host
    name and host gender from a xiaozhu.com listing page.

    :param url: absolute URL of the listing detail page
    :param data: unused, kept for interface compatibility
    :return: dict with the scraped fields (also printed)
    """
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    time.sleep(2)  # throttle so we don't hammer the server between requests
    title = soup.select('h4 > em')[0].get_text()
    address = soup.select('span.pr5')[0].get_text()
    rent = soup.select('div.day_l > span')[0].get_text()
    image = soup.select('#curBigImage')[0].get('src')
    lorder_pic = soup.select('div.member_pic > a > img')[0].get('src')
    lorder_name = soup.select('a.lorder_name')[0].get_text()
    # NOTE: .get('class') returns a LIST of class names, not a string.
    lorder_sex = soup.select(
        '#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > span'
    )[0].get('class')

    def get_gender(class_names):
        # BUG FIX: the original compared the class LIST to a string, which
        # is always False, so every host was reported as "女". Test for
        # membership in the class list instead.
        if class_names and 'member_boy_ico' in class_names:
            return '男'
        return '女'

    data = {
        '标题': title,
        '地址': address,
        '日租金': rent,
        '图片': image,
        '房东头像': lorder_pic,
        '房东姓名': lorder_name,
        '房东性别': get_gender(lorder_sex)
    }
    print(data)
    return data
def get_links(url):
    """Fetch one search-results page and scrape every listing it links to.

    :param url: URL of a xiaozhu.com search-results (list) page
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'lxml')
    # Each <li> in the result list wraps one anchor pointing at a detail page.
    for anchor in soup.select('#page_list > ul > li > a'):
        get_detail_info(anchor.get('href'))
# BUG FIX: the original passed a generator expression directly to .format(),
# i.e. 'p{}-0/'.format(str(i) for i in range(1, 10)), which yields a SINGLE
# bogus URL containing '<generator object ...>'. Build one URL per page with
# a list comprehension instead.
urls = ['http://su.xiaozhu.com/search-duanzufang-p{}-0/'.format(i)
        for i in range(1, 10)]
# Scrape all 9 pages of rental listings
for url in urls:
    get_links(url)
总结:
使用requests库获取网页内容(get、post等方法),再用BeautifulSoup解析HTML并提取所需字段。