Reflections
This is my first scraper written against a real website, and finishing it left me feeling genuinely proud. I never imagined I could learn to write a scraper this quickly. What I have now is still simple and rough, but I will keep improving it.
Code
from bs4 import BeautifulSoup
import requests
import json
import random
def gender_finder(lorder_genders_raw):
    # xiaozhu marks a male host's icon with the CSS class 'member_ico';
    # any other class (e.g. 'member_ico1') is treated as female.
    # Return a list so it zips correctly with the other per-listing lists.
    results = []
    for lorder_gender_raw in lorder_genders_raw:
        if lorder_gender_raw.get('class') == ['member_ico']:
            results.append('male')
        else:
            results.append('female')
    return results
def get_information(web_data):
    soup = BeautifulSoup(web_data.text, 'lxml')
    titles = soup.select('div.pho_info > h4 > em')
    addresses = soup.select('div.pho_info > p > span')
    prices = soup.select('div.day_l > span')
    room_pictures = soup.select('#curBigImage')
    lorder_pictures = soup.select('div.member_pic > a > img')
    lorder_genders_raw = soup.select('div.member_pic > div')
    lorder_names = soup.select('div.w_240 > h6 > a')
    lorder_genders = gender_finder(lorder_genders_raw)
    # each detail page describes a single listing, so every list above
    # holds one element and the zip yields exactly one record
    for title, address, price, room_picture, lorder_gender, lorder_picture, lorder_name in zip(
            titles, addresses, prices, room_pictures, lorder_genders, lorder_pictures, lorder_names):
        data = {
            'title': title.get_text(),
            'address': address.get_text().strip('\n '),
            'price': price.get_text(),
            'room_picture': room_picture.get('src'),
            'lorder_gender': lorder_gender,
            'lorder_name': lorder_name.get_text(),
            'lorder_picture': lorder_picture.get('src'),
        }
        return data
def get_url(url_content):
    content_data = requests.get(url_content, proxies=random.choice(ips), timeout=6)
    soup = BeautifulSoup(content_data.text, 'lxml')
    urls_raw = soup.select('#page_list > ul > li > a')
    # collect the detail-page link of every listing on this results page
    urls = [url.get('href') for url in urls_raw]
    return urls
# build a proxy pool from a public proxy list; each line is assumed to be
# a JSON dict already in the shape that requests' proxies parameter expects
resp = requests.get("http://tor1024.com/static/proxy_pool.txt")
ips = []
for line in resp.text.strip().split("\n"):
    try:
        ips.append(json.loads(line))
    except Exception as e:
        print(e)
# the first 10 search-result pages for short-term rentals in Beijing
url_contents = ['http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(i) for i in range(1, 11)]
for url_content in url_contents:
    urls = get_url(url_content)
    for url in urls:
        web_data = requests.get(url, proxies=random.choice(ips), timeout=6)
        data = get_information(web_data)
        print(data)
Summary
- Always scrape through a proxy. Proxies, proxies, proxies (see the sketch after this list)
- Use .strip() to trim unwanted characters from scraped text
- Splitting the program into small functions makes it much easier to write and debug
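
A minimal sketch of the proxy pattern from the first point, assuming each line of the pool file is a JSON dict in the {'http': 'http://ip:port'} shape that requests expects; the fetch_page helper and its retry count are hypothetical additions, not part of the script above:

import json
import random
import requests

# load the proxy pool; each line is assumed to be a JSON dict such as
# {"http": "http://1.2.3.4:8080"}, matching requests' proxies parameter
resp = requests.get("http://tor1024.com/static/proxy_pool.txt")
ips = [json.loads(line) for line in resp.text.strip().split("\n")]

def fetch_page(url, retries=3):
    # hypothetical helper: retry with a fresh random proxy on each failure
    for _ in range(retries):
        try:
            return requests.get(url, proxies=random.choice(ips), timeout=6)
        except requests.RequestException:
            continue  # dead proxy or timeout: try another one
    raise RuntimeError('all proxies failed for ' + url)

# the .strip() tip from the second point: trim surrounding newlines/spaces
print('\n  Beijing  \n'.strip('\n '))  # -> 'Beijing'

Picking a proxy per attempt rather than per run means one dead proxy costs only a retry instead of the whole crawl.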