事情的开始是这样的:
最近准备要换房子了,经同事推荐上豆瓣找房,可以省去中介费.(大写的穷! :( )但是上去一看,豆瓣小组没有搜索🔍的功能,帖子只能一条一条地看,那得看到什么时候啊.考虑到近视越来越严重,干脆撸波代码把数据抓下来吧...正好把最近学习的东西用上.
豆瓣.py (原谅我命名的不规范)
import re
import time
from CLMongo import mongo,mongo_connect_collection
import pymongo
from selenium import webdriver
class DB_request:
    """Crawl Douban group search results with Selenium and store today's
    rental posts into MongoDB via the CLMongo helpers."""

    # Base URL of the group search (cat=1019 selects the groups category).
    basic_url = 'https://www.douban.com/group/search?cat=1019&q='
    # Pagination offset for the search-result pages.
    # NOTE(review): mutated as an instance attribute (self.start += 20), so
    # the class-level value only seeds the first request.
    start = 0
    # Flipped to False as soon as a post older than today is seen, which
    # stops paging through a group's discussion list.
    isToday = True

    def __init__(self, search_data):
        """Kick off the crawl for the given (not URL-encoded) search keyword."""
        self.search_data = search_data
        one_request_url = self.basic_url + self.search_data
        self.requestTo_douban(one_request_url)

    def requestTo_douban(self, request_url=basic_url, page=start):
        """Fetch one search-result page, crawl every group found on it, then
        recurse into the next result page."""
        driver, data = self.request_from_url(request_url)
        driver.quit()
        group_list = self.re_findall(r'result(.*?)class=\"info\"', data)
        if group_list:
            # Walk the matched groups and pull out each group's URL.
            for agroup in group_list:
                self.isToday = True
                agroup_content_url_list = self.re_findall(r'href=\"(.*?)\"', agroup)
                if agroup_content_url_list:
                    agroup_content_url = agroup_content_url_list[0] + 'discussion?start='
                    self.content_from_agroupURL(agroup_content_url)
                else:
                    print("查找一个小组列表的 URL 失败了!")
        else:
            print("搜索的小组为空?")
        ''' 翻页 '''
        # Pagination: past page 100 Douban redirects to a login page, so the
        # author caps the crawl at "99 pages" -- already plenty of data.
        # NOTE(review): `page` actually carries the 20-per-page *offset*, so
        # `page < 99` stops after offset 100 (about 6 pages), not 99 pages;
        # compare against 99 * 20 if 99 pages are really intended -- confirm.
        if page < 99:
            self.start += 20
            page_url = 'https://www.douban.com/group/search?start=%s&cat=1019&sort=relevance&q=%s' % (self.start, self.search_data)
            self.requestTo_douban(page_url, self.start)

    def content_from_agroupURL(self, url, start='0'):
        """Fetch one page of a group's discussion list (25 rows per page) and
        hand the rows to content_handle; keeps paging while posts are from
        today (self.isToday)."""
        # Could also trigger the "next page" click event instead, which would
        # be a bit faster.
        request_url = url + start
        driver, text = self.request_from_url(request_url)
        driver.quit()
        content_list = self.re_findall(r'<tr class=\"\">(.*?)</tr>', text)
        if content_list:
            self.content_handle(content_list)
            # Still the same date -> recurse into the next page.
            if self.isToday:
                self.content_from_agroupURL(url, str(int(start) + 25))
            else:
                print('已经是不同日期了', self.isToday)

    def content_handle(self, list):
        """Filter the <tr> fragments down to posts from today and fetch each
        post's detail page."""
        # NOTE(review): the parameter shadows the builtin `list`.
        for content in list:
            content_time = self.re_findall(r'class=\"time\">(.*?)</td>', content)
            # Day-of-month pulled out of the row's "MM-DD HH:MM"-style
            # timestamp -- TODO confirm that format against live markup.
            d = self.re_findall(r'-(.\d) ', content_time[0])[0]
            local_d = time.strftime('%d', time.localtime())
            # Only today's posts are wanted, so compare the day strings.
            if d == local_d:
                content_url = self.re_findall(r'class=\".*?href=\"(.*?)\" title', content)
                content_title = self.re_findall(r'title=\"(.*?)\" ', content)
                # NOTE(review): `dict` shadows the builtin.
                dict = {'day': str(d),
                        'title': content_title[0]}
                self.content_detail(content_url[0], dict)
            else:
                print("不是最新的消息了已经,我就不抓取咯..", d, local_d)
                self.isToday = False
                break

    def content_detail(self, detail_url, dict):
        """Fetch a post's detail page, extract images and description into
        `dict`, and write the record to MongoDB."""
        # The URL is treated as the unique key for de-duplication.
        # NOTE(review): is_repeat() returns True when the URL is NOT yet
        # stored (inverted relative to its name) -- see CLMongo.
        if mongo.is_repeat(detail_url):
            driver, detail_data = self.request_from_url(detail_url)
            driver.quit()
            dict['url'] = detail_url
            detail_content = self.re_findall(r'id=\"link-report\"(.*?)<div id=\"link-report_group\"',
                                             detail_data)
            if detail_content:
                image_list = self.re_findall(r'<img src=\"(.*?)\"',
                                             detail_content[0])
                title_list = self.re_findall(r'<p>(.*?)</p>',
                                             detail_content[0])
                if image_list:
                    dict['images'] = image_list
                    dict['images_count'] = len(image_list)
                else:
                    dict['images'] = []
                    dict['images_count'] = 0
                if title_list:
                    # Join the <p> paragraphs into one description string.
                    detail_title = ''
                    for title in title_list:
                        detail_title = detail_title + title + '\n'
                    dict['content'] = detail_title
                else:
                    dict['content'] = ''
                self.write_db(dict)

    ''' 公共方法 '''
    ''' 正则查找,返回 list '''
    # Shared helper: regex search (DOTALL) over `string`; returns the list of
    # captured groups. NOTE(review): `compile` and `list` shadow builtins.
    def re_findall(self, compile, string):
        pattern = re.compile(compile, re.S)
        list = re.findall(pattern, string)
        return list

    ''' 根据 URL 请求数据,并返回 '''
    # Open the URL in a fresh Chrome instance and return (driver, page HTML).
    # The caller is responsible for driver.quit().
    def request_from_url(self, url):
        driver = webdriver.Chrome()
        print("url -->", url)
        # chrome_options = webdriver.ChromeOptions()
        # Disable image loading:
        # prefs = {"profile.managed_default_content_settings.images": 2}
        # chrome_options.add_experimental_option("prefs", prefs)
        # driver = webdriver.Chrome(chrome_options=chrome_options)
        try:
            driver.get(url)
        except Exception as e:
            # Best-effort retry: log the error and reload once.
            print(e)
            driver.refresh()
        html_data = driver.page_source
        return driver, html_data

    def write_db(self, dict):
        """Insert one record dict into MongoDB via the shared handle."""
        mongo.insert_handle(dict)
if __name__ == '__main__':
    # Drop records whose 'day' field is not today's before a fresh crawl.
    mongo.remove_time_out()
    # search_data = "%E5%8C%97%E4%BA%AC%E7%A7%9F%E6%88%BF"  # URL-encoded form of the same keyword
    search_data = "北京租房"
    DB_request(search_data)
CLMongo.py
import pymongo
import time
import random
class MongoHandle(object):
    """Thin wrapper around the pymongo collection used by the crawler.

    All methods except connect_collection() operate through the module-level
    ``mongo_connect_collection`` handle created at import time.
    """

    def connect_collection(self):
        """Connect to the local MongoDB and return the ``test.DouBan`` collection."""
        print('连接 MongoDB ...')
        client = pymongo.MongoClient(host='127.0.0.1', port=27017)
        # Use the 'test' database.
        db = client['test']
        # print(db.collection_names())  # list all collection names if needed
        # Hand back the DouBan collection.
        return db.DouBan

    def insert_handle(self, dict):
        """Best-effort insert of one record dict; errors are logged, not raised."""
        try:
            mongo_connect_collection.insert_one(dict)
        except Exception as e:
            print(e)
        finally:
            # Random 1-8 s pause after every insert to throttle the crawl.
            time.sleep(random.randint(1, 8))

    def remove_time_out(self):
        """Delete every stored record whose 'day' field is not today's
        day-of-month, keeping only same-day posts."""
        local_d = time.strftime('%d', time.localtime())
        try:
            # Compare against today's day-of-month and drop everything else.
            # remove() is deprecated (and removed in PyMongo 4.x);
            # delete_many() is the supported equivalent.
            mongo_connect_collection.delete_many({"day": {"$ne": local_d}})
        except Exception as e:
            print("出错了 -->", e)

    def is_repeat(self, url):
        """Return True when `url` is NOT yet stored (i.e. safe to crawl).

        NOTE: the name reads inverted -- callers use it as
        ``if mongo.is_repeat(url): crawl()``.
        """
        return mongo_connect_collection.find_one({"url": url}) is None
# Shared singleton handler plus the collection handle its methods use.
mongo = MongoHandle()
# Reuse the same instance instead of constructing a second throwaway one.
mongo_connect_collection = mongo.connect_collection()
关于 MongoDB 的部分操作移步: MongoDB各种查询
如有侵权,请告知删除
刚学爬虫不久,今后有机会更深入学习的时候再改进代码.欢迎大神指正!