Scraping Job-Listing Data with lxml
Fetch the pages with requests
Parse the page content
Save the data in CSV format
Fetch the pages with requests
import requests
from lxml import etree
import time
import numpy as np
import pandas as pd
# Base URL and pagination suffix for the BOSS直聘 search results
target = 'https://www.zhipin.com/c100010000/?query=数据分析&period=1&page='
targetpage = '&ka=page-'
# A browser-like User-Agent header so the site serves normal pages
headers = {'user-agent': 'Mozilla/5.0'}
target, targetpage, and the page number together assemble the URL for each results page.
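For instance, for the first page the assembled URL looks like this (a quick sketch just to show the pattern):
page = 1
print(target + str(page) + targetpage + str(page))
# https://www.zhipin.com/c100010000/?query=数据分析&period=1&page=1&ka=page-1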
The scraped fields will be accumulated in the lists defined below.
title = []
title_link = []
salary = []
location = []
date_update = []
company_name = []
company_info = []
job_exp = []
Parse the page content
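As a minimal standalone illustration of how etree.HTML and xpath behave (using a simplified, made-up job-card snippet, not the real page markup):
snippet = '<div class="job-title">数据分析师</div>'
root = etree.HTML(snippet)          # wraps the fragment in <html><body>
print(root.xpath('//div[@class="job-title"]/text()'))  # ['数据分析师']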
Fetch the basic listing info from the first 3 result pages
for page in range(1, 4):
    oburl = target + str(page) + targetpage + str(page)
    html = requests.get(url=oburl, headers=headers)
    ab = etree.HTML(html.text)
    # Each XPath query pulls one field from every job card on the page
    title = np.concatenate((title, ab.xpath('//div[@class="job-title"]/text()')))
    title_link = np.concatenate((title_link, ab.xpath('//div[@class="job-title"]/ancestor::a/@href')))
    salary = np.concatenate((salary, ab.xpath('//span[@class="red"]/text()')))
    location = np.concatenate((location, ab.xpath('//div[@class="info-primary"]/p/child::node()[1]')))
    job_exp = np.concatenate((job_exp, ab.xpath('//div[@class="info-primary"]/p/child::node()[3]')))
    date_update = np.concatenate((date_update, ab.xpath('//div[@class="info-publis"]/p/text()')))
    company_name = np.concatenate((company_name, ab.xpath('//div[@class="company-text"]//a/text()')))
    company_info = np.concatenate((company_info, ab.xpath('//div[@class="company-text"]/p/child::node()[1]')))
    time.sleep(1)  # pause between requests to avoid hammering the server
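Because each field is collected by an independent XPath query, a single missing node on any card silently shifts the columns out of alignment. A minimal sanity check after the loop (a sketch, not part of the original script) might be:
columns = {'title': title, 'salary': salary, 'location': location,
           'job_exp': job_exp, 'company_name': company_name,
           'company_info': company_info, 'date_update': date_update,
           'title_link': title_link}
lengths = {name: len(v) for name, v in columns.items()}
assert len(set(lengths.values())) == 1, f'column length mismatch: {lengths}'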
Combine the collected variables into a DataFrame and name the columns.
data = pd.DataFrame([title, salary, location, job_exp, company_name, company_info, date_update, title_link]).T
data.rename(columns={0: 'title', 1: 'salary', 2: 'location', 3: 'job_exp', 4: 'company_name', 5: 'company_info', 6: 'date_update', 7: 'link'}, inplace=True)
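A quick look at the first few rows confirms the columns line up as expected:
print(data.head())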
Save the data in CSV format
myfile = 'web_spyder_name.csv'
# to_csv opens and writes the file itself, so no separate open() is needed;
# utf_8_sig adds a BOM so Excel displays the Chinese text correctly
data.to_csv(myfile, index=False, encoding='utf_8_sig')
The results are now stored as a CSV file.
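As an optional quick check, the file can be read back with pandas to confirm it was written correctly:
check = pd.read_csv(myfile)
print(check.shape)  # rows x columns should match the DataFrame above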