A Python crawler example with XPath (lxml) and pandas
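The script below crawls the SEU technology-transfer site (ttc.seu.edu.cn): requests fetches each page, lxml's XPath pulls the fields out of the HTML, and pandas appends each record to an Excel workbook (writing .xlsx also requires an Excel engine such as openpyxl). The extraction pattern the script leans on throughout, shown first as a minimal standalone sketch with made-up HTML (the class names mirror the ones used below; this snippet is not part of the crawler itself):

from lxml import etree

snippet = ('<div class="article-title-all">Demo title</div>'
           '<a class="article-title" href="/abutment/demo">link</a>')
html = etree.HTML(snippet)                                          # lxml wraps the fragment in <html><body>
title = html.xpath('//div[@class="article-title-all"]/text()')[0]   # -> 'Demo title'
href = html.xpath('//a[@class="article-title"]')[0].get('href')     # -> '/abutment/demo'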

from lxml import etree
import pandas as pd
import requests
import sys
import os


def get_page_content(url):
    """Fetch an achievement page and extract its content."""
    head = 'http://ttc.seu.edu.cn'
    header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
              'referer': 'http://ttc.seu.edu.cn'}

    while True:
        try:
            print(f'\033[1;32mDownloading: {url}\033[0m')
            content = requests.get(url, headers=header, timeout=10)
        except KeyboardInterrupt:
            print('Stopped fetching achievement pages, exiting...')
            sys.exit()
        except requests.RequestException:
            print('\033[1;31mFailed to fetch the page, retrying...\033[0m')
            continue
        break
    html = etree.HTML(content.text)
    title = html.xpath('//div[@class="article-title-all"]/text()')[0]  # achievement title
    # html.xpath('//dl/*/text()') would return labels and values together, but empty values leave no placeholder
    technical_field = html.xpath('//dl/dd/span/text()')  # technical field tags
    technical_field = ', '.join(technical_field)
    technical_describe = html.xpath('//div[@class="gray-box-no-border resource-big-detail"]/p/text()')  # achievement description
    technical_describe = '<br>'.join(technical_describe)
    content1 = html.xpath('//div[@class="col-sm-6 col-xs-12"]/dl/dd')  # certificate, organization, date, stage, field, IP status, ownership
    content2 = html.xpath('//div[@class="col-md-6 col-xs-12"]/div/dl/dd')  # cooperation terms, organization profile
    pics = html.xpath('//div[@class="sp-wrap"]/a')  # image anchor nodes
    pics_link = [head + a.get('href') for a in pics]  # absolute image URLs from the href attributes
    pics_links = [', '.join(pics_link)]
    contact = html.xpath('//div[@class="gray-box-no-border"]/p/text()')  # contact information
    content1 = [i.text for i in content1]
    content2 = [j.text for j in content2]
    content1[4] = technical_field  # the field <dd> holds <span> tags, so fill in the joined text here

    content = [title] + content1 + [technical_describe] + content2 + contact + pics_links + [url]
    save_to_excel(content, title)  # save the extracted record


def save_to_excel(content, title):
    """Append one record to the Excel workbook."""
    print('Saving data...')
    file_name = 'ddcg.xlsx'
    keylist = ['Achievement Name', 'IP Certificate No.', 'Organization', 'Publish Date', 'Project Stage',
               'Technical Field', 'IP Status', 'Ownership', 'Description', 'Project Leader',
               'Cooperation Mode', 'Transfer Price', 'Legal Representative', 'Organization Size',
               'Organization Type', 'Phone', 'Email', 'Address', 'Image Links', 'Source URL']
    tem_zip = list(zip(keylist, content))

    c = {}  # one-row record keyed by column name
    for key, value in tem_zip:
        c[key] = [value]
    pd_c = pd.DataFrame(c)  # convert to a one-row DataFrame
    if os.path.exists(file_name):
        data = pd.read_excel(file_name)  # read the whole file back and rewrite it with the new row appended
        data = pd.concat([data, pd_c])
        data.to_excel(file_name, index=False)
        print('%s: appended!' % title)
    else:
        pd_c.to_excel(file_name, index=False)
        print('%s: saved!' % title)


def get_page_link(list_link):
    """Get the achievement links on a list page, plus the next list-page URL."""
    head = 'http://ttc.seu.edu.cn'
    header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
              'referer': 'http://ttc.seu.edu.cn'}
    num = list_link.split('=')[-1]
    print(f'Fetching achievement list page {num}...')
    while True:
        try:
            page_data = requests.get(list_link, headers=header, timeout=10)
            break
        except KeyboardInterrupt:
            print('Download cancelled.\nStopped at: %s' % list_link)
            with open('list_link.txt', 'w') as lobj:
                lobj.write(list_link)  # save the current list page so a later run can resume
            sys.exit()
        except requests.RequestException:
            print('Failed to fetch the list page, retrying...')
            continue

    data_xpath = etree.HTML(page_data.text)
    cc_list_link = data_xpath.xpath('//a[@class="article-title"]')  # achievement link anchors
    alist = [head + a.get('href') for a in cc_list_link]  # absolute achievement URLs on this page
    next_list_e = data_xpath.xpath('//nav/ul/li[last()]/a')  # "next page" anchor in the pager
    next_list_page_e = next_list_e[0].get('href') if next_list_e else ''
    # only follow the pager link while it still points into the achievement search
    get_next_list = head + next_list_page_e if next_list_page_e.startswith('/abutment') else ''

    return get_next_list, alist


if __name__ == '__main__':
    url = 'http://ttc.seu.edu.cn/abutment/technologyachievementsearch?pageIndex=2'
    while True:
        next_list_link, cclist = get_page_link(url)
        while cclist:
            link = cclist.pop()
            while True:
                try:
                    get_page_content(link)
                    with open('download_link.txt', 'a') as dobj:
                        dobj.write(link + '\n')  # record the link as downloaded
                    break
                except KeyboardInterrupt:
                    print('Exiting, saving progress...')
                    with open('cc_list.txt', 'w') as cobj:
                        cclist.append(link)  # put the unfinished link back
                        cobj.write('\n'.join(cclist))  # one pending link per line
                    with open('next_list.txt', 'w') as lobj:
                        lobj.write(next_list_link)
                    sys.exit()
                except Exception as e:
                    print('Unexpected error:', e)
                    # fall through and retry this link
        if next_list_link == '':
            print('All pages downloaded, exiting...')
            for i in ['cc_list.txt', 'next_list.txt']:
                if os.path.exists(i):
                    os.remove(i)
            sys.exit()
        url = next_list_link
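
One gap worth noting: the script writes progress files on interrupt (list_link.txt, next_list.txt, cc_list.txt) but never reads them back, so a restart always begins from the hard-coded URL. A minimal resume step could look like this (load_progress is a hypothetical helper, not part of the original script; the file names match the ones written above):

def load_progress(default_url):
    """Hypothetical helper: restore the start URL and the pending links
    that the KeyboardInterrupt handlers above saved to disk."""
    url, pending = default_url, []
    if os.path.exists('next_list.txt'):  # saved next-page URL
        with open('next_list.txt') as f:
            url = f.read().strip() or default_url
    if os.path.exists('cc_list.txt'):    # saved unfinished achievement links
        with open('cc_list.txt') as f:
            pending = [line.strip() for line in f if line.strip()]
    return url, pending

Calling load_progress(url) at the top of the __main__ block, and draining the pending list before entering the main loop, would make the saved state actually take effect.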