Scraping 美股吧 (the US-stocks board on guba.eastmoney.com): Final Revision

The code first:

# -*- coding:utf-8 -*-
import requests
from lxml import etree
import csv

import sys

# Python 2 hack: make utf-8 the default codec so unicode values survive
# implicit str/unicode conversions (e.g. inside the csv writer)
reload(sys)
sys.setdefaultencoding('utf-8')

start_url = "http://guba.eastmoney.com/list,meigu_1.html"
# a browser-like User-Agent, since the site may not serve list pages without one
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/58.0.3029.110 Safari/537.36"
}


# def get_total_page(start_url):
#    html = requests.get(url=start_url, headers=headers).content
#    selector = etree.HTML(html)
#    sum_page = selector.xpath("//span[@class='sumpage']/text()")
#    return sum_page
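
# Kept for reference: if the pager's "sumpage" span is present, the hard-coded
# range(1, 23) in parse_title() below could be driven by it instead,
# e.g. (untested sketch): sum_page = int(get_total_page(start_url)[0])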


def parse_title():
    """Scrape every list page and return one dict per post."""
    # sum_page = get_total_page(start_url)
    rows = []
    for num in range(1, 23):  # 22 list pages at the time of writing
        url = "http://guba.eastmoney.com/list,meigu_" + str(num) + ".html"
        html = requests.get(url=url, headers=headers).content
        selector = etree.HTML(html)
        # drop the first and last child divs (the header and pager rows)
        items = selector.xpath("//div[@id='articlelistnew']/div[position()>1 and position()<last()]")
        for item in items:
            # lxml already returns unicode for non-ASCII text, so no .decode() needed
            title = item.xpath("span[@class='l3']/a/text()")[0]
            # some posts have no author link; fall back to "anonymous netizen"
            author_nodes = item.xpath("span[@class='l4']/a/text()")
            author = author_nodes[0] if author_nodes else u'匿名网友'
            read = item.xpath("span[@class='l1']/text()")[0]
            comment_num = item.xpath("span[@class='l2']/text()")[0]
            post_time = item.xpath("span[@class='l6']/text()")[0]
            last_update = item.xpath("span[@class='l5']/text()")[0]
            link = item.xpath("span[@class='l3']/a/@href")
            # hrefs are sometimes root-relative ("/news,...") and sometimes bare
            complete_link = 'http://guba.eastmoney.com' + link[0] if str(link[0]).startswith('/') else 'http://guba.eastmoney.com/' + link[0]
            rows.append(
                {'title': title, 'author': author, 'read': read, 'comment_num': comment_num, 'post_time': post_time,
                 'last_update': last_update, 'link': link[0], 'complete_link': complete_link})
    return rows


def parse_content_comment():
    links = []
    temp = parse_title()
    for item in temp:
        # reuse the absolute URLs already normalized in parse_title()
        links.append(item['complete_link'])
    rows = []
    # the first posts on the list used a layout with the body inside div#zw_body
    for link in links[0:8]:
        # .text is already unicode; the extra .decode() here was the mojibake bug
        html = requests.get(url=link, headers=headers).text
        selector = etree.HTML(html)
        lines = {}
        content_nodes = selector.xpath("//div[@class='stockcodec']/div[@id='zw_body']/p/text()")
        content = ''.join(content_nodes) if content_nodes else u'none'
        lines['content'] = content
        comments = selector.xpath("//div[@id='zwlist']")
        for item in comments:
            if item.xpath("div[@class='zwli clearfix']"):
                name = ''
                names = item.xpath("div/div/div/div[@class='zwlianame']/span/a/text()")
                for na in names:
                    name += na
                comment = ''
                # renamed from "comments" to avoid shadowing the outer loop's list
                comment_nodes = item.xpath("div/div/div/div[@class='zwlitext stockcodec']/text()")
                for co in comment_nodes:
                    comment += co.strip()
                time = ''
                times = item.xpath("div/div/div/div[@class='zwlitime']/text()")
                for ti in times:
                    time += ti
                lines['name'] = name
                lines['comment'] = comment
                lines['time'] = time
            else:
                lines['name'] = 'none'
                lines['comment'] = 'none'
                lines['time'] = 'none'
            rows.append(lines)
    # the remaining posts put the body text directly under div.stockcodec
    for link in links[8:]:
        html = requests.get(url=link, headers=headers).text
        selector = etree.HTML(html)
        lines = {}
        content_nodes = selector.xpath("//div[@class='stockcodec']/text()")
        content = ''.join(co.strip() for co in content_nodes) if content_nodes else u'none'
        lines['content'] = content
        comments = selector.xpath("//div[@id='zwlist']")
        for item in comments:
            if item.xpath("div[@class='zwli clearfix']"):
                name = ''
                names = item.xpath("div/div/div/div[@class='zwlianame']/span/a/text()")
                for na in names:
                    name += na
                comment = ''
                # renamed from "comments" to avoid shadowing the outer loop's list
                comment_nodes = item.xpath("div/div/div/div[@class='zwlitext stockcodec']/text()")
                for co in comment_nodes:
                    comment += co
                time = ''
                times = item.xpath("div/div/div/div[@class='zwlitime']/text()")
                for ti in times:
                    time += ti
                lines['name'] = name
                lines['comment'] = comment
                lines['time'] = time
            else:
                lines['name'] = 'none'
                lines['comment'] = 'none'
                lines['time'] = 'none'
            rows.append(lines)
    return rows


if __name__ == "__main__":
    headlines1 = ['title', 'author', 'read', 'comment_num', 'post_time', 'last_update', 'link', 'complete_link']
    headlines2 = ['content', 'name', 'comment', 'time']
    #    get_total_page(start_url)
    rows1 = parse_title()
    rows2 = parse_content_comment()
    with open('eastmoney1.csv', 'wb') as f:
        f_csv = csv.DictWriter(f, headlines1)
        f_csv.writeheader()
        f_csv.writerows(rows1)
    with open('eastmoney2.csv', 'wb') as f:
        f_csv = csv.DictWriter(f, headlines2)
        f_csv.writeheader()
        f_csv.writerows(rows2)

The result

I had no idea why the output kept coming out garbled, and this assignment nagged at me all week. Today I calmly analyzed the cause and finally found the problem. You can't imagine how happy I was.

The problem was in these two lines:
html = requests.get(url=url, headers=headers).text
html = requests.get(url=url, headers=headers).content

Look at the requests source:

    @property
    def text(self):
        """Content of the response, in unicode.

        If Response.encoding is None, encoding will be guessed using
        ``chardet``.

        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """

    # The full body of content is omitted here.
    @property
    def content(self):
        """Content of the response, in bytes."""

.text returns Unicode data.
.content returns bytes, that is, raw binary data.
I had been parsing with .content all along, which is why it kept going wrong.
That thorn in my heart is finally out! Another lesson learned!
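
To make the difference concrete, here is a minimal sketch (Python 2, using the same list URL as above; exactly which encoding requests guesses depends on the response headers):

# -*- coding:utf-8 -*-
import requests

r = requests.get("http://guba.eastmoney.com/list,meigu_1.html")

print(type(r.text))     # <type 'unicode'> -- requests has already decoded it
print(type(r.content))  # <type 'str'>     -- raw, undecoded bytes

# If requests guesses the encoding wrong, set it before reading .text:
r.encoding = 'utf-8'
html = r.text

# Or decode the raw bytes yourself:
html = r.content.decode('utf-8')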
