【Python Crawler】Scraping "City Legend" (城市传奇) tournament data
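
This script scrapes team and player data for the "City Legend" (城市传奇) tournament: it pulls the team list from a JSON endpoint, fetches each team's players either from the JSON endpoint or by parsing the HTML pages with XPath, prints the match-up results, and writes the player rows to a CSV file.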

import requests,json,csv
from lxml import etree


# url = '/match/team_players.htm?divisionId=874902863023837184&teamId=892361870411960321'
# res = requests.get(url).text
# print(res)
# select = etree.HTML(res)
# name = select.xpath('//span[@class="player-name-value player-short-words"]/text()')
# print(name)

# url = "/match/team_players_json.htm?divisionId=874902863023837184&teamId=892361870411960321&page=2"
# res = requests.post(url).text
# data = json.loads(res)['data']
# count = data['count']
# print(count)
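
# Both JSON endpoints appear to return a payload of the shape sketched below
# (inferred solely from the keys this script reads; any other fields are
# unverified):
#
#   {"data": {"count": <total records>, "list": [<one dict per record>, ...]}}
#
# join_teams_json.htm records carry teamName/teamId/teamPic, while
# team_players_json.htm records carry nickname/clothNo/area/age/height/weight.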


def post_team_name(url):
    try:
        res = requests.post(url.format(1)).text
        data = json.loads(res)['data']
        count = data['count']
        # print(count % 10)
        # if count % 10 != 0 the last page is partial, so floor division needs +1; the extra +1 compensates for range() excluding its end value
        pages = count // 10 + 2 if count % 10 > 0 else count // 10 + 1
        # print(pages)
        for page in range(1,pages):
            page_res = requests.post(url.format(page)).text
            page_data = json.loads(page_res)['data']
            for item in page_data['list']:  # avoid shadowing the built-in list
                teamName = item['teamName']
                teamId = item['teamId']
                teamPic = item['teamPic']
                # print(teamName, teamId, teamPic)

                # the site exposes both a JSON endpoint and HTML pages, so the data can be parsed either as JSON or with XPath

                # parse via JSON
                # post_players_information(teamId,teamName)

                # parse via XPath
                post_xpath_players_information(teamId,teamName)
    except Exception as e:
        print("post_team_name parse error:", e)

# fetch player data via the JSON endpoint
def post_players_information(teamId,teamName):
    try:
        play_url = "/match/team_players_json.htm?divisionId=874902863023837184&teamId={}".format(
            teamId)
        res = requests.post(play_url).text
        play_data = json.loads(res)['data']
        count = play_data['count']
        # print(count)
        pages = count // 10 + 2 if count % 10 > 0 else count // 10 + 1
        # print(pages)
        for page in range(1,pages):
            url = "/match/team_players_json.htm?divisionId=874902863023837184&teamId={}&page={}".format(
                teamId,page)
            res = requests.post(url).text
            play_data = json.loads(res)['data']
            for item in play_data['list']:  # avoid shadowing the built-in list
                print(teamName, teamId, count, item['nickname'], item['clothNo'], item['area'], item['age'], item['height'], item['weight'])
                writer.writerow((teamName, teamId, count, item['nickname'], item['clothNo'], item['area'], item['age'], item['height'], item['weight']))
    except Exception as e:
        print("post_players_information parse error:", e)

# fetch player data by parsing the HTML pages with XPath
def post_xpath_players_information(teamId,teamName):
    try:
        play_url = "/match/team_players_json.htm?divisionId=874902863023837184&teamId={}".format(
            teamId)
        res = requests.post(play_url).text
        play_data = json.loads(res)['data']
        count = play_data['count']
        # print(count)
        pages = count // 10 + 2 if count % 10 > 0 else count // 10 + 1
        # print(pages)
        for page in range(1, pages):
            url = "/match/team_players.htm?divisionId=874902863023837184&teamId={}&page={}".format(teamId,page)
            res = requests.post(url).text
            select = etree.HTML(res)
            # name
            names = select.xpath('//span[@class="player-name-value player-short-words"]/text()')
            # jersey number
            nums = select.xpath('//span[@class="player-num-value"]/text()')
            # region
            addresses = select.xpath('//span[@class="player-address-value player-short-words"]/text()')
            # age
            ages = select.xpath('//span[@class="player-age-value"]/text()')
            # height
            heights = select.xpath('//span[@class="player-height-value"]/text()')
            # weight
            weights = select.xpath('//span[@class="player-weight-value"]/text()')
            for i in range(len(nums)):
                name = names[i]
                num = nums[i]
                address = addresses[i]
                age = ages[i]
                height = heights[i]
                weight = weights[i]
                print(teamName, teamId, count, name, num, address, age, height, weight)

                writer.writerow((teamName, teamId, count, name, num, address, age, height, weight))
    except Exception as e:
        print("post_xpath_players_information parse error:", e)



# team match-up (fixture) information
def post_team_game(url):
    try:
        res = requests.post(url).text
        select = etree.HTML(res)

        game_times = select.xpath('//div[@class="event-time"]/span[1]/text()')
        game_sorts = select.xpath('//div[@class="event-time"]/span[2]/text()')
        team_one_names = select.xpath('//div[@class="team-one"]/span[@class="team-name"]/text()')
        team_one_scores = select.xpath('//div[@class="team-one"]/span[@class="team-score"]/text()')
        team_two_names = select.xpath('//div[@class="team-two"]/span[@class="team-name"]/text()')
        team_two_scores = select.xpath('//div[@class="team-two"]/span[@class="team-score"]/text()')

        for i in range(len(game_times)):
            game_time = game_times[i]
            game_sort = game_sorts[i]
            team_one_name = team_one_names[i]
            team_one_score = team_one_scores[i]
            team_two_name = team_two_names[i]
            team_two_score = team_two_scores[i]
            print('{} {}: {} ({} pts) vs {} ({} pts)'.format(game_time, game_sort, team_one_name, team_one_score, team_two_name, team_two_score))

    except Exception as e:
        print("post_team_game parse error:", e)



if __name__ == "__main__":
    # newline='' stops the csv module from writing blank rows on Windows
    f = open('chengshichuanqi.csv', 'w', encoding='utf-8', newline='')
    writer = csv.writer(f)
    writer.writerow(('team name', 'team id', 'player count', 'player name', 'jersey number', 'region', 'age', 'height', 'weight'))
    base_url = '/match/join_teams_json.htm?divisionId=874902863023837184&page={}'
    game_url = '/match/success_events.htm?divisionId=874902863023837184'
    post_team_game(game_url)
    post_team_name(base_url)
    f.close()  # flush and release the CSV file
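
A possible hardening step (a sketch, not part of the original script): every requests.post() call above opens a fresh connection with default headers. Reusing one requests.Session and setting a User-Agent is a common courtesy toward sites that throttle bare clients; whether this particular site requires it is an assumption.

import requests

session = requests.Session()
session.headers.update({'User-Agent': 'Mozilla/5.0'})  # placeholder UA string, adjust as needed
# ...then call session.post(url) wherever the functions above call requests.post(url)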