
Scraping epidemic data with a Python crawler

2022-06-29    lawsom

#coding=utf-8
import json
import time

import requests
from jsonpath import jsonpath

# Tencent News epidemic data API
url = 'https://api.inews.qq.com/newsqa/v1/query/inner/publish/modules/list?modules=statisGradeCityDetail,diseaseh5Shelf'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
}

# Request the data and parse the JSON body
response = requests.get(url, headers=headers)
response.encoding = 'utf8'
data = json.loads(response.content)

# Pull out region names and today's confirmed counts with JSONPath,
# then pair them up into a {region: count} dict
kk = jsonpath(data, '$..diseaseh5Shelf..name')
ll = jsonpath(data, '$..today.confirm')
dic = dict(zip(kk, ll))
print(dic)

# Write the results to a file named after today's date, e.g. 2022-06-29疫情.txt
c = time.strftime('%Y-%m-%d', time.localtime())
ft = open(c + '疫情' + '.txt', 'w', encoding='utf-8')
ft.write('疫情日期')
ft.write(c)
ft.write('\n')

for i, j in dic.items():
    # Only regions that currently have cases
    if j > 0:
        print(str(i))
        print(str(j))
        ft.write(str(i))
        ft.write('--')
        ft.write(str(j))
        ft.write('\n')

ft.close()

'''
# Alternative loop body: write data for all regions, with no filter
print(str(i))
print(str(j))
ft.write(str(i))
ft.write('--')
ft.write(str(j))
ft.write('\n')
'''
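The two JSONPath expressions above simply collect every matching name and today.confirm value in the response and rely on the two lists lining up one-to-one. As a minimal sketch of that pattern, the snippet below runs the same expressions against a made-up, simplified structure (an assumption for illustration only, not the real schema returned by api.inews.qq.com). Note that jsonpath() returns False rather than an empty list when nothing matches, so a quick check on the real response is worthwhile.

from jsonpath import jsonpath

# Illustrative stand-in for the API response (assumed structure, for demonstration only)
sample = {
    'data': {
        'diseaseh5Shelf': {
            'areaTree': [
                {'name': 'RegionA', 'today': {'confirm': 3}},
                {'name': 'RegionB', 'today': {'confirm': 0}},
            ]
        }
    }
}

names = jsonpath(sample, '$..diseaseh5Shelf..name')   # every 'name' value found under diseaseh5Shelf
counts = jsonpath(sample, '$..today.confirm')         # every today.confirm value in the document
print(dict(zip(names, counts)))                       # pairs each region name with its count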
