我一直在尝试从SendinBlue应用程序接口中获取数据。问题是API对每个调用有100条记录的限制,并且我的Python循环不能正常工作。这就是我到目前为止所拥有的,调用工作得很好。
import requests
import pandas as pd
from pandas import json_normalize
import json

results = []
pagination = 0

url = "https://api.sendinblue.com/v3/smtp/statistics/events"
# The API caps every call at 100 records, so paging is driven by `offset`.
querystring = {"limit": "100", "offset": pagination, "days": "15"}
headers = {
    "Accept": "application/json",
    "api-key": "XXXXXXX"
}

# API response (first page)
response = requests.request("GET", url, headers=headers, params=querystring)
# Convert the JSON body to a dict
data = json.loads(response.text)
# Convert the dict to a DataFrame, flattening the 'events' records
base = pd.json_normalize(data, record_path='events')
# The data structure is as follows:
{'events': [
{'email': 'chusperu@gmail.com',
'date': '2020-10-18T17:18:58.000-05:00',
'subject': 'Diego, ¡Gracias por registrarte! ?',
'messageId': '<202010181429.12179607081@smtp-relay.mailin.fr>',
'event': 'opened',
'tag': '',
'from': 'ventas01@grupodymperu.com'},
{'email': 'cynthiaapurimac@gmail.com',
'date': '2020-10-18T17:52:56.000-05:00',
'subject': 'Alvarado, ¡Gracias por registrarte! ?',
'messageId': '<202010182252.53640747487@smtp-relay.mailin.fr>',
'event': 'requests',
'tag': '',
'from': 'ventas01@grupodymperu.com'},
....
我尝试过的循环是这样的,但它只对前200条记录进行了分页。我哪里做错了?
# Collect the events from the first page already fetched above.
for event in data['events']:
    results.append(event)

# Page through the remaining results, 100 records per request.
# NOTE: the original version put `break` in a `while/else` clause,
# which is a SyntaxError, and only stopped on a non-200 status —
# the API keeps answering 200 for empty pages, so it never ended
# cleanly. Stop as soon as a page comes back with no events.
while response.status_code == 200:
    pagination += 100
    querystring['offset'] = pagination
    response = requests.request("GET", url, headers=headers, params=querystring)
    data = json.loads(response.text)
    if not data.get('events'):
        break
    for event in data['events']:
        results.append(event)

print(results)
# Posted on 2020-11-06 03:54:15
终于得到它了。
import requests
import pandas as pd
from pandas import json_normalize
import json

# Excel = "C:/Users/User/PycharmProjects/Primero/DataSendin.xlsx"
pagination = 0
url = "https://api.sendinblue.com/v3/smtp/statistics/events"
# The API caps every call at 100 records; `offset` advances the window.
querystring = {"limit": "100", "offset": f"{pagination}", "days": "3"}
headers = {
    "Accept": "application/json",
    "api-key": "Your API key"
}

# API response (first page)
response = requests.request("GET", url, headers=headers, params=querystring)
try:
    # Convert the JSON body to a dict
    results = []
    data = json.loads(response.text)
    results.append(data)
    if not data:
        print("no hay data")
    else:
        while response.status_code == 200:
            pagination += 100
            querystring['offset'] = pagination
            response = requests.request("GET", url, headers=headers, params=querystring)
            data = json.loads(response.text)
            # Check the page BEFORE appending it: a page without an
            # 'events' key would make json_normalize raise KeyError, and
            # an API that keeps returning 200 with empty events would
            # otherwise loop forever (a non-empty dict is truthy).
            if not data or not data.get('events'):
                break
            results.append(data)
except ValueError:
    # Response body was not valid JSON — nothing to parse.
    # (The original had a bare string expression here, which did nothing.)
    print("no data")

# Drop any falsy entries and flatten all pages into one DataFrame
final = list(filter(None, results))
base = pd.json_normalize(final, record_path='events')
base  # display the DataFrame (notebook-style)
# Source: https://stackoverflow.com/questions/64655409
复制相似问题