Google OAuth + Django - can't authenticate from saved data - Python

Okay, so I got the flow working, but it only works in the current session; if I try to load the data back in, even after just a refresh, the credentials don't hold up.
This works (it's the view OAuth redirects to after the user accepts the prompts):
def oauth_redir(request):
    u = Employee.objects.filter(dashboard_user=request.user)
    if not u:
        u = Employee.objects.create(dashboard_user=request.user)
    else:
        u = u[0]
    flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
        CLIENT_SECRETS_FILE, scopes=SCOPES, state=request.GET.get("state"))
    flow.redirect_uri = REDIRECT_URL
    flow.fetch_token(authorization_response=request.build_absolute_uri().replace("http:", "https:"))
    # saving credentials for future use
    credentials = flow.credentials
    if not Emp_google_creds.objects.filter(employee=u):
        Emp_google_creds.objects.create(
            token=credentials.token,
            refresh_token=credentials.refresh_token,
            token_uri=credentials.token_uri,
            client_id=credentials.client_id,
            client_secret=credentials.client_secret,
            scopes=" ".join(credentials.scopes),
            employee=u
        )
    else:
        creds = Emp_google_creds.objects.get(employee=u)
        creds.token = credentials.token,
        creds.refresh_token = credentials.refresh_token,
        creds.token_uri = credentials.token_uri,
        creds.client_id = credentials.client_id,
        creds.client_secret = credentials.client_secret,
        creds.scopes = " ".join(credentials.scopes),
        creds.save()
    if not credentials or not credentials.valid:
        print(credentials, credentials.valid, credentials.expiry)
        # credentials.refresh(Request())
        # return redirect("/calendar/cal_auth/")
    try:
        service = googleapiclient.discovery.build('calendar', 'v3', credentials=credentials)
        # Call the Calendar API
        now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
        print('Getting the upcoming 10 events')
        events_result = service.events().list(calendarId='primary', timeMin=now,
                                              maxResults=10, singleEvents=True,
                                              orderBy='startTime').execute()
        events = events_result.get('items', [])
        if not events:
            print('No upcoming events found.')
            return
        # Prints the start and name of the next 10 events
        for event in events:
            start = event['start'].get('dateTime', event['start'].get('date'))
            print(start, event['summary'])
    except HttpError as error:
        print('An error occurred: %s' % error)
    print("end of redir func")
    return render(request, "schedule.html")
This loads the schedule as expected. However, if you refresh in order to use the stored credentials, they never work and they never refresh; I'm unsure what to do.
def calendar_home(request):
    u = Employee.objects.filter(dashboard_user=request.user)
    if not u:
        u = Employee.objects.create(dashboard_user=request.user)
    else:
        u = u[0]
    print(Emp_google_creds.objects.filter(employee=u))
    if not Emp_google_creds.objects.filter(employee=u):
        return redirect("/calendar/cal_auth/")
    else:
        creds = Emp_google_creds.objects.filter(employee=u)[0]
        gcreds = google.oauth2.credentials.Credentials.from_authorized_user_info(creds.to_dict())
    if not gcreds or not gcreds.valid:
        print(gcreds, gcreds.valid, gcreds.expiry)
        # gcreds.refresh(Request())
        return redirect("/calendar/cal_auth/")
    try:
        service = googleapiclient.discovery.build('calendar', 'v3', credentials=gcreds)
        # Call the Calendar API
        now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
        print('Getting the upcoming 10 events')
        events_result = service.events().list(calendarId='primary', timeMin=now,
                                              maxResults=10, singleEvents=True,
                                              orderBy='startTime').execute()
        events = events_result.get('items', [])
        if not events:
            print('No upcoming events found.')
            return
        # Prints the start and name of the next 10 events
        for event in events:
            start = event['start'].get('dateTime', event['start'].get('date'))
            print(start, event['summary'])
    except HttpError as error:
        print('An error occurred: %s' % error)
    return render(request, "schedule.html")
Basically, this:
print(gcreds, gcreds.valid, gcreds.expiry) =>
<google.oauth2.credentials.Credentials object at 0x7f322a00b340> False 2022-06-03 14:44:02.232624
and if I comment the redirect out I get:
('invalid_client: The OAuth client was not found.', {'error': 'invalid_client', 'error_description': 'The OAuth client was not found.'})
raised on this line:
events_result = service.events().list(calendarId='primary', timeMin=now, ...

I just went through all of this last week. Maybe try building the credentials manually like I did, instead of using Credentials.from_authorized_user_info(). I found out the hard way that Google likes to change things without updating their documentation.
user_creds = Emp_google_creds.objects.filter(employee=u)[0]
creds = Credentials(
    token=user_creds.token,
    refresh_token=user_creds.refresh_token,
    token_uri=user_creds.token_uri,
    client_id=user_creds.client_id,
    client_secret=user_creds.client_secret
)
service = build('calendar', 'v3', credentials=creds)
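Two other things I'd look at. First, the update branch in oauth_redir assigns with trailing commas (creds.token = credentials.token,), which makes each value a one-element tuple; if those tuple reprs end up saved in the text columns, the client_id that comes back out is garbage, which would explain the invalid_client error. Second, the stored access token will have expired by the next request, so something has to refresh it before building the service. A minimal sketch (assuming the Emp_google_creds fields from the question):

from google.auth.transport.requests import Request
import google.oauth2.credentials

creds_row = Emp_google_creds.objects.get(employee=u)
gcreds = google.oauth2.credentials.Credentials(
    token=creds_row.token,
    refresh_token=creds_row.refresh_token,
    token_uri=creds_row.token_uri,
    client_id=creds_row.client_id,
    client_secret=creds_row.client_secret,
    scopes=creds_row.scopes.split(" "),
)
if gcreds.expired and gcreds.refresh_token:
    gcreds.refresh(Request())       # exchange the refresh token for a new access token
    creds_row.token = gcreds.token  # persist the fresh token for the next request
    creds_row.save()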

Related

Google API Multi-Processing

I'm trying to grab specific information from emails under my Gmail account (Subject, From, Date, Message Body) and was able to do so successfully using the Google API and relevant libraries. However, I've noticed that the more emails you have, the longer parsing takes; parsing 34 emails takes nearly 15 seconds, which is bad if you try to scale that to 1,000 emails. My aim was to use concurrency/multiprocessing on the parse_message() function, but I've had no luck and keep getting back an empty list. The aim is to process all the emails, then append them all to the combined list.
Apologies for the sloppiness; it's yet to be cleaned up. There are fewer than 100 lines in total.
from __future__ import print_function
import os.path
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from concurrent.futures import ProcessPoolExecutor
import base64
import re

combined = []

def authenticate():
    # If modifying these scopes, delete the file token.json.
    SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
    creds = None
    if os.path.exists('token.json'):
        creds = Credentials.from_authorized_user_file('token.json', SCOPES)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'creds.json', SCOPES)
            creds = flow.run_local_server(port=0)
        with open('token.json', 'w') as token:
            token.write(creds.to_json())
    return creds

def get_messages(creds):
    # Get the messages
    days = 31
    service = build('gmail', 'v1', credentials=creds)
    results = service.users().messages().list(userId='me', q=f'newer_than:{days}d, in:inbox').execute()
    messages = results.get('messages', [])
    message_count = len(messages)
    print(f"You've received {message_count} email(s) in the last {days} days")
    if not messages:
        print(f'No Emails found in the last {days} days.')
    return messages

def parse_message(msg):
    # Call the Gmail API
    service = build('gmail', 'v1', credentials=creds)
    txt = service.users().messages().get(userId='me', id=msg['id']).execute()
    payload = txt['payload']
    headers = payload['headers']
    # Grab the Subject Line, From and Date from the Email
    for d in headers:
        if d['name'] == 'Subject':
            subject = d['value']
        if d['name'] == 'From':
            sender = d['value']
            try:
                match = re.search(r'<(.*)>', sender).group(1)
            except:
                match = sender
        if d['name'] == "Date":
            date_received = d['value']

    def get_body(payload):
        if 'body' in payload and 'data' in payload['body']:
            return payload['body']['data']
        elif 'parts' in payload:
            for part in payload['parts']:
                data = get_body(part)
                if data:
                    return data
        else:
            return None

    data = get_body(payload)
    data = data.replace("-", "+").replace("_", "/")
    decoded_data = base64.b64decode(data).decode("UTF-8")
    decoded_data = (decoded_data.encode('ascii', 'ignore')).decode("UTF-8")
    decoded_data = decoded_data.replace('\n', '').replace('\r', '').replace('\t', '')
    # Append parsed message to shared list
    return combined.append([date_received, subject, match, decoded_data])

if __name__ == '__main__':
    creds = authenticate()
    messages = get_messages(creds)
    # Create a process pool with 4 worker processes
    with ProcessPoolExecutor(max_workers=4) as executor:
        # Submit the parse_message function for each message in the messages variable
        executor.map(parse_message, messages)
    print(f"Combined: {combined}")
When running the script, my output is normally:
You've received 34 email(s) in the last 31 days
Combined: []
Thanks to the help of simpleApp, I made their changes along with a few others to get this working.
    # Return the parsed message instead of appending to a shared list
    return [date_received, subject, match, decoded_data]

if __name__ == '__main__':
    creds = authenticate()
    messages, service = get_messages(creds)
    # Create a process pool with the default number of worker processes
    with ProcessPoolExecutor() as executor:
        combined = []
        # Submit the parse_message function for each message in the messages variable
        all_pools = executor.map(parse_message, messages, [service] * len(messages))
        for e_p in all_pools:
            combined.append(e_p)
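For context on why the original version printed an empty list: each ProcessPoolExecutor worker runs in a separate process with its own copy of module globals, so combined.append(...) inside a worker only mutates the child process's list, never the parent's. The fix above follows the standard pattern of returning values and collecting them from executor.map(); a stripped-down sketch of that pattern:

from concurrent.futures import ProcessPoolExecutor

def work(n):
    # Runs in a child process; return the result rather than appending
    # to a global list, which would only change the child's copy.
    return n * n

if __name__ == '__main__':
    with ProcessPoolExecutor() as executor:
        results = list(executor.map(work, range(5)))
    print(results)  # [0, 1, 4, 9, 16]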

Script blocking in the Listener event

I'm trying to get the last email received using an exchangelib listener. The problem here is that the code never gets past the second inbox query (the one inside the callback), while the first print(account.inbox.all().count()) prints fine; see the result below the code.
creds = Credentials(
    username="domaine\\user",
    password="password"
)

def main():
    print("started !")
    config = Configuration(server='server', credentials=creds)
    account = Account(
        primary_smtp_address="mail",
        autodiscover=False,
        config=config,
        access_type=DELEGATE,
        default_timezone=UTC
    )
    listener = Listener(account)
    print(account.inbox.all().count())

    def new_messaged_received():
        print("---------------------------------new mail arrived----------------------------------------------")
        for item in account.inbox.all().only('subject').order_by('-datetime_received')[:1]:
            print(item.subject)

    listener.streaming_event_received += new_messaged_received
    listener.listen(NewMailEvent)
The result after receiving a new email:
7503
---------------------------------new mail arrived----------------------------------------------
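A guess at what is happening (not verified against this particular Listener implementation): listen() holds the streaming connection open on the account's session, so the inbox query issued from inside the callback blocks waiting on that same connection. One workaround sketch is to give the callback its own Account, and therefore its own connection:

# Hypothetical sketch: a second Account for queries made while the
# streaming connection is open, so the two don't contend for one session.
query_account = Account(
    primary_smtp_address="mail",
    autodiscover=False,
    config=Configuration(server='server', credentials=creds),
    access_type=DELEGATE,
    default_timezone=UTC,
)

def new_messaged_received():
    print("new mail arrived")
    for item in query_account.inbox.all().only('subject').order_by('-datetime_received')[:1]:
        print(item.subject)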

How to make use of multiprocessing in Python to make API calls simultaneously

# function to verify user login
def get_authenticated_service(file, client_secret_file):
    credentials = None
    if os.path.exists(file):
        print('Loading Credentials From File...')
        with open(file, 'rb') as token:
            credentials = pickle.load(token)
        return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
    if not credentials or not credentials.valid:
        if credentials and credentials.expired and credentials.refresh_token:
            print('Refreshing Access Token...')
            credentials.refresh(Request())
        else:
            print('Fetching New Tokens...')
            flow = InstalledAppFlow.from_client_secrets_file(
                client_secret_file,
                scopes=[
                    'https://www.googleapis.com/auth/youtube.readonly',
                    'https://www.googleapis.com/auth/youtube.upload',
                    'https://www.googleapis.com/auth/youtube.force-ssl'
                ]
            )
            flow.run_local_server(port=8000, prompt='consent',
                                  authorization_prompt_message='')
            credentials = flow.credentials
            # Save the credentials for the next run
            with open(file, 'wb') as f:
                print('Saving Credentials for Future Use...')
                pickle.dump(credentials, f)
    return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
# function to verify the user and build the HTTP request body
def initialize_upload(options, file, tokenFile, client_secret_file):
    youtube = get_authenticated_service(tokenFile, client_secret_file)
    tags = None
    if options.keywords:
        tags = options.keywords.split(',')
    body = dict(
        snippet=dict(
            title=options.title,
            description=options.description,
            tags=tags,
            categoryId=options.category
        ),
        status=dict(
            privacyStatus=options.privacyStatus
        )
    )
    # Call the API's videos.insert method to create and upload the video.
    print("running")
    insert_request = youtube.videos().insert(
        part=','.join(body.keys()),
        body=body,
        media_body=MediaFileUpload(file, chunksize=-1, resumable=True)
    )
    print("multiprocessing start")
    resumable_upload(insert_request)
    print("multiprocessing ends")
# function for uploading the file
def resumable_upload(request):
    response = None
    error = None
    retry = 0
    print("running by " + multiprocessing.current_process().name)
    while response is None:
        try:
            print('Uploading file...')
            status, response = request.next_chunk()
            print('Uploading file done...')
            if response is not None:
                if 'id' in response:
                    print('Video id "%s" was successfully uploaded.' % response['id'])
                else:
                    exit('The upload failed with an unexpected response: %s' % response)
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = 'A retriable HTTP error %d occurred:\n%s' % (e.resp.status, e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = 'A retriable error occurred: %s' % e
        if error is not None:
            print(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit('No longer attempting to retry.')
            max_sleep = 2 ** retry
            # sleep_seconds = random.random() * max_sleep
            sleep_seconds = 0.5
            print('Sleeping %f seconds and then retrying...' % sleep_seconds)
            time.sleep(sleep_seconds)
# title, description etc. are passed as arguments and the multiprocessing calls are made
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--title', help='Video title', default='Android react native code sample(ANdroid emulator)')
    parser.add_argument('--description', help='Android react native code sample description',
                        default='Android react native code sample description(ANdroid emulator)')
    parser.add_argument('--category', default='22',
                        help='Numeric video category. ' +
                             'See https://developers.google.com/youtube/v3/docs/videoCategories/list')
    parser.add_argument('--keywords', help='react native, react, android, emulator',
                        default='react native, react')
    parser.add_argument('--privacyStatus', choices=VALID_PRIVACY_STATUSES,
                        default='private', help='Video privacy status.')
    args = parser.parse_args()
    try:
        l = []
        start = time.perf_counter()
        t1 = multiprocessing.Process(target=initialize_upload, args=(args, "android1.mp4", 'token1.pickle', 'client_secret.json'))
        t2 = multiprocessing.Process(target=initialize_upload, args=(args, "android1.mp4", 'token2.pickle', 'client_secret.json'))
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        finish = time.perf_counter()
        print(f'Finished in {round(finish-start, 2)} seconds..')
    except HttpError as e:
        print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
If I make this call it takes 12.09 seconds, and without multiprocessing (i.e. calling one upload after another) it takes 12.39 seconds. After debugging a little, I found that everything works properly up to print('Uploading file...') (in resumable_upload()), but after that it behaves as if the uploads run one at a time. Can anyone suggest how I can make this work with multiprocessing? Below is the output of this file.
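One thing worth noting (an assumption about where the time goes, not something verified against this exact run): with chunksize=-1, MediaFileUpload sends the whole file in a single request, so each process spends essentially all of its time inside one long network call, and total wall time is bounded by upload bandwidth, which multiprocessing cannot increase. Switching to a finite chunk size at least makes next_chunk() return between chunks, so the two processes' progress can be seen interleaving:

from googleapiclient.http import MediaFileUpload

# Sketch: upload in 1 MiB chunks so next_chunk() yields between chunks;
# total time is still limited by the available upload bandwidth.
media = MediaFileUpload(file, chunksize=1024 * 1024, resumable=True)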

How to mock functionality of boto3 module using pytest

I have a custom module called sqs.py. The script does the following:
Get a message from AWS SQS
Get the AWS S3 path to delete
Delete the path
Send a confirmation email to the user
I'm trying to write unit tests for this module that verify the code executes as expected and raises exceptions when they occur.
This means I need to mock the responses from the Boto3 calls that I make. My problem is that the code first establishes the SQS client to obtain the message and then makes a second call to establish the S3 client. I'm not sure how to mock these two independent calls and fake a response so I can test my script's functionality. Perhaps my approach is incorrect. In any case, any advice on how to do this properly is appreciated.
Here's what the code looks like:
import boto3
import json
import os
import pprint
import time
import asyncio
import logging
from send_email import send_email

queue_url = 'https://xxxx.queue.amazonaws.com/1234567890/queue'

def shutdown(message):
    """ Sends shutdown command to OS """
    os.system(f'shutdown +5 "{message}"')

def send_failure_email(email_config: dict, error_message: str):
    """ Sends email notification to user with error message attached. """
    recipient_name = email_config['recipient_name']
    email_config['subject'] = 'Subject: Restore Failed'
    email_config['message'] = f'Hello {recipient_name},\n\n' \
        + 'We regret that an error has occurred during the restore process. ' \
        + 'Please try again in a few minutes.\n\n' \
        + f'Error: {error_message}.\n\n'
    try:
        send_email(email_config)
    except RuntimeError as error_message:
        logging.error(f'ERROR: cannot send email to user. {error_message}')

async def restore_s3_objects(s3_client: object, p_bucket_name: str, p_prefix: str):
    """Attempts to restore objects specified by p_bucket_name and p_prefix.
    Returns True if restore took place, False otherwise.
    """
    is_truncated = True
    key_marker = None
    key = ''
    number_of_items_restored = 0
    has_restore_occured = False
    logging.info(f'performing restore for {p_bucket_name}/{p_prefix}')
    try:
        while is_truncated == True:
            if not key_marker:
                version_list = s3_client.list_object_versions(
                    Bucket=p_bucket_name,
                    Prefix=p_prefix)
            else:
                version_list = s3_client.list_object_versions(
                    Bucket=p_bucket_name,
                    Prefix=p_prefix,
                    KeyMarker=key_marker)
            if 'DeleteMarkers' in version_list:
                logging.info('found delete markers')
                delete_markers = version_list['DeleteMarkers']
                for d in delete_markers:
                    if d['IsLatest'] == True:
                        key = d['Key']
                        version_id = d['VersionId']
                        s3_client.delete_object(
                            Bucket=p_bucket_name,
                            Key=key,
                            VersionId=version_id
                        )
                        number_of_items_restored = number_of_items_restored + 1
            is_truncated = version_list['IsTruncated']
            logging.info(f'is_truncated: {is_truncated}')
            if 'NextKeyMarker' in version_list:
                key_marker = version_list['NextKeyMarker']
        if number_of_items_restored > 0:
            has_restore_occured = True
        return has_restore_occured
    except Exception as error_message:
        raise RuntimeError(error_message)

async def main():
    if 'AWS_ACCESS_KEY_ID' in os.environ \
            and 'AWS_SECRET_ACCESS_KEY' in os.environ \
            and os.environ['AWS_ACCESS_KEY_ID'] != '' \
            and os.environ['AWS_SECRET_ACCESS_KEY'] != '':
        sqs_client = boto3.client(
            'sqs',
            aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
            verify=False
        )
        s3_client = boto3.client(
            's3',
            aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
            verify=False
        )
    else:
        sqs_client = boto3.client(
            'sqs',
            verify=False,
        )
        s3_client = boto3.client(
            's3',
            verify=False,
        )
    received_message = sqs_client.receive_message(
        QueueUrl=queue_url,
        AttributeNames=['All'],
        VisibilityTimeout=10,
        WaitTimeSeconds=20,  # Wait up to 20 seconds for a message to arrive
    )
    if 'Messages' in received_message \
            and len(received_message['Messages']) > 0:
        # NOTE: Initialize email configuration
        recipient_email = 'support@example.com'
        username = recipient_email.split('@')[0]
        fullname_length = len(username.split('.'))
        fullname = f"{username.split('.')[0]}"  # Group name / First name only
        if fullname_length == 2:  # First name and last name available
            fullname = f"{username.split('.')[0]} {username.split('.')[1]}"
        fullname = fullname.title()
        email_config = {
            'destination': recipient_email,
            'recipient_name': fullname,
            'subject': 'Subject: Restore Complete',
            'message': ''
        }
        try:
            receipt_handle = received_message['Messages'][0]['ReceiptHandle']
        except Exception as error_message:
            logging.error(error_message)
            send_failure_email(email_config, error_message)
            shutdown(f'{error_message}')
        try:
            data = received_message['Messages'][0]['Body']
            data = json.loads(data)
            logging.info('A SQS message for a restore has been received.')
        except Exception as error_message:
            message = f'Unable to obtain and parse message body. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')
        try:
            bucket = data['bucket']
            prefix = data['prefix']
        except Exception as error_message:
            message = f'Retrieving bucket name and prefix failed. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')
        try:
            logging.info(f'Initiating restore for path: {bucket}/{prefix}')
            restore_was_performed = await asyncio.create_task(restore_s3_objects(s3_client, bucket, prefix))
            if restore_was_performed is True:
                email_config['message'] = f'Hello {fullname},\n\n' \
                    + f'The files in the path \'{bucket}/{prefix}\' have been restored. '
                send_email(email_config)
                logging.info('Restore complete. Shutting down.')
            else:
                logging.info('Path does not require restore. Shutting down.')
            shutdown(f'shutdown +5 "Restore successful! System will shutdown in 5 mins"')
        except Exception as error_message:
            message = f'File restoration failed. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')
        try:
            sqs_client.delete_message(
                QueueUrl=queue_url,
                ReceiptHandle=receipt_handle,
            )
        except Exception as error_message:
            message = f'Deleting restore session from SQS failed. {error_message}'
            logging.error(message)
            send_failure_email(email_config, message)
            shutdown(f'{error_message}')

if __name__ == '__main__':
    logging.basicConfig(filename='restore.log', level=logging.INFO)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
The only way I was able to mock Boto3 is by rebuilding a small class that represents the actual method structure. This is because Boto3 uses dynamic methods and all the resource-level methods are created at runtime.
This might not be industry standard, but I wasn't able to get any of the methods I found on the internet to work most of the time; this worked pretty well for me and requires minimal effort (compared to some of the solutions I found).
class MockClient:
    def __init__(self, region_name, aws_access_key_id, aws_secret_access_key):
        self.region_name = region_name
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.MockS3 = MockS3()

    def client(self, service_name, **kwargs):
        return self.MockS3

class MockS3:
    def __init__(self):
        self.response = None  # Set your mock data from S3 here

    def list_object_versions(self, **kwargs):
        return self.response

class S3TestCase(unittest.TestCase):
    def test_restore_s3_objects(self):
        # Given
        bucket = "testBucket"  # Set this to something somewhat realistic
        prefix = "some/prefix"  # Set this to something somewhat realistic
        env_vars = mock.patch.dict(os.environ, {"AWS_ACCESS_KEY_ID": "abc",
                                                "AWS_SECRET_ACCESS_KEY": "def"})
        env_vars.start()
        # Initialising the Session can be tricky, since it has to be patched on
        # the module/file that creates the session in the actual code rather
        # than where the Session code lives. In this case you might have to
        # patch it on main rather than on boto3.
        boto3.session.Session = mock.Mock(side_effect=[
            MockClient(region_name='eu-west-1',
                       aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
                       aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])])
        s3_client = boto3.client('s3', verify=False)
        # When
        has_restore_occured = restore_s3_objects(s3_client, bucket, prefix)
        # Then
        self.assertEqual(has_restore_occured, False)  # your expected result
        env_vars.stop()
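For comparison, here is a sketch using botocore's built-in Stubber, which queues canned responses on a real client without monkeypatching boto3 (assuming restore_s3_objects is importable from the sqs module under test):

import asyncio
import boto3
from botocore.stub import Stubber

from sqs import restore_s3_objects  # module under test; import path assumed

def test_restore_s3_objects_no_delete_markers():
    s3_client = boto3.client('s3', region_name='eu-west-1',
                             aws_access_key_id='testing',
                             aws_secret_access_key='testing')
    stubber = Stubber(s3_client)
    # Queue one list_object_versions response containing no delete markers.
    stubber.add_response(
        'list_object_versions',
        {'IsTruncated': False},
        {'Bucket': 'testBucket', 'Prefix': 'some/prefix'},
    )
    with stubber:
        restored = asyncio.run(restore_s3_objects(s3_client, 'testBucket', 'some/prefix'))
    assert restored is False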

Google Calendar access from Python

I'm new to this community.
Until Nov 17, I used Google Calendar v2 to turn my swimming pool pump on or off.
I'm trying to upgrade my Python script to Google API v3, but I'm not sure I understand everything.
I'm using (well, trying to use) the following code found on the internet:
# Inspired from 'Raspberry Pi as a Google Calender Alarm Clock'
# http://www.esologic.com/?p=634
from datetime import datetime
import logging, os, platform, re, time
from apiclient.discovery import build
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
from config import *

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class Alarm():
    system = platform.system().lower()
    flow = flow_from_clientsecrets(CLIENT_SECRET_FILE,
                                   scope='https://www.googleapis.com/auth/calendar',
                                   redirect_uri='http://localhost:8080/')
    storage = Storage('calendar.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid == True:
        credentials = run(flow, storage)
    # Google Calendar service connection
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build(serviceName='calendar', version='v3', http=http, developerKey=API_KEY)

    def check_credentials(self):
        if self.credentials is None or self.credentials.invalid == True:
            credentials = run(self.flow, self.storage)

    def calendar_event_query(self):
        self.check_credentials()
        today = datetime.today()
        events = self.service.events().list(singleEvents=True, calendarId=CALENDAR_ID).execute()
        for i, event in enumerate(events['items']):
            name = event['summary'].lower()
            start = event['start']['dateTime'][:-9]
            description = event.get('description', '')
            repeat = True if description.lower() == 'repeat' else False
            now = today.strftime('%Y-%m-%dT%H:%M')
            if start >= now:
                logger.debug('Event #%s, Name: %s, Start: %s', i, name, start)
            if start == now:
                if name.startswith('say'):
                    name = re.sub(r'[^a-zA-Z0-9\s\']', '', name)
                    command = '{0} "{1}"'.format('say' if system == 'darwin' else 'espeak - ven+m2', name[4:])
                    logger.info('Event starting. Announcing \'%s\'...', name[4:])
                else:
                    mp3_files = os.listdir(MP3_FOLDER)
                    mp3_name = name.replace(' ', '_') + '.mp3'
                    mp3_name = mp3_name if mp3_name in mp3_files else 'default.mp3'
                    command = 'mpg123 \'{}/{}\''.format(MP3_FOLDER, mp3_name)
                    logger.info('Event %s starting. Playing mp3 file %s...', name, mp3_name)
                os.system(command)
                if repeat == False:
                    time.sleep(60)

    def poll(self):
        logger.info('Polling calendar for events...')
        self.calendar_event_query()

while True:
    a = Alarm()
    a.poll()
    time.sleep(FREQUENCY_CHECK)
Of course, I created a client ID and API key in the Google console.
But when I run my script, it doesn't work and I get a webpage on my Raspberry with a 400 error: redirect_uri mismatch.
Any idea?
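The 400 redirect_uri_mismatch usually means exactly what it says: the redirect_uri the script sends must match, character for character (scheme, host, port, trailing slash), one of the authorized redirect URIs registered for that client ID in the Google console. The relevant line from the code above:

# The value passed here must be registered verbatim as an authorized
# redirect URI for this client ID in the Google API console.
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE,
                               scope='https://www.googleapis.com/auth/calendar',
                               redirect_uri='http://localhost:8080/')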
