AWS Lambda invoke from CodePipeline permission denied error - python

I've set my pipeline to invoke an AWS Lambda function. After running for 30 minutes it shows the error:
The AWS Lambda function cloudfront-invalidation failed to return a
result. Check the function to verify that it has permission to call
the PutJobSuccessResult action and that it made a call to
PutJobSuccessResult.
The Lambda role has permission to call PutJobSuccessResult, and the
CodePipeline service role has permission to invoke Lambda functions.
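For reference, a minimal inline policy granting the role those CodePipeline result calls can be attached with boto3 roughly like this (a sketch; the role and policy names are placeholders, not the real ones):
import json
import boto3

iam = boto3.client('iam')

# Allow the function to report its result back to CodePipeline.
# These actions do not support resource-level scoping, hence "*".
policy_document = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Action": [
            "codepipeline:PutJobSuccessResult",
            "codepipeline:PutJobFailureResult"
        ],
        "Resource": "*"
    }]
}

iam.put_role_policy(
    RoleName='cloudfront-invalidation-lambda-role',  # placeholder role name
    PolicyName='AllowPutJobResult',
    PolicyDocument=json.dumps(policy_document)
)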
Here is my lambda code:
import boto3
import time

def lambda_handler(context, event):
    sts_connection = boto3.client('sts')
    acct_b = sts_connection.assume_role(
        RoleArn="arn:aws:iam::1234567890:role/AssumeRole",
        RoleSessionName="cross_acct_lambda"
    )

    ACCESS_KEY = acct_b['Credentials']['AccessKeyId']
    SECRET_KEY = acct_b['Credentials']['SecretAccessKey']
    SESSION_TOKEN = acct_b['Credentials']['SessionToken']

    client = boto3.client(
        'cloudfront',
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=SECRET_KEY,
        aws_session_token=SESSION_TOKEN,
    )

    response = client.create_invalidation(
        DistributionId='ABC',
        InvalidationBatch={
            'Paths': {
                'Quantity': 1,
                'Items': [
                    '/*',
                ]
            },
            'CallerReference': str(time.time()).replace(".", "")
        }
    )
    invalidation_id = response['Invalidation']['Id']
    print("Invalidation created successfully with Id: " + invalidation_id)

    pipeline = boto3.client('codepipeline')
    response = pipeline.put_job_success_result(
        jobId=event['CodePipeline.job']['id']
    )
    return response

Issue resolved: the handler arguments were reversed (the signature must be lambda_handler(event, context), not lambda_handler(context, event)), so the function crashed before it could call PutJobSuccessResult. Updated lambda below:
import boto3
import time
import json
import logging

def lambda_handler(event, context):
    sts_connection = boto3.client('sts')
    acct_b = sts_connection.assume_role(
        RoleArn="arn:aws:iam::123456789:role/CloudfrontAssumeRole",
        RoleSessionName="cross_acct_lambda"
    )

    ACCESS_KEY = acct_b['Credentials']['AccessKeyId']
    SECRET_KEY = acct_b['Credentials']['SecretAccessKey']
    SESSION_TOKEN = acct_b['Credentials']['SessionToken']

    client = boto3.client(
        'cloudfront',
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=SECRET_KEY,
        aws_session_token=SESSION_TOKEN,
    )

    response = client.create_invalidation(
        DistributionId='ABCD',
        InvalidationBatch={
            'Paths': {
                'Quantity': 1,
                'Items': [
                    '/*',
                ]
            },
            'CallerReference': str(time.time()).replace(".", "")
        }
    )
    invalidation_id = response['Invalidation']['Id']
    print("Invalidation created successfully with Id: " + invalidation_id)

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.debug(json.dumps(event))

    codepipeline = boto3.client('codepipeline')
    job_id = event['CodePipeline.job']['id']

    try:
        logger.info('Success!')
        response = codepipeline.put_job_success_result(jobId=job_id)
        logger.debug(response)
    except Exception as error:
        logger.exception(error)
        response = codepipeline.put_job_failure_result(
            jobId=job_id,
            failureDetails={
                'type': 'JobFailed',
                'message': f'{error.__class__.__name__}: {str(error)}'
            }
        )
        logger.debug(response)
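For local testing, this is roughly the shape of the event CodePipeline hands to the handler; only the field the function actually reads is shown, and the job id is a placeholder (running this still hits the real AWS APIs):
# Minimal fake CodePipeline job event for exercising the handler locally.
fake_event = {
    "CodePipeline.job": {
        "id": "00000000-0000-0000-0000-000000000000"  # placeholder job id
    }
}

lambda_handler(fake_event, None)  # context is not used by this handler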

Related

how to add many ids to one list?

I have code that stores the user id and the name of the streamer; when the streamer goes live, the user who entered the command is notified.
How can I correctly store all of the user IDs so that notifications work for everyone, and not just for one user?
import requests
import pymongo
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor

TOKEN = ''
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
scheduler = AsyncIOScheduler(timezone="Europe/Kiev")

client = pymongo.MongoClient('')
db = client['Users']
collection = db['twitch']

def add_user(streamer_name, chat_id):
    collection.update_one({
        "_id": streamer_name
    }, {"$set": {
        'online': '-',
        'chat_id': chat_id
    }}, upsert=True)

def set_online(streamers):
    collection.update_one({
        '_id': streamers
    }, {'$set': {
        'online': 'True'
    }})

def set_offline(streamers):
    collection.update_one({
        '_id': streamers
    }, {'$set': {
        'online': 'False'
    }})

async def check(streamer_name, chat_id):
    client_id = ''
    client_secret = ''
    body = {
        'client_id': client_id,
        'client_secret': client_secret,
        "grant_type": 'client_credentials'
    }
    r = requests.post('https://id.twitch.tv/oauth2/token', body)
    keys = r.json()
    headers = {
        'Client-ID': client_id,
        'Authorization': 'Bearer ' + keys['access_token']
    }
    all_records = collection.find()
    users = list(all_records)
    for i in users:
        streamers = i['_id']
        send_users = i['chat_id']
        online = i['online']
        stream = requests.get('https://api.twitch.tv/helix/streams?user_login=' + streamers, headers=headers)
        stream_data = stream.json()
        if len(stream_data['data']) == 1:
            live = (streamers + ' is live: ' + stream_data['data'][0]['title'])
            if online == 'False':
                await bot.send_message(send_users, live)
                set_online(streamers)
            if online == 'True':
                print('streamer online')
        else:
            set_offline(streamers)
    scheduler.add_job(check, "interval", seconds=5, args=(streamer_name, chat_id))

@dp.message_handler(commands='check')
async def check_stream(message: types.Message):
    streamer_name = message.text[7:]
    chat_id = message.chat.id
    add_user(streamer_name, chat_id)
    await check(streamer_name, chat_id)

if __name__ == "__main__":
    scheduler.start()
    executor.start_polling(dp, skip_updates=True)
And when the streamer starts the stream, many messages come in instead of just one.
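One way to make this work for every user is to store a list of chat IDs per streamer instead of a single chat_id. A minimal sketch with pymongo, reusing collection and bot from the code above (the subscribers field name is made up for illustration):
def add_user(streamer_name, chat_id):
    # One document per streamer; accumulate subscriber chat ids in a list.
    # $addToSet avoids duplicates if the same user runs the command twice.
    collection.update_one(
        {"_id": streamer_name},
        {
            "$setOnInsert": {"online": "False"},
            "$addToSet": {"subscribers": chat_id},  # hypothetical list field
        },
        upsert=True,
    )

async def notify_all(streamer_name, text):
    # Send the notification to every chat id stored for this streamer.
    doc = collection.find_one({"_id": streamer_name})
    for chat_id in doc.get("subscribers", []):
        await bot.send_message(chat_id, text)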

Unable to read Athena query into pandas dataframe

I have the below code, and want to get it to return a dataframe properly. The polling logic works, but the dataframe doesn't seem to get created/returned. Right now it just returns None when called.
import boto3
import pandas as pd
import io
import re
import time

AK = 'mykey'
SAK = 'mysecret'

params = {
    'region': 'us-west-2',
    'database': 'default',
    'bucket': 'my-bucket',
    'path': 'dailyreport',
    'query': 'SELECT * FROM v_daily_report LIMIT 100'
}

session = boto3.Session(aws_access_key_id=AK, aws_secret_access_key=SAK)

# In[32]:

def athena_query(client, params):
    response = client.start_query_execution(
        QueryString=params["query"],
        QueryExecutionContext={
            'Database': params['database']
        },
        ResultConfiguration={
            'OutputLocation': 's3://' + params['bucket'] + '/' + params['path']
        }
    )
    return response

def athena_to_s3(session, params, max_execution=5):
    client = session.client('athena', region_name=params["region"])
    execution = athena_query(client, params)
    execution_id = execution['QueryExecutionId']
    df = poll_status(execution_id, client)
    return df

def poll_status(_id, client):
    '''
    poll query status
    '''
    result = client.get_query_execution(
        QueryExecutionId=_id
    )
    state = result['QueryExecution']['Status']['State']

    if state == 'SUCCEEDED':
        print(state)
        print(str(result))
        s3_key = 's3://' + params['bucket'] + '/' + params['path'] + '/' + _id + '.csv'
        print(s3_key)
        df = pd.read_csv(s3_key)
        return df
    elif state == 'QUEUED':
        print(state)
        print(str(result))
        time.sleep(1)
        poll_status(_id, client)
    elif state == 'RUNNING':
        print(state)
        print(str(result))
        time.sleep(1)
        poll_status(_id, client)
    elif state == 'FAILED':
        return result
    else:
        print(state)
        raise Exception

df_data = athena_to_s3(session, params)
print(df_data)
I plan to move the dataframe load out of the polling function, but I'm just trying to get it to work as is right now.
I recommend taking a look at AWS Wrangler instead of the traditional boto3 Athena API. It is a newer, more specific interface to all things data in AWS, including queries to Athena, and it gives you more functionality.
import awswrangler as wr

df = wr.pandas.read_sql_athena(
    sql="select * from table",
    database="database"
)
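Note that on awswrangler 1.x and later the Athena helpers moved under wr.athena; assuming a current release, the equivalent call looks roughly like this:
import awswrangler as wr

# awswrangler >= 1.0 exposes Athena reads as wr.athena.read_sql_query
# instead of wr.pandas.read_sql_athena.
df = wr.athena.read_sql_query(
    sql="select * from table",
    database="database"
)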
Thanks to @RagePwn's comment, it is also worth checking PyAthena as an alternative to the boto3 option for querying Athena.
If it is returning None, then it is because state == 'FAILED'. You need to investigate the reason it failed, which may be in 'StateChangeReason'. For reference, this is the get_query_execution response syntax:
{
    'QueryExecution': {
        'QueryExecutionId': 'string',
        'Query': 'string',
        'StatementType': 'DDL'|'DML'|'UTILITY',
        'ResultConfiguration': {
            'OutputLocation': 'string',
            'EncryptionConfiguration': {
                'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS',
                'KmsKey': 'string'
            }
        },
        'QueryExecutionContext': {
            'Database': 'string'
        },
        'Status': {
            'State': 'QUEUED'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED',
            'StateChangeReason': 'string',
            'SubmissionDateTime': datetime(2015, 1, 1),
            'CompletionDateTime': datetime(2015, 1, 1)
        },
        'Statistics': {
            'EngineExecutionTimeInMillis': 123,
            'DataScannedInBytes': 123,
            'DataManifestLocation': 'string',
            'TotalExecutionTimeInMillis': 123,
            'QueryQueueTimeInMillis': 123,
            'QueryPlanningTimeInMillis': 123,
            'ServiceProcessingTimeInMillis': 123
        },
        'WorkGroup': 'string'
    }
}
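For example, the failure reason can be surfaced from that response with a small helper like this (a sketch you could call from the FAILED branch of poll_status above):
def explain_failure(result):
    # Pull the failure reason out of a get_query_execution response, if present.
    status = result['QueryExecution']['Status']
    if status['State'] == 'FAILED':
        print('Query failed: ' + status.get('StateChangeReason', 'no reason given'))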
Just to elaborate on RagePwn's answer of using PyAthena - that's what I ultimately did as well. For some reason AWS Wrangler choked on me and couldn't handle the JSON that was being returned from S3. Here's the code snippet that worked for me, based on PyAthena's PyPI page:
import os
from pyathena import connect
from pyathena.util import as_pandas

aws_access_key_id = os.getenv('ATHENA_ACCESS_KEY')
aws_secret_access_key = os.getenv('ATHENA_SECRET_KEY')
region_name = os.getenv('ATHENA_REGION_NAME')
staging_bucket_dir = os.getenv('ATHENA_STAGING_BUCKET')

cursor = connect(aws_access_key_id=aws_access_key_id,
                 aws_secret_access_key=aws_secret_access_key,
                 region_name=region_name,
                 s3_staging_dir=staging_bucket_dir,
                 ).cursor()
cursor.execute(sql)
df = as_pandas(cursor)
The above assumes you have defined as environment variables the following:
ATHENA_ACCESS_KEY: the AWS access key id for your AWS account
ATHENA_SECRET_KEY: the AWS secret key
ATHENA_REGION_NAME: the AWS region name
ATHENA_STAGING_BUCKET: a bucket in the same account that has the correct access settings (explanation of which is outside the scope of this answer)

how to pass json object directly to train in rasa nlu from python

I am using Rasa NLU to train data. As per the documentation at http://nlu.rasa.ai/python.html, the following code has to be used to train on data that exists in the file demo-rasa.json:
from rasa_nlu.converters import load_data
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.model import Trainer
training_data = load_data('data/examples/rasa/demo-rasa.json')
trainer = Trainer(RasaNLUConfig("sample_configs/config_spacy.json"))
trainer.train(training_data)
model_directory = trainer.persist('./projects/default/')
But how do we instead read the training data from a JSON object?
If you look at the implementation of load_data, it performs two steps:
guess the file format
load the file using the appropriate loading method
The simplest solution would be to write your json object into a file or StringIO object.
Alternatively, you could pick the specific loading function you need, for example load_rasa_data, and separate the file reading from it. For this example, you could probably just take the whole function and remove the line data = _read_json_from_file(filename).
I am somewhat surprised to see that currently there is no way to read an already loaded json object. If you decide to adapt the functions to this, you might consider writing a pull request for it.
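A minimal sketch of the temp-file route, assuming the same rasa_nlu version as in the question (the helper name is made up for illustration):
import json
import tempfile

from rasa_nlu.converters import load_data

def load_data_from_dict(training_json):
    # Dump the in-memory object to a temporary .json file so that load_data
    # can guess the format exactly as it does for demo-rasa.json.
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(training_json, f)
        path = f.name
    return load_data(path)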
I have made a Flask app which takes the JSON object from the request body, instead of reading it from a file.
This code takes an existing LUIS JSON and trains on it, using spaCy for entities and sklearn-crfsuite for intent recognition.
from flask import Flask, jsonify, request
from flask_cors import CORS
import json, os, msvcrt, psutil, subprocess, datetime

app = Flask(__name__)
CORS(app)

with app.app_context():
    with app.test_request_context():
        #region REST based RASA API
        serverExecutablePID = 0
        hasAPIStarted = False
        configFileDirectory = "C:\\Code\\RasaAPI\\RASAResources\\config"
        chitChatModel = "ChitChat"
        assetsDirectory = "C:\\Code\\RasaAPI\\RASAResources"

        def createSchema(SchemaPath, dataToBeWritten):
            try:
                #write LUIS or RASA JSON Schema in json file locking the file to avoid race condition using Python's Windows msvcrt binaries
                with open(SchemaPath, "w") as SchemaCreationHandle:
                    msvcrt.locking(SchemaCreationHandle.fileno(), msvcrt.LK_LOCK, os.path.getsize(SchemaPath))
                    json.dump(dataToBeWritten, SchemaCreationHandle, indent = 4, sort_keys=False)
                    SchemaCreationHandle.close()

                #Check if written file actually exists on disk or not
                doesFileExist = os.path.exists(SchemaPath)
                return doesFileExist
            except Exception as ex:
                return str(ex.args)

        def appendTimeStampToModel(ModelName):
            return ModelName + '_{:%Y%m%d-%H%M%S}.json'.format(datetime.datetime.now())

        def appendTimeStampToConfigSpacy(ModelName):
            return ModelName + '_config_spacy_{:%Y%m%d-%H%M%S}.json'.format(datetime.datetime.now())

        def createConfigSpacy(ModelName, DataPath, ConfigSpacyPath, TrainedModelsPath, LogDataPath):
            try:
                with open(ConfigSpacyPath, "w") as configSpacyFileHandle:
                    msvcrt.locking(configSpacyFileHandle.fileno(), msvcrt.LK_LOCK, os.path.getsize(ConfigSpacyPath))
                    configDataToBeWritten = dict({
                        "project": ModelName,
                        "data": DataPath,
                        "path": TrainedModelsPath,
                        "response_log": LogDataPath,
                        "log_level": "INFO",
                        "max_training_processes": 1,
                        "pipeline": "spacy_sklearn",
                        "language": "en",
                        "emulate": "luis",
                        "cors_origins": ["*"],
                        "aws_endpoint_url": None,
                        "token": None,
                        "num_threads": 2,
                        "port": 5000
                    })
                    json.dump(configDataToBeWritten, configSpacyFileHandle, indent = 4, sort_keys=False)
                return os.path.getsize(ConfigSpacyPath) > 0
            except Exception as ex:
                return str(ex.args)

        def TrainRASA(configFilePath):
            try:
                trainingString = 'start /wait python -m rasa_nlu.train -c ' + '\"' + os.path.normpath(configFilePath) + '\"'
                returnCode = subprocess.call(trainingString, shell = True)
                return returnCode
            except Exception as ex:
                return str(ex.args)

        def StartRASAServer(configFileDirectory, ModelName):
            #region Server starting logic
            try:
                global hasAPIStarted
                global serverExecutablePID
                #1) for finding which is the most recent config_spacy
                root, dirs, files = next(os.walk(os.path.normpath(configFileDirectory)))
                configFiles = [configFile for configFile in files if ModelName in configFile]
                configFiles.sort(key = str.lower, reverse = True)
                mostRecentConfigSpacy = os.path.join(configFileDirectory, configFiles[0])

                serverStartingString = 'start /wait python -m rasa_nlu.server -c ' + '\"' + os.path.normpath(mostRecentConfigSpacy) + '\"'
                serverProcess = subprocess.Popen(serverStartingString, shell = True)
                serverExecutablePID = serverProcess.pid

                pingReturnCode = 1
                while(pingReturnCode):
                    pingReturnCode = os.system("netstat -na | findstr /i 5000")
                if(pingReturnCode == 0):
                    hasAPIStarted = True

                return pingReturnCode
            except Exception as ex:
                return jsonify({"message": "Failed because: " + str(ex.args), "success": False})
            #endregion

        def KillProcessWindow(hasAPIStarted, serverExecutablePID):
            if(hasAPIStarted == True and serverExecutablePID != 0):
                me = psutil.Process(serverExecutablePID)
                for child in me.children():
                    child.kill()

        @app.route('/api/TrainRASA', methods = ['POST'])
        def TrainRASAServer():
            try:
                #get request body of POST request
                postedJSONData = json.loads(request.data, strict = False)
                if postedJSONData["data"] is not None:
                    print("Valid data")
                    #region JSON file building logic
                    modelName = postedJSONData["modelName"]
                    modelNameWithExtension = appendTimeStampToModel(modelName)
                    schemaPath = os.path.join(assetsDirectory, "data", modelNameWithExtension)
                    print(createSchema(schemaPath, postedJSONData["data"]))
                    #endregion

                    #region config file creation logic
                    configFilePath = os.path.join(assetsDirectory, "config", appendTimeStampToConfigSpacy(modelName))
                    logsDirectory = os.path.join(assetsDirectory, "logs")
                    trainedModelDirectory = os.path.join(assetsDirectory, "models")
                    configFileCreated = createConfigSpacy(modelName, schemaPath, configFilePath, trainedModelDirectory, logsDirectory)
                    #endregion

                    if(configFileCreated == True):
                        #region Training RASA NLU with schema
                        TrainingReturnCode = TrainRASA(configFilePath)
                        #endregion

                        if(TrainingReturnCode == 0):
                            return jsonify({"message": "Successfully trained RASA NLU with modelname: " + modelName, "success": True})
                            # KillProcessWindow(hasAPIStarted, serverExecutablePID)
                            # serverStartingReturnCode = StartRASAServer(configFileDirectory, modelName)
                            # #endregion

                            # if serverStartingReturnCode == 0:
                            #     return jsonify({"message": "Successfully started RASA server on port 5000", "success": True})

                            # elif serverStartingReturnCode is None:
                            #     return jsonify({"message": "Could not start RASA server, request timed out", "success": False})
                        else:
                            return jsonify({"message": "Soemthing wrong happened while training RASA NLU!", "success": False})
                    else:
                        return jsonify({"message": "Could not create config file for RASA NLU", "success": False})

                #throw exception if request body is empty
                return jsonify({"message": "Please enter some JSON, JSON seems to be empty", "success": False})
            except Exception as ex:
                return jsonify({"Reason": "Failed because" + str(ex.args), "success": False})

        @app.route('/api/StopRASAServer', methods = ['GET'])
        def StopRASAServer():
            try:
                global serverExecutablePID
                if(serverExecutablePID != 0 or serverExecutablePID != None):
                    me = psutil.Process(serverExecutablePID)
                    for child in me.children():
                        child.kill()
                    return jsonify({"message": "Server stopped....", "success": True})
            except Exception as ex:
                return jsonify({"message": "Something went wrong while shutting down the server because: " + str(ex.args), "success": True})

if __name__ == "__main__":
    StartRASAServer(configFileDirectory, chitChatModel)
    app.run(debug=False, threaded = True, host='0.0.0.0', port = 5050)
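To exercise the endpoint above, a request along these lines should work (a sketch; the training payload under "data" is a hypothetical placeholder for your LUIS/RASA JSON):
import requests

payload = {
    "modelName": "ChitChat",
    "data": {"luis_schema_version": "2.1.0", "utterances": []}  # placeholder training JSON
}

# The Flask app above listens on port 5050.
r = requests.post("http://localhost:5050/api/TrainRASA", json=payload)
print(r.json())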
There is a simple way of doing it, but due to the poor code documentation of RASA it is difficult to find.
You will have to create a JSON object in the following format.
training_data = {'rasa_nlu_data': {"common_examples": training_examples,
                                   "regex_features": [],
                                   "lookup_tables": [],
                                   "entity_synonyms": []
                                   }}
In this JSON, training_examples is a list and it should contain the data represented below.
training_examples = [
    {
        "intent": "greet",
        "text": "Hello"
    },
    {
        "intent": "greet",
        "text": "Hi, how are you ?"
    },
    {
        "intent": "sad",
        "text": "I am not happy with the service"
    },
    {
        "intent": "praise",
        "text": "You're a genius"
    }
]
With this, you can now train it like this :)
from rasa.nlu import config
from rasa.nlu.model import Trainer  # added: Trainer is needed for the call below

# Even the config can be loaded from a dict like this
def get_train_config():
    return {'language': 'en',
            'pipeline': [
                {'name': 'WhitespaceTokenizer'},
                {'name': 'ConveRTFeaturizer'},
                {'name': 'EmbeddingIntentClassifier'}
            ],
            'data': None,
            'policies': [
                {'name': 'MemoizationPolicy'},
                {'name': 'KerasPolicy'},
                {'name': 'MappingPolicy'}
            ]}

trainer = Trainer(config._load_from_dict(get_train_config()))
# 'data' here is the training data object built from training_data above
interpreter = trainer.train(data)
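Once trained, the returned interpreter can be used directly to parse messages, e.g.:
# The parse result is a dict with the predicted intent and extracted entities.
result = interpreter.parse("Hello")
print(result["intent"], result["entities"])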

How to send file through Mattermost incoming webhook?

I am able to send text to a Mattermost channel through incoming webhooks:
import requests, json
URL = 'http://chat.something.com/hooks/1pgrmsj88qf5jfjb4eotmgfh5e'
payload = {"channel": "general", "text": "some text"}
r = requests.post(URL, data=json.dumps(payload))
This code simply posts text. I could not find a way to post a file to the channel. Suppose I want to post the file located at /home/alok/Downloads/Screenshot_20170217_221447.png. If anyone knows how, please share.
You can't currently attach files using the Incoming Webhooks API. You would need to use the Mattermost Client API to make a post with files attached to it.
Here's an example of how you could achieve that (using Mattermost API v3 for Mattermost >= 3.5)
SERVER_URL = "http://chat.example.com/"
TEAM_ID = "team_id_goes_here"
CHANNEL_ID = "channel_id_goes_here"
USER_EMAIL = "you#example.com"
USER_PASS = "password123"
FILE_PATH = '/home/user/thing_to_upload.png'
import requests, json, os
# Login
s = requests.Session() # So that the auth cookie gets saved.
s.headers.update({"X-Requested-With": "XMLHttpRequest"}) # To stop Mattermost rejecting our requests as CSRF.
l = s.post(SERVER_URL + 'api/v3/users/login', data = json.dumps({'login_id': USER_EMAIL, 'password': USER_PASS}))
USER_ID = l.json()["id"]
# Upload the File.
form_data = {
"channel_id": ('', CHANNEL_ID),
"client_ids": ('', "id_for_the_file"),
"files": (os.path.basename(FILE_PATH), open(FILE_PATH, 'rb')),
}
r = s.post(SERVER_URL + 'api/v3/teams/' + TEAM_ID + '/files/upload', files=form_data)
FILE_ID = r.json()["file_infos"][0]["id"]
# Create a post and attach the uploaded file to it.
p = s.post(SERVER_URL + 'api/v3/teams/' + TEAM_ID + '/channels/' + CHANNEL_ID + '/posts/create', data = json.dumps({
'user_id': USER_ID,
'channel_id': CHANNEL_ID,
'message': 'Post message goes here',
'file_ids': [FILE_ID,],
'create_at': 0,
'pending_post_id': 'randomstuffogeshere',
}))
I have done a version for API v4, with the use of a personal access token. https://docs.mattermost.com/developer/personal-access-tokens.html
import os
import json
import requests

SERVER_URL = "YOUR_SERVER_URL"
CHANNEL_ID = "YOUR_CHANNEL_ID"
FILE_PATH = './test.jpg'

s = requests.Session()
s.headers.update({"Authorization": "Bearer YOUR_PERSONAL_ACCESS_TOKEN"})

form_data = {
    "channel_id": ('', CHANNEL_ID),
    "client_ids": ('', "id_for_the_file"),
    "files": (os.path.basename(FILE_PATH), open(FILE_PATH, 'rb')),
}

r = s.post(SERVER_URL + '/api/v4/files', files=form_data)
FILE_ID = r.json()["file_infos"][0]["id"]

p = s.post(SERVER_URL + '/api/v4/posts', data=json.dumps({
    "channel_id": CHANNEL_ID,
    "message": "YOUR_MESSAGE",
    "file_ids": [ FILE_ID ]
}))
EDIT:
I have created a simple CLI.
https://github.com/Tim-Schwalbe/python_mattermost
As per @George, you can't send the file to the incoming webhook directly.
Below is code to send a file to a channel using the mattermostdriver package.
from mattermostdriver import Driver

team_name = "<name of your team in mattermost>"
channel_name = "<channel name>"  # channel to which you want to upload the document
file_path = "<file to be uploaded>"  # path of the file to upload
message = "<message to send on channel>"

options = {
    "url": "",  # url of your mattermost account: https://<url>
    "port": 8065,  # port of the website
    "password": "<account password>",
    "login_id": "<login id>",
    "token": None
}

x = Driver(options=options)

# logging in to the mattermost server
x.login()

# getting team id
team_id = x.teams.get_team_by_name(team_name)['id']

# getting channel id
channel_id = x.channels.get_channel_by_name(team_id, channel_name)['id']

# setting up the upload options
form_data = {
    "channel_id": ('', channel_id),
    "client_ids": ('', "id_for_the_file"),
    "files": (file_path, open(file_path, 'rb'))
}

# uploading the file
pp = x.files.upload_file(channel_id, form_data)
file_id = pp['file_infos'][0]['id']

# creating a post with the uploaded file attached
x.posts.create_post({'channel_id': channel_id, "message": message, "file_ids": [file_id]})

# logout from the server
x.logout()

Appengine channels automatically disconnected on production

On production, as soon as I open a channel with the JavaScript, it disconnects a second after.
Everything works fine on the devserver. The callback works on the server but not on the client. We are using Flask, Backbone, RequireJS and sourcemaps.
Client code:
window.channel = new goog.appengine.Channel(window.PLAY_SETTINGS.CHANNEL_TOKEN);
window.gae_websocket = window.channel.open({
    onopen: function() {
        return console.log('onopen');
    },
    onclose: function() {
        return console.log('onclose');
    },
    onerror: function() {
        return console.log('onerror');
    },
    onmessage: function() {
        return console.log('onmessage');
    }
});
Server code:
class Connection(ndb.Model):
    user_key = ndb.KeyProperty()
    scope = ndb.IntegerProperty(indexed=True, choices=range(0, 2))
    target_key = ndb.KeyProperty(indexed=True)  # Event or debate
    channel_id = ndb.StringProperty(indexed=True)

    @staticmethod
    def open_channel():
        channel_id = str(uuid4())
        channel_token = channel.create_channel(client_id=channel_id, duration_minutes=480)
        return channel_token, channel_id
Logs from the App Engine production console.
The client callbacks (JS) don't fire. These are the server callbacks that produce the logs:
@app.route('/_ah/channel/disconnected/', methods=['POST'])
def channel_disconnection():
    client_id = request.form.get('from')
    ndb.delete_multi(Connection.query(Connection.channel_id == client_id).fetch(keys_only=True))
    logging.info("Channel closed : %s" % client_id)
    return make_response('ok', '200')

@app.route('/_ah/channel/connected/', methods=['POST'])
def channel_connection():
    client_id = request.form.get('from')
    logging.info("Channel open : %s" % client_id)
    return make_response('ok', '200')
