I'm using aws lambda for a slack app, and I'm handling an interactive response(so I need to send a response in 3 seconds)
I invoke another Lambda from my code with the Event invocation type and return {"statusCode": 200}, but I can't find the returned value in the CloudWatch logs; the Lambda executes with no issues, but no returned value is logged.
this is my code:
import logging
from urllib.parse import parse_qs
import utils.slack.client as slack
from functions.flows.update_zendesk_ticket import pass_to_pso
from lambda_warmer.lambda_warmer import lambda_warmup
from utils.common import invoke_lambda, PSO_NOC_ALERTS
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
#lambda_warmup()
def lambda_handler(event, context):
    """Entry point for Slack interactivity requests.

    Slack sends the interaction as a form-encoded body whose 'payload'
    field is a JSON document; Slack expects an HTTP response within 3
    seconds, so heavy work is delegated to another lambda.
    """
    import json  # local import: json was not in this module's import block

    logger.info(f'this is the event: {event}')
    logger.info(f'this is the context: {context}')
    params = dict(parse_qs(event['body'], keep_blank_values=False))
    if "payload" in params:
        # SECURITY FIX: the payload is JSON arriving from an external HTTP
        # request.  Parsing it with eval() (after text-replacing
        # true/false/null) executes arbitrary expressions from untrusted
        # input; json.loads is the safe parser and handles true/false/null
        # natively.
        payload = json.loads(params["payload"][0])
        if payload["type"] == "message_action":
            logger.info(f'{payload["user"]["username"]} clicked on {payload["view"]["callback_id"]}')
        elif payload["type"] == "view_submission":
            logger.debug(payload)
            logger.info(f'{payload["user"]["username"]} submitted {payload["view"]["callback_id"]}')
            submitted_data = payload["view"]["state"]["values"]
            logger.info(submitted_data)
            if payload["view"]["callback_id"] == "pass_to_pso":
                result = pass_to_pso_handler(submitted_data)
                return result
    return {"statusCode": 200}
def pass_to_pso_handler(submitted_data):
    """Unpack the submitted Slack modal values, update the Zendesk ticket,
    and notify the PSO/NOC channel via the messaging lambda."""
    selected_pso = submitted_data["pso"]["pso_select-action"]["selected_user"]
    pso_email = slack.SlackClient().get_email_from_slack(selected_pso)

    ticket = submitted_data["ticket_id"]["ticket_id-action"]["value"]
    link = submitted_data["thread_link"]["thread_link-action"]["value"]
    language = submitted_data["reply_language"]["reply_language-action"]["selected_option"]["value"]
    kind = submitted_data["reply_type"]["reply_type-action"]["selected_option"]["value"]

    pass_to_pso(pso_email=pso_email, ticket_id=ticket, thread_link=link,
                reply_language=language, reply_type=kind)

    # Fire-and-forget notification; "Event" = async invocation.
    notification = {
        "pso": pso_email.split('#')[0],
        "ticket id": ticket,
        "channel_id": PSO_NOC_ALERTS
    }
    invoke_lambda(notification, "Event")
    return {"statusCode": 200}
the invoke function:
def invoke_lambda(payload, invocation_type):
    """Invoke SLACK_MESSAGE_LAMBDA with a JSON-serialized payload.

    invocation_type: "Event" (async fire-and-forget) or "RequestResponse".
    Returns the boto3 invoke response.  Note: for "Event" invocations the
    target lambda's return value is NOT available anywhere -- the response
    only carries a 202 StatusCode acknowledgement, which is why the return
    value never appears in the caller's CloudWatch logs.
    """
    client = boto3.client('lambda', 'us-east-1')
    response = client.invoke(
        FunctionName=SLACK_MESSAGE_LAMBDA,
        InvocationType=invocation_type,
        Payload=bytes(json.dumps(payload), encoding='utf8'))
    # FIX: the original assigned `response` and dropped it; return it so
    # callers can at least log the invoke acknowledgement.
    return response
and this is the last rows of my cw logs
I think the only way to log your return value to CloudWatch Logs is by printing it. Otherwise, it is only visible to your function's integrations, such as API Gateway.
# NOTE(review): illustrative fragment -- the `return` only makes sense inside
# a lambda handler body.  Printing the status is what makes it appear in the
# CloudWatch log stream; the bare return value is only visible to the
# function's integrations (e.g. API Gateway).
import json
status = {"statusCode": 200}
print(json.dumps(status))
return status
Related
I am using boto3 api to get all the log events in cloud watch.
The following is my code
import boto3

client = boto3.client("logs")
LOG_GROUP_NAME = "/foo/bar/foo-jobs/foo"
instance_id = "i-somefooid"

log_events = []

# First page: there is no token yet, so start from the head of the stream.
page = client.get_log_events(
    logGroupName=LOG_GROUP_NAME, logStreamName=instance_id, startFromHead=True)
log_events.extend(page["events"])
token = page["nextForwardToken"]

# CloudWatch signals the end of the stream by echoing the same forward
# token back, so keep paging until the token stops changing.
while True:
    page = client.get_log_events(
        logGroupName=LOG_GROUP_NAME, logStreamName=instance_id, nextToken=token)
    log_events.extend(page["events"])
    if token == page["nextForwardToken"]:
        break
    token = page["nextForwardToken"]

print(log_events)
Using this I am able to print all the log events for a specified instance ID, but I am not happy that I have to call .get_log_events twice. The reason is that when I make the first call I don't have a nextToken; I only have one after the initial call. Is there a way to simplify this so that I make the get_log_events call only once, inside the while True loop?
I would love to hear some suggestions.
import boto3

log_client = boto3.client('logs')

params = {
    'logGroupName': "/foo/bar/foo-jobs/foo",
    'logStreamName': "i-somefooid",
    # With no token, tell the API which end of the stream to start from.
    'startFromHead': True
}

log_events = []
prev_token = None
while True:
    response = log_client.get_log_events(**params)
    log_events.extend(response['events'])
    # BUG FIX: get_log_events returns 'nextForwardToken' (and
    # 'nextBackwardToken') -- never 'nextToken'.  The original read
    # response.get('nextToken'), which is always None, so the loop exited
    # after a single page.  End-of-stream is signalled by the API returning
    # the same forward token again.
    token = response['nextForwardToken']
    if token == prev_token:
        break
    prev_token = token
    params['nextToken'] = token
I am working on creating custom image in IBM Cloud using python. I have a very simple straight code for just creating the image and it fails.
As per me I am passing the relevant correct details for all the parameters.
Still, I get an error that is not very descriptive:
ERROR:root:Please check whether the resource you are requesting exists.
Traceback (most recent call last):
File "/Users/deepali.mittal/GITHUB/dcoa/python/build/dmittal/virtual-env36/lib/python3.6/site-packages/ibm_cloud_sdk_core/base_service.py", line 246, in send
response.status_code, http_response=response)
ibm_cloud_sdk_core.api_exception.ApiException: Error: Please check whether the resource you are requesting exists., Code: 400
Process finished with exit code 0
This is not related to the resource missing in COS. As if it is not able to find the image in COS it gives a different error.
Code :
from ibm_vpc import VpcV1 as vpc_client
from ibm_cloud_sdk_core import ApiException
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from boto3 import client as boto3_client
import logging
#logging.basicConfig(level=logging.DEBUG)
SOURCE_OBJECT_PATH = 'cos://us-south/de-images-dmittal/abc.qcow2'
RESOURCE_GROUP_ID = '1234'
OPERATING_SYSTEM = 'ubuntu-16-amd64'
def create_ssm_client():
    """Return a boto3 SSM client pinned to us-west-2."""
    return boto3_client("ssm", region_name="us-west-2")
def retrieve_ibm_config(ssm_client):
    """Fetch the IBM Cloud API key from SSM Parameter Store.

    :param ssm_client: a boto3 SSM client (anything with get_parameters).
    :returns: the decrypted value of /ibm/service-key.
    :raises RuntimeError: when the parameter is missing or malformed.
    """
    params = ["/ibm/service-key"]
    response = ssm_client.get_parameters(Names=params, WithDecryption=True)
    try:
        api_key = response["Parameters"][0]["Value"]
    except (KeyError, IndexError, ValueError):
        # FIX: a missing parameter surfaces as an empty "Parameters" list
        # (IndexError) or a missing dict key (KeyError).  The original
        # caught ValueError, which these lookups never raise, so a missing
        # "Parameters"/"Value" key crashed with an unhandled KeyError.
        raise RuntimeError(
            f"Required SSM parameters not retrieved. "
            f'Required parameters are: {params}.'
        )
    return api_key
def create_authenticator(api_key):
    """Wrap the API key in an IAM authenticator for the IBM Cloud SDK."""
    return IAMAuthenticator(api_key)
def create_ibm_client(authenticator):
    """Build a VPC service client pinned to the 2021-05-28 API version."""
    return vpc_client('2021-05-28', authenticator=authenticator)
def create_image_prototype():
    """Assemble the ImagePrototype dict passed to VpcV1.create_image."""
    return {
        'name': 'my-image',
        # 'resource_group': {'id': RESOURCE_GROUP_ID},
        'file': {'href': SOURCE_OBJECT_PATH},
        'operating_system': {'name': OPERATING_SYSTEM}
    }
def create_image():
    """Create a custom VPC image from the COS object.

    NOTE(review): a 400 "check whether the resource you are requesting
    exists" from create_image can also mean the IAM API key lacks
    permission (that was the actual root cause here, per the follow-up),
    not only a missing COS object.
    """
    ssm_client = create_ssm_client()
    api_key = retrieve_ibm_config(ssm_client)
    authenticator = create_authenticator(api_key)
    ibm_client = create_ibm_client(authenticator)
    image_prototype = create_image_prototype()
    try:
        #images = ibm_client.list_images()
        #print(vpc)
        #ibm_client.set_service_url('https://us-south.iaas.cloud.ibm.com/v1')
        response = ibm_client.create_image(image_prototype)
        print(response)
    except ApiException as e:
        # FIX: a bare "Failed" hid the API's code and message, which made
        # the IAM-permission root cause hard to diagnose; surface them.
        print(f"Failed: code={e.code} message={e.message}")


if __name__ == "__main__":
    create_image()
The issue was with an IAM permission. After fixing it, everything worked; the error shown was not relevant, so it took time to figure out.
Trying to work out a functioning version of this broken repo and getting an error regarding the type of data. It is detecting an integer where it should be a string. There are other modules that you can see in the github repo but basically this should trigger some messages to Slack based on cloudwatch events.
Stack trace is below.
from __future__ import print_function
import json
import boto3
import time
# from test_events import TEST_EVENTS, TEST_ERROR_EVENTS
from build_info import BuildInfo, CodeBuildInfo
from slack_helper import post_build_msg, find_message_for_build
from message_builder import MessageBuilder
import re
import sys
client = boto3.client('codepipeline')
def findRevisionInfo(info):
    """Return the first artifact revision of a pipeline execution, or None."""
    execution = client.get_pipeline_execution(
        pipelineName=info.pipeline,
        pipelineExecutionId=info.executionId
    )['pipelineExecution']
    revisions = execution.get('artifactRevisions', [])
    return revisions[0] if revisions else None
def pipelineFromBuild(codeBuildInfo):
    """Find the pipeline stage whose latest action ran this CodeBuild build.

    Returns (stageName, pipelineExecutionId, actionState), or
    (None, None, None) when no stage matches the build id.
    """
    state = client.get_pipeline_state(name=codeBuildInfo.pipeline)
    for stage in state['stageStates']:
        for action in stage['actionStates']:
            external_id = action.get('latestExecution', {}).get('externalExecutionId')
            if external_id and codeBuildInfo.buildId.endswith(external_id):
                execution_id = stage['latestExecution']['pipelineExecutionId']
                return (stage['stageName'], execution_id, action)
    return (None, None, None)
def processCodePipeline(event):
    """Post or update the Slack message for a CodePipeline state change."""
    build_info = BuildInfo.fromEvent(event)
    builder = MessageBuilder(build_info, find_message_for_build(build_info))
    builder.updatePipelineEvent(event)
    if builder.needsRevisionInfo():
        builder.attachRevisionInfo(findRevisionInfo(build_info))
    post_build_msg(builder)
def processCodeBuild(event):
    """Post or update the Slack message for a CodeBuild state change."""
    cbi = CodeBuildInfo.fromEvent(event)
    stage, pipeline_execution_id, action_states = pipelineFromBuild(cbi)
    if not pipeline_execution_id:
        # Build does not belong to a tracked pipeline execution.
        return

    build_info = BuildInfo(pipeline_execution_id, cbi.pipeline)
    builder = MessageBuilder(build_info, find_message_for_build(build_info))

    additional = event['detail']['additional-information']
    if 'phases' in additional:
        builder.updateBuildStageInfo(stage, additional['phases'], action_states)

    logs = additional.get('logs')
    if logs:
        builder.attachLogs(logs)

    post_build_msg(builder)
def process(event):
    """Dispatch a CloudWatch event to the handler matching its source."""
    source = event['source']
    if source == "aws.codepipeline":
        processCodePipeline(event)
    if source == "aws.codebuild":
        processCodeBuild(event)
def run(event, context):
    """Lambda entry point: forward the CloudWatch event to process()."""
    #print(json.dumps(event, indent=2, default=str))
    # FIX: process() returns None, so the unused `m = ...` binding is dropped.
    process(event)
if __name__ == "__main__":
    # FIX: test-event.json holds ONE event object.  Looping `for e in events:`
    # over the loaded dict yields its string KEYS, so process() crashed with
    # "TypeError: list indices must be integers, not str" when it indexed
    # e['source'].  Pass the whole loaded event to run() instead.
    with open('test-event.json') as f:
        event = json.load(f)
    run(event, {})
returns error:
File "/var/task/notifier.py", line 86, in run
m = process(event)
File "/var/task/notifier.py", line 79, in process
if event['source'] == "aws.codepipeline":
TypeError: list indices must be integers, not str
The data that is being ingested looks like:
{
"account": "164943972409",
"region": "us-west-2",
"detail": {
"execution-id": "c776b515-1810-465f-a0ab-3a30a1d4341b",
"pipeline": "buildfish-web-ng-code-pipeline-dev",
"version": 1,
"state": "STARTED"
},
"detail-type": "CodePipeline Pipeline Execution State Change",
"source": "aws.codepipeline",
"version": "0",
"time": "2018-05-20T04:11:41Z",
"id": "ae75c080-2f81-dd60-e6cc-76ec00489305",
"resources": [
"arn:aws:codepipeline:us-west-2:164943972409:buildfish-web-ng-code-pipeline-dev"
]
}
Thanks
In your main function, you open and load the json event.
When looping over this dict, i.e. for e in events:, you are looping over the keys of the dict, not over a list of events.
Simply pass the entire event that was read in from json.load(f) to your run(event, context) function.
if __name__ == "__main__":
with open ('test-event.json') as f:
event = json.load(f)
run(event, {})
I have a library in my code that I import it to login to Robinhood trading system.
When the user logs in to their account, the Robinhood server sends an int code to enter. This shows up as an input() prompt in the console. However, I managed to create a PyQt5 input dialog box to ask the user to enter the code. I then tried many ways to forward that code to the input() prompt, even by printing it and synthesizing an 'Enter' key press after the user hits the button in the message box, but it still does not take the input. I would love to know how to fix this issue and how to feed values into the input() prompt using PyQt5.
These are two functions in a class I have:
The dialog box:
def getCode(self):
    """Ask the user for the Robinhood verification code via a Qt dialog.

    NOTE(review): printing the code and synthesizing an Enter key press
    does not feed the blocking input() inside robin_stocks.login() --
    stdin is not connected to what print() writes, so this approach
    cannot deliver the code to the library.
    """
    keyboard=Controller()
    self.logging, okPressed = QInputDialog.getText(self, 'Get Robinhood code', 'Robinhood Code:')
    if okPressed:
        print(self.logging)
        # Synthesized Enter via pynput -- see NOTE above on why this fails.
        keyboard.press(Key.enter)
The login code:
def connecting(self):
    """Log in to Robinhood with the credentials typed into the UI widgets."""
    # Function-scope import as in the original snippet; robin.login() blocks
    # on a console input() when the server requests a verification code.
    import robin_stocks as robin
    self.robin=robin.login(self.robinuser.text(), self.robinpass.text())
and the calling code in my "main":
connecting()
getCode()
The whole code can be like this example:
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit
from PyQt5.QtGui import QIcon
from pynput.keyboard import Key,Controller
import robin_stocks as robin
class App(QWidget):
    """Demo window from the question: logs in on construction, then prompts
    for the Robinhood verification code."""

    def __init__(self):
        super().__init__()
        # Window metadata (never applied -- no setWindowTitle/setGeometry call).
        self.title = 'PyQt5 input dialogs - pythonspot.com'
        self.left = 500
        self.top = 500
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        # Kicks off the Robinhood login as soon as the widget is built.
        self.connecting()

    def getCode(self):
        """Prompt for the code and try to 'type' Enter into the console."""
        keyboard=Controller()
        i, okPressed = QInputDialog.getText(self, 'Get Robinhood code', 'Robinhood Code:')
        if okPressed:
            print(i)
            # NOTE(review): a synthesized Enter cannot deliver the printed
            # code to the input() call blocking inside robin.login().
            keyboard.press(Key.enter)

    def connecting(self):
        # NOTE(review): the credentials are passed as the LITERAL strings
        # 'self.robinuser.text()' / 'self.robinpass.text()' -- presumably
        # real widget values were intended; confirm against the full app.
        robin.login('self.robinuser.text()', 'self.robinpass.text()')
        self.getCode()
if __name__ == '__main__':
    # Standard Qt bootstrap: build the application, show the window,
    # and hand control to the event loop until it exits.
    qt_app = QApplication(sys.argv)
    window = App()
    sys.exit(qt_app.exec_())
I find it very difficult to try to write on the console since doing so depends on the OS, instead a possible solution is to modify the login method so that instead of using as input to input() use QInputDialog.
Simply copy the login method, add the necessary imports by changing some parts, and replacing input() with QInputDialog.
import os
import sys
from PyQt5 import QtCore, QtWidgets
import robin_stocks as robin
import robin_stocks.urls as urls
import robin_stocks.helper as helper
import robin_stocks.authentication as authentication
def login(
    username,
    password,
    expiresIn=86400,
    scope="internal",
    by_sms=True,
    store_session=True,
):
    """This function will effectivly log the user into robinhood by getting an
    authentication token and saving it to the session header. By default, it will store the authentication
    token in a pickle file and load that value on subsequent logins.

    :param username: The username for your robinhood account. Usually your email.
    :type username: str
    :param password: The password for your robinhood account.
    :type password: str
    :param expiresIn: The time until your login session expires. This is in seconds.
    :type expiresIn: Optional[int]
    :param scope: Specifies the scope of the authentication.
    :type scope: Optional[str]
    :param by_sms: Specifies whether to send an email(False) or an sms(True)
    :type by_sms: Optional[boolean]
    :param store_session: Specifies whether to save the log in authorization for future log ins.
    :type store_session: Optional[boolean]
    :returns: A dictionary with log in information. The 'access_token' keyword contains the access token, and the 'detail' keyword \
contains information on whether the access token was generated or loaded from pickle file.
    """
    import pickle  # FIX: pickle is used below but was missing from the imports

    device_token = authentication.generate_device_token()
    dir_path = os.path.dirname(os.path.realpath(__file__))
    pickle_path = os.path.join(dir_path, "data.pickle")
    # Challenge type is used if not logging in with two-factor authentication.
    if by_sms:
        challenge_type = "sms"
    else:
        challenge_type = "email"
    url = urls.login_url()
    payload = {
        "client_id": "c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS",
        "expires_in": expiresIn,
        "grant_type": "password",
        "password": password,
        "scope": scope,
        "username": username,
        "challenge_type": challenge_type,
        "device_token": device_token,
    }
    # If authentication has been stored in pickle file then load it. Stops login server from being pinged so much.
    if os.path.isfile(pickle_path):
        # If store_session has been set to false then delete the pickle file, otherwise try to load it.
        # Loading pickle file will fail if the acess_token has expired.
        if store_session:
            try:
                with open(pickle_path, "rb") as f:
                    pickle_data = pickle.load(f)
                access_token = pickle_data["access_token"]
                token_type = pickle_data["token_type"]
                refresh_token = pickle_data["refresh_token"]
                # Set device_token to be the original device token when first logged in.
                pickle_device_token = pickle_data["device_token"]
                payload["device_token"] = pickle_device_token
                # Set login status to True in order to try and get account info.
                helper.set_login_state(True)
                helper.update_session(
                    "Authorization", "{0} {1}".format(token_type, access_token)
                )
                # Try to load account profile to check that authorization token is still valid.
                res = helper.request_get(
                    urls.portfolio_profile(), "regular", payload, jsonify_data=False
                )
                # Raises exception is response code is not 200.
                res.raise_for_status()
                return {
                    "access_token": access_token,
                    "token_type": token_type,
                    "expires_in": expiresIn,
                    "scope": scope,
                    "detail": "logged in using authentication in data.pickle",
                    "backup_code": None,
                    "refresh_token": refresh_token,
                }
            except Exception:
                # FIX: narrowed from a bare `except:` so KeyboardInterrupt/
                # SystemExit are not swallowed while falling back to a
                # normal login.
                print(
                    "ERROR: There was an issue loading pickle file. Authentication may be expired - logging in normally."
                )
                helper.set_login_state(False)
                helper.update_session("Authorization", None)
        else:
            os.remove(pickle_path)
    # Try to log in normally.
    data = helper.request_post(url, payload)
    # Handle case where mfa or challenge is required.
    if "mfa_required" in data:
        # FIX: QInputDialog.getText returns a (text, okPressed) tuple.  The
        # original `mfa_token = _ = ...getText(...)` bound the WHOLE tuple
        # to mfa_token, so a tuple -- not the code -- was posted to the
        # server and the login could never succeed.  Unpack it (the
        # challenge branch below already did this correctly).
        mfa_token, _ = QtWidgets.QInputDialog.getText(
            None, "MFA code", "Please type in the MFA code: "
        )
        payload["mfa_code"] = mfa_token
        res = helper.request_post(url, payload, jsonify_data=False)
        while res.status_code != 200:
            mfa_token, _ = QtWidgets.QInputDialog.getText(
                None,
                "MFA code",
                "That MFA code was not correct. Please type in another MFA code: ",
            )
            payload["mfa_code"] = mfa_token
            res = helper.request_post(url, payload, jsonify_data=False)
        data = res.json()
    elif "challenge" in data:
        challenge_id = data["challenge"]["id"]
        sms_code, _ = QtWidgets.QInputDialog.getText(
            None, "Get Robinhood code", "Enter Robinhood code for validation: "
        )
        res = authentication.respond_to_challenge(challenge_id, sms_code)
        while "challenge" in res and res["challenge"]["remaining_attempts"] > 0:
            sms_code, _ = QtWidgets.QInputDialog.getText(
                None,
                "Get Robinhood code",
                "That code was not correct. {0} tries remaining. Please type in another code: ".format(
                    res["challenge"]["remaining_attempts"]
                ),
            )
            res = authentication.respond_to_challenge(challenge_id, sms_code)
        helper.update_session("X-ROBINHOOD-CHALLENGE-RESPONSE-ID", challenge_id)
        data = helper.request_post(url, payload)
    # Update Session data with authorization or raise exception with the information present in data.
    if "access_token" in data:
        token = "{0} {1}".format(data["token_type"], data["access_token"])
        helper.update_session("Authorization", token)
        helper.set_login_state(True)
        data["detail"] = "logged in with brand new authentication code."
        if store_session:
            with open(pickle_path, "wb") as f:
                pickle.dump(
                    {
                        "token_type": data["token_type"],
                        "access_token": data["access_token"],
                        "refresh_token": data["refresh_token"],
                        "device_token": device_token,
                    },
                    f,
                )
    else:
        raise Exception(data["detail"])
    return data
class Widget(QtWidgets.QWidget):
    """Minimal login form driving the Qt-ified login() function above."""

    # Declared with (username, password) payload; not connected/emitted here.
    login_signal = QtCore.pyqtSignal(str, str)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.username_le = QtWidgets.QLineEdit()
        # Password field masks input via the Password echo mode.
        self.password_le = QtWidgets.QLineEdit(echoMode=QtWidgets.QLineEdit.Password)
        login_btn = QtWidgets.QPushButton("Login")
        login_btn.clicked.connect(self.on_clicked)
        flay = QtWidgets.QFormLayout(self)
        flay.addRow("Username:", self.username_le)
        flay.addRow("Password:", self.password_le)
        flay.addRow(login_btn)

    def on_clicked(self):
        """Attempt the login with the typed credentials; login() raises on failure."""
        username = self.username_le.text()
        password = self.password_le.text()
        try:
            data = login(username, password)
        except Exception as e:
            # Best-effort: report the failure detail to the console.
            print(e)
if __name__ == "__main__":
    # Qt bootstrap: create the application, show the login form, and run
    # the event loop until the window closes.
    application = QtWidgets.QApplication(sys.argv)
    login_form = Widget()
    login_form.show()
    sys.exit(application.exec_())
Although a more robust solution would be to implement the requests using Qt Network.
I'm attempting to write an AWS Lambda which will loop over all Cloudwatch log groups, creating a metric filter for a search term on each log group.
Unfortunately I am finding that although all of my calls to put_metric_filter receive HTTP 200 responses, most of the calls result in nothing getting created (4/15 calls resulting in the creation of a filter).
I have an AWS Lambda with this handler file 'handler.py':
from __future__ import print_function
from basicExample import ManageMetricsAndAlarms
import json, logging
log = logging.getLogger()
log.setLevel(logging.INFO)
def handler(event, context):
    """Lambda entry point: delegate to ManageMetricsAndAlarms and return
    the result as a JSON string."""
    log.info("Received event {}".format(json.dumps(event)))
    manager = ManageMetricsAndAlarms(event, context)
    return json.dumps(manager.main())
Which calls the ManageMetricsAndAlarms class from 'basicExample.py' which maps over an array of log group names, creating a metric for each which filters on the term 'ERROR':
from __future__ import print_function
import boto3, os, sys, json, botocore, logging
log = logging.getLogger()
log.setLevel(logging.INFO)
class ManageMetricsAndAlarms:
    """Creates an ERROR-count metric filter on each of a fixed list of
    lambda log groups."""
    # -------------------------------------------------
    def __init__(self, event, context):
        self.event = event
    # -------------------------------------------------
    def main(self):
        """Put the ERROR metric filter on every configured log group and
        return the list of put_metric_filter responses."""
        metricsNamespace = 'ExampleMetrics'
        errorFilter = '{ $.levelname = "ERROR" }'
        # Supposing that I have log groups for 10 imaginatively named lambdas
        logGroupNames = [
            '/aws/lambda/Lambda-1', '/aws/lambda/Lambda-2',
            '/aws/lambda/Lambda-3', '/aws/lambda/Lambda-4',
            '/aws/lambda/Lambda-5', '/aws/lambda/Lambda-6',
            '/aws/lambda/Lambda-7', '/aws/lambda/Lambda-8',
            '/aws/lambda/Lambda-9', '/aws/lambda/Lambda-10'
        ]
        # FIX: map() is lazy in Python 3 -- the original returned the
        # un-consumed map object, so createErrorFilter only ran if/when
        # something happened to iterate the result (hence "HTTP 200 but
        # nothing created" for most groups).  A list comprehension runs
        # eagerly for every log group.  The unused
        # `cloudwatch = boto3.resource('cloudwatch')` binding was dropped.
        responses = [
            self.createErrorFilter(metricsNamespace, errorFilter, lg)
            for lg in logGroupNames
        ]
        return responses
    # -------------------------------------------------
    def createErrorFilter(self, metricsNamespace, filterPattern, logGroup):
        """Put one metric filter named ERROR-filter on `logGroup`, counting
        events whose levelname is ERROR."""
        metricName = logGroup + '_ErrorCount'
        logs_client = boto3.client('logs')
        log.info('Put metric filter ' + metricName + ' with filter $.levelname-ERROR on logGroup: ' + logGroup)
        errorFilter = logs_client.put_metric_filter(
            logGroupName = logGroup,
            filterName ='ERROR-filter',
            filterPattern = filterPattern,
            metricTransformations = [
                {
                    'metricNamespace': metricsNamespace,
                    'metricValue': '1',
                    'metricName': metricName,
                }
            ]
        )
        log.info('errorFilter response: ' + json.dumps(errorFilter))
        return errorFilter
    # -------------------------------------------------
I'm quite new to python so I expect I've missed something basic but any help would be much appreciated!
Few things to consider:
Why would you put this on a lambda? Are you going to put the same filter every minute/hour on the same lambdas? In general you should execute your script only once (or just after deploying new lambdas).
map is a lazy evaluator, so you will need something like
list(map(lambda x: print(x), iterable))
if you want to execute the function
Here is an example
import boto3
def createErrorFilter(metricsNamespace, filterPattern, logGroup):
    """Put a metric filter named ERROR-filter on one log group, emitting a
    count of 1 into `metricsNamespace` per matching event."""
    logs_client = boto3.client('logs')
    logs_client.put_metric_filter(
        logGroupName=logGroup,
        filterName='ERROR-filter',
        filterPattern=filterPattern,
        metricTransformations=[{
            'metricNamespace': metricsNamespace,
            'metricValue': '1',
            'metricName': logGroup + '_example',
        }],
    )
    print('ok')
    return
# Example driver: put the ERROR filter on each listed log group.
cloudwatch = boto3.resource('cloudwatch')
metricsNamespace = 'ExampleMetrics-2'
errorFilter = 'ERROR'
logGroupNames = [
    '/aws/lambda/lambda1', '/aws/lambda/lambda2'
]
# map over the log groups adding a metric filter for 'ERROR' to each
# (wrapped in list() because map() is lazy and would otherwise never execute)
responses = list(map(lambda lg: createErrorFilter(metricsNamespace, errorFilter, lg), logGroupNames))