How can I capture logs of a query from HiveServer2 in real time with a Python client? - python

I use a modified version of pyhs2 (https://pypi.python.org/pypi/pyhs2) with the ability to run async queries and with the additional methods from TCLIService.Client (GetLog, send_GetLog, recv_GetLog) found in the Hue sources (https://github.com/cloudera/hue/blob/master/apps/beeswax/gen-py/TCLIService/TCLIService.py#L739).
But when I run the TCLIService.Client.GetLog method, I get an error:
$ python example.py
Traceback (most recent call last):
  File "example.py", line 85, in <module>
    rq = client.GetLog(lq)
  File "/Users/toly/hive_streaming/libs/pyhs4/TCLIService/TCLIService.py", line 757, in GetLog
    return self.recv_GetLog()
  File "/Users/toly/hive_streaming/libs/pyhs4/TCLIService/TCLIService.py", line 773, in recv_GetLog
    raise x
thrift.Thrift.TApplicationException: Invalid method name: 'GetLog'
In the script I use HiveServer2 from the Cloudera VM. The same server, as I guess, is used by Hue, and there it works successfully. In addition, I tried client_protocol values from 0 to 7 when creating the session.
import time

import sasl
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport

from libs.pyhs4.cloudera.thrift_sasl import TSaslClientTransport
from libs.pyhs4.TCLIService import TCLIService
from libs.pyhs4.TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
    TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TType, TTypeId, \
    TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation, TCloseOperationReq, \
    TCloseSessionReq, TGetSchemasReq, TCancelOperationReq, TGetLogReq

auth = 'PLAIN'
username = 'apanin'
password = 'none'
host = 'cloudera'
port = 10000

test_hql1 = 'select count(*) from test_text'

def sasl_factory():
    saslc = sasl.Client()
    saslc.setAttr("username", username)
    saslc.setAttr("password", password)
    saslc.init()
    return saslc

def get_type(typeDesc):
    for ttype in typeDesc.types:
        if ttype.primitiveEntry is not None:
            return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
        elif ttype.mapEntry is not None:
            return ttype.mapEntry
        elif ttype.unionEntry is not None:
            return ttype.unionEntry
        elif ttype.arrayEntry is not None:
            return ttype.arrayEntry
        elif ttype.structEntry is not None:
            return ttype.structEntry
        elif ttype.userDefinedTypeEntry is not None:
            return ttype.userDefinedTypeEntry

def get_value(colValue):
    if colValue.boolVal is not None:
        return colValue.boolVal.value
    elif colValue.byteVal is not None:
        return colValue.byteVal.value
    elif colValue.i16Val is not None:
        return colValue.i16Val.value
    elif colValue.i32Val is not None:
        return colValue.i32Val.value
    elif colValue.i64Val is not None:
        return colValue.i64Val.value
    elif colValue.doubleVal is not None:
        return colValue.doubleVal.value
    elif colValue.stringVal is not None:
        return colValue.stringVal.value

sock = TSocket(host, port)
transport = TSaslClientTransport(sasl_factory, "PLAIN", sock)
client = TCLIService.Client(TBinaryProtocol(transport))
transport.open()

res = client.OpenSession(TOpenSessionReq(username=username, password=password))
session = res.sessionHandle

query1 = TExecuteStatementReq(session, statement=test_hql1, confOverlay={}, runAsync=True)
response1 = client.ExecuteStatement(query1)
opHandle1 = response1.operationHandle

while True:
    time.sleep(1)
    q1 = TGetOperationStatusReq(operationHandle=opHandle1)
    res1 = client.GetOperationStatus(q1)
    lq = TGetLogReq(opHandle1)
    rq = client.GetLog(lq)
    if res1.operationState == 2:
        break

req = TCloseOperationReq(operationHandle=opHandle1)
client.CloseOperation(req)

req = TCloseSessionReq(sessionHandle=session)
client.CloseSession(req)
How can I capture the logs of a Hive query from HiveServer2 in real time?
UPD: Hive version is 1.2.1.

To get the logs of an operation, use the FetchResults method with the parameter fetchType=1, which makes it return log lines instead of query results. (The GetLog Thrift call appears to be a Cloudera-specific extension, which would explain why a stock HiveServer2 rejects it with 'Invalid method name'.)
Example usage:
query1 = TExecuteStatementReq(session, statement=test_hql1, confOverlay={}, runAsync=True)
response1 = client.ExecuteStatement(query1)
opHandle1 = response1.operationHandle

while True:
    time.sleep(1)
    q1 = TGetOperationStatusReq(operationHandle=opHandle1)
    res1 = client.GetOperationStatus(q1)
    request_logs = TFetchResultsReq(operationHandle=opHandle1, orientation=0, maxRows=10, fetchType=1)
    response_logs = client.FetchResults(request_logs)
    print response_logs.results
    if res1.operationState == 2:
        break
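The TRowSet in response_logs.results still has to be unpacked to get plain strings. A minimal sketch of doing that, assuming HiveServer2 protocol V6 or later, where results come back column-oriented (for older protocol versions the lines sit in results.rows instead):

log_rows = response_logs.results
if log_rows.columns:
    # columnar layout (protocol V6+): the log is a single string column
    for line in log_rows.columns[0].stringVal.values:
        print(line)
else:
    # row-based layout (older protocols)
    for row in log_rows.rows:
        print(row.colVals[0].stringVal.value)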

Related

Multiprocessing in SAP using Python

My SAP is very old and I can't make API calls with it, so I have to manipulate the SAP GUI to do my stuff.
I'm trying to access two SAP transactions at the same time in two different windows using Python.
To do this I'm using the libraries pywin32, subprocess and multiprocessing.
But I'm getting the following errors:
TypeError: cannot pickle 'PyIDispatch' object
and
PermissionError: [WinError 5] Access denied
What I've managed so far is to open two windows (create two SAP sessions) and access the transactions in different windows, but one after the other - in other words, not at the same time.
This test program consists of 3 separate scripts:
One has the class to create a connection, create the first session and log into the account.
The second class is to "manipulate" SAP.
The last one is the main script.
The scripts:
createconnection.py
from subprocess import Popen
import time

from win32com.client import GetObject

class Sap:
    def __init__(self, sap_env, user_id, user_password, language="EN",
                 newSession=False, connectBy=2):
        self.sap_file = "C:\\Program Files (x86)\\SAP\\FrontEnd\\SapGui" +\
            "\\saplogon.exe"
        self.sap_env = sap_env
        self.user_id = user_id
        self.user_password = user_password
        self.language = language
        self.connectBy = connectBy
        self.newSession = newSession

    def __get_sap_gui__(self):
        try:
            return GetObject('SAPGUI').GetScriptingEngine
        except:
            time.sleep(0.5)
            return self.__get_sap_gui__()

    def get_sap_connection(self):
        if self.connectBy == 3:
            Popen(self.sap_file + ' ' + self.sap_env)
            sapGui = self.__get_sap_gui__()
            conn = sapGui.Connections(0)
            timeout = 10
            while conn.Sessions.Count == 0 and timeout:
                time.sleep(1)
                timeout -= 1
            if timeout == 0: raise Exception("Fail to connect")
        else:
            Popen(self.sap_file)
            sapGui = self.__get_sap_gui__()
            conn = None
            if self.connectBy == 1:
                if sapGui.Connections.Count > 0:  # it's not good, I'll fix this later
                    for conn in sapGui.Connections:
                        if conn.Description == self.sap_env:
                            break
                if not conn:
                    conn = sapGui.OpenConnection(self.sap_env)
            else:
                if sapGui.Connections.Count > 0:
                    for conn in sapGui.Connections:
                        if self.sap_env in conn.ConnectionString:
                            break
                if not conn:
                    conn = sapGui.OpenConnectionByConnectionString(self.sap_env)
        return conn

    def get_sap_session(self, conn):
        if self.newSession:
            numSessions = conn.Sessions.Count + 1
            conn.Sessions(0).createsession()
            while conn.Sessions.Count < numSessions: pass
            session = conn.Sessions(numSessions-1)
        else:
            session = conn.Sessions(0)
        if session.findById('wnd[0]/sbar').text.startswith('SNC logon'):
            session.findById('wnd[0]/usr/txtRSYST-LANGU').text = self.language
            session.findById('wnd[0]').sendVKey(0)
            session.findById('wnd[0]').sendVKey(0)
        elif session.Info.User == '':
            session.findById('wnd[0]/usr/txtRSYST-BNAME').text = self.user_id
            session.findById('wnd[0]/usr/pwdRSYST-BCODE').text =\
                self.user_password
            session.findById('wnd[0]/usr/txtRSYST-LANGU').text = self.language
            session.findById('wnd[0]').sendVKey(0)
        session.findById('wnd[0]').maximize()
        return session
manipulatesap.py
from createconnection import Sap

class QuerySap(Sap):
    def __init__(self, sap_env, user_id, user_password, language):
        super().__init__(sap_env, user_id, user_password, language=language)
        self.connection = self.get_sap_connection()
        self.session = self.get_sap_session(self.connection)
        self.new_session = None

    def open_new_windows(self):
        self.connection.Sessions(0).createsession()
        self.connection.Sessions(0).createsession()
        self.new_session = self.connection.Sessions(1)

    @property
    def sess1(self):
        return self.session

    @property
    def sess2(self):
        return self.new_session
main.py
from manipulatesap import QuerySap
from multiprocessing import Pool, Process
from time import sleep

def goto_trasaction(session, transacion):
    session.findById("wnd[0]/tbar[0]/okcd").text = transacion
    session.findById("wnd[0]").sendVKey(0)
    sleep(5)

def sap_interface_multi_process(usr, pw, env):
    sap_nav = QuerySap(sap_env=env, user_id=usr, user_password=pw,
                       language="PT")
    sap_nav.open_new_windows()
    session1 = sap_nav.sess1
    session2 = sap_nav.sess2
    p1 = Process(target=goto_trasaction, args=(session1, "TRANSACION A"))
    p2 = Process(target=goto_trasaction, args=(session2, "TRANSACTION B"))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

def main():
    print(">>> Start")
    sap_env = "string_for_connection"
    sap_interface_multi_process("usr_id", "usr_pw", sap_env)
    print(">>> Finish")

if __name__ == "__main__":
    main()
Could you guys help me find what I'm missing and what I should do?
Thank you very much.
Finally I got the solution after some time of vacation, but I had to refactor a lot of my code.
The COM references (PyIDispatch objects) cannot be pickled, so they can't be handed to a child process. What I did was instantiate the Sap class into an object and pass this object to the function that is executed in parallel; inside that function I use the Sap class methods to get the connection and the session, so each process acquires its own COM references.
Here is my solution. Not pretty, but it worked:
from multiprocessing import Process
from time import sleep

from Modules.Sap.sapinit import Sap

def create_sap_session(sap_obj, extra_num_sessions):
    sap_conn = sap_obj.get_sap_connection()
    sap_obj.get_sap_session(sap_conn)
    if extra_num_sessions < 1:
        return
    for _ in range(extra_num_sessions):
        sap_conn.Sessions(0).createsession()
    return

def parallel_sap_query(sap_obj, sessions_num, transaction):
    # runs in a child process: re-acquire the connection here, because
    # the COM objects themselves cannot cross the process boundary
    sap_conn = sap_obj.get_sap_connection()
    sap_session = sap_conn.Sessions(sessions_num)
    sap_session.findById("wnd[0]/tbar[0]/okcd").text = transaction
    sap_session.findById("wnd[0]").sendVKey(0)

def execute_cancellations(sap_obj):
    create_sap_session(sap_obj, 2)
    sleep(3)
    p1 = Process(target=parallel_sap_query, args=(sap_obj, 0, "A", ))
    p2 = Process(target=parallel_sap_query, args=(sap_obj, 1, "B", ))
    p3 = Process(target=parallel_sap_query, args=(sap_obj, 2, "C", ))
    p1.start()
    p2.start()
    p3.start()
    p1.join()
    p2.join()
    p3.join()
    close_sap_sessions(sap_obj, 2, 1, 0)

def close_sap_sessions(sap_obj, *sessions):
    sap_conn = sap_obj.get_sap_connection()
    for session in sessions:
        sap_session = sap_conn.Sessions(session)
        sap_session.findById("wnd[0]").close()
        sap_session.findById("wnd[1]/usr/btnSPOP-OPTION1").press()

def main():
    sap_obj = Sap(sap_env, sap_id, sap_pw, "PT")  # sap_env, sap_id, sap_pw defined elsewhere
    execute_cancellations(sap_obj)

Custom Python HP ILO Node Exporter not changing hostname by request

I'm trying to edit this project in Python to have an HP iLO exporter for Prometheus. So far I've read a few articles here on Stack Overflow and tried to implement some functionality, and eventually I came up with a partially working script, but the hostname is not changing after the first request. Is there a way to dump the collector?
I have tried it with try/except but it just does not work.
The goal is to use curl like this:
curl localhost:9116/metrics?hostname=ip
And what will happen if there are 10 requests at the same time with different hostnames? Should it create some kind of a queue?
Can someone help me? Thanks
Original project: https://github.com/JackWindows/ilo-exporter
My code:
#!/usr/bin/env python
import collections
import os
import time
import traceback

import flask
import redfish
import waitress
from flask import Flask, Response, request
from prometheus_client import make_wsgi_app
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from time import sleep
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.wsgi import ClosingIterator

class AfterResponse:
    def __init__(self, app=None):
        self.callbacks = []
        if app:
            self.init_app(app)

    def __call__(self, callback):
        self.callbacks.append(callback)
        return callback

    def init_app(self, app):
        # install extension
        app.after_response = self
        # install middleware
        app.wsgi_app = AfterResponseMiddleware(app.wsgi_app, self)

    def flush(self):
        for fn in self.callbacks:
            try:
                fn()
            except Exception:
                traceback.print_exc()

class AfterResponseMiddleware:
    def __init__(self, application, after_response_ext):
        self.application = application
        self.after_response_ext = after_response_ext

    def __call__(self, environ, after_response):
        iterator = self.application(environ, after_response)
        try:
            return ClosingIterator(iterator, [self.after_response_ext.flush])
        except Exception:
            traceback.print_exc()
            return iterator

class ILOCollector(object):
    def __init__(self, hostname: str, port: int = 443, user: str = 'admin', password: str = 'password') -> None:
        self.ilo = redfish.LegacyRestClient(base_url=hostname, username=user, password=password)
        self.ilo.login()
        system = self.ilo.get('/redfish/v1/Systems/1/').obj
        self.label_names = ('hostname', 'product_name', 'sn')
        self.label_values = (hostname, system.Model, system.SerialNumber.strip())

    def collect(self):
        embedded_media = self.ilo.get('/redfish/v1/Managers/1/EmbeddedMedia/').obj
        smart_storage = self.ilo.get('/redfish/v1/Systems/1/SmartStorage/').obj
        thermal = self.ilo.get('/redfish/v1/Chassis/1/Thermal/').obj
        power = self.ilo.get('/redfish/v1/Chassis/1/Power/').obj

        g = GaugeMetricFamily('hpilo_health',
                              'iLO health status, -1: Unknown, 0: OK, 1: Degraded, 2: Failed.',
                              labels=self.label_names + ('component',))

        def status_to_code(status: str) -> int:
            status = status.lower()
            ret = -1
            if status == 'ok':
                ret = 0
            elif status == 'warning':
                ret = 1
            elif status == 'failed':
                ret = 2
            return ret

        g.add_metric(self.label_values + ('embedded_media',), status_to_code(embedded_media.Controller.Status.Health))
        g.add_metric(self.label_values + ('smart_storage',), status_to_code(smart_storage.Status.Health))
        for fan in thermal.Fans:
            g.add_metric(self.label_values + (fan.FanName,), status_to_code(fan.Status.Health))
        yield g

        g = GaugeMetricFamily('hpilo_fan_speed', 'Fan speed in percentage.',
                              labels=self.label_names + ('fan',), unit='percentage')
        for fan in thermal.Fans:
            g.add_metric(self.label_values + (fan.FanName,), fan.CurrentReading)
        yield g

        sensors_by_unit = collections.defaultdict(list)
        for sensor in thermal.Temperatures:
            if sensor.Status.State.lower() != 'enabled':
                continue
            reading = sensor.CurrentReading
            unit = sensor.Units
            sensors_by_unit[unit].append((sensor.Name, reading))
        for unit in sensors_by_unit:
            g = GaugeMetricFamily('hpilo_temperature', 'Temperature sensors reading.',
                                  labels=self.label_names + ('sensor',), unit=unit.lower())
            for sensor_name, sensor_reading in sensors_by_unit[unit]:
                g.add_metric(self.label_values + (sensor_name,), sensor_reading)
            yield g

        g = GaugeMetricFamily('hpilo_power_current', 'Current power consumption in Watts.', labels=self.label_names,
                              unit='watts')
        g.add_metric(self.label_values, power.PowerConsumedWatts)
        yield g

        label_values = self.label_values + (str(power.PowerMetrics.IntervalInMin),)
        g = GaugeMetricFamily('hpilo_power_average', 'Average power consumption in Watts.',
                              labels=self.label_names + ('IntervalInMin',), unit='watts')
        g.add_metric(label_values, power.PowerMetrics.AverageConsumedWatts)
        yield g

        g = GaugeMetricFamily('hpilo_power_min', 'Min power consumption in Watts.',
                              labels=self.label_names + ('IntervalInMin',), unit='watts')
        g.add_metric(label_values, power.PowerMetrics.MinConsumedWatts)
        yield g

        g = GaugeMetricFamily('hpilo_power_max', 'Max power consumption in Watts.',
                              labels=self.label_names + ('IntervalInMin',), unit='watts')
        g.add_metric(label_values, power.PowerMetrics.MaxConsumedWatts)
        yield g

# Create Flask app
app = Flask('iLO Exporter')

@app.route('/')
def root():
    return '''<html>
<head><title>iLO Exporter</title></head>
<body>
<h1>iLO Exporter</h1>
<p><a href='/metrics'>Metrics</a></p>
</body>
</html>'''

AfterResponse(app)

@app.after_response
def say_hi():
    print("hi")

@app.route("/metrics")
def home():
    try:
        REGISTRY.unregister(collector)
    except:
        print("An exception occurred")
        pass
    port = int(os.getenv('ILO_PORT', 443))
    user = os.getenv('ILO_USER', 'admin')
    password = os.getenv('ILO_PASSWORD', 'password')
    hostname = request.args.get('hostname')
    app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
        '/metrics': make_wsgi_app()
    })
    collector = ILOCollector(hostname, port, user, password)
    REGISTRY.register(collector)

if __name__ == '__main__':
    exporter_port = int(os.getenv('LISTEN_PORT', 9116))
    waitress.serve(app, host='0.0.0.0', port=exporter_port)

Is there a way to read data from redis server using pandas?

I'm working on a project with IoT devices that are connected to a dotnet server hosted in the Azure cloud. I'm currently using for loops to read real-time data, but I want to read some real-time stats from the Redis database using pandas. Can someone explain to me how to start?
I'm using the script below to read stats, but I want to start using pandas.
import os
import re
import json
import traceback
from collections import Counter
import time
import datetime as dt

import redis
from tqdm import tqdm  # taqadum (تقدّم) == progress
from jsonpointer import resolve_pointer as j_get
from jsonpointer import JsonPointerException
import pandas as pd

os.system("color 0c")  # change console color to red

if False:
    # x Redis
    r = redis.Redis(host="****.redis.cache.windows.net",
                    port=***,
                    password="***",
                    ssl=True,)
else:
    # y Redis
    r = redis.Redis(host="***.redis.cache.windows.net",
                    port=****,
                    password="*****",
                    ssl=True,)

print(r.info())
print("Server started at: ", end="")
print(dt.datetime.now() - dt.timedelta(seconds=r.info()['uptime_in_seconds']))

print("Building pipe")
pipe = r.pipeline()
# for key in tqdm(r.scan_iter("MC:SessionInfo*")):
for key in tqdm(r.scan_iter("MC:SessionInfo*", count=2500)):
    pipe.hgetall(key)
print("Executing pipe")
responses = pipe.execute()

print("Processing effluvia")
q = {}
k = {}
first = True
last_contact = {}
for data in tqdm(responses):
    try:
        j = json.loads(data[b'LastStatusBody'])
        serial = j['System']['Serial'].lower()
        q[serial] = j
        last_contact[serial] = time.time() - int(data[b'LastContact'])
        # TODO: json searching sensibly!
        vac[serial] = j['LiveA']['Unit']['Volatge_Vac']
    except:
        if first:
            traceback.print_exc()
            first = False
        else:
            pass

for key, value in fw_versions.items():
    if value.split(',')[0] == "xx v1.0.0.0":
        x_paired.append(key)
print(x_paired)
print("Total paired :", len(x_paired))
Instead of the above procedure, I want to start with pandas to read the data easily and make some charts for daily updates to the team.
I serialize / deserialize to pyarrow or pickle and then use an additional key as metadata. This works across local, GCloud, AWS EB and Azure:
import pandas as pd
import pyarrow as pa
import redis, json, os, pickle

import ebutils
from logenv import logenv
from pandas.core.frame import DataFrame
from redis.client import Redis
from typing import (Union, Optional)

class mycache():
    __redisClient: Redis
    CONFIGKEY = "cacheconfig"

    def __init__(self) -> None:
        try:
            ep = os.environ["REDIS_HOST"]
        except KeyError:
            if os.environ["HOST_ENV"] == "GCLOUD":
                os.environ["REDIS_HOST"] = "redis://10.0.0.3"
            elif os.environ["HOST_ENV"] == "EB":
                os.environ["REDIS_HOST"] = "redis://" + ebutils.get_redis_endpoint()
            elif os.environ["HOST_ENV"] == "AZURE":
                # os.environ["REDIS_HOST"] = "redis://ignore:password#redis-sensorvenv.redis.cache.windows.net"
                pass  # should be set in azure env variable
            elif os.environ["HOST_ENV"] == "LOCAL":
                os.environ["REDIS_HOST"] = "redis://127.0.0.1"
            else:
                raise "could not initialise redis"
                return  # no known redis setup

        # self.__redisClient = redis.Redis(host=os.environ["REDIS_HOST"])
        self.__redisClient = redis.Redis.from_url(os.environ["REDIS_HOST"])
        self.__redisClient.ping()
        # get config as well...
        self.config = self.get(self.CONFIGKEY)
        if self.config is None:
            self.config = {"pyarrow": True, "pickle": False}
            self.set(self.CONFIGKEY, self.config)
        self.alog = logenv.alog()

    def redis(self) -> Redis:
        return self.__redisClient

    def exists(self, key: str) -> bool:
        if self.__redisClient is None:
            return False
        return self.__redisClient.exists(key) == 1

    def get(self, key: str) -> Union[DataFrame, str]:
        keytype = "{k}.type".format(k=key)
        valuetype = self.__redisClient.get(keytype)
        if valuetype is None:
            if (key.split(".")[-1] == "pickle"):
                return pickle.loads(self.redis().get(key))
            else:
                ret = self.redis().get(key)
                if ret is None:
                    return ret
                else:
                    return ret.decode()
        elif valuetype.decode() == str(pd.DataFrame):
            # fallback to pickle serialized form if pyarrow fails
            # https://issues.apache.org/jira/browse/ARROW-7961
            try:
                return pa.deserialize(self.__redisClient.get(key))
            except pa.lib.ArrowIOError as err:
                self.alog.warning("using pickle from cache %s - %s - %s", key, pa.__version__, str(err))
                return pickle.loads(self.redis().get(f"{key}.pickle"))
            except OSError as err:
                if "Expected IPC" in str(err):
                    self.alog.warning("using pickle from cache %s - %s - %s", key, pa.__version__, str(err))
                    return pickle.loads(self.redis().get(f"{key}.pickle"))
                else:
                    raise err
        elif valuetype.decode() == str(type({})):
            return json.loads(self.__redisClient.get(key).decode())
        else:
            return self.__redisClient.get(key).decode()  # type: ignore

    def set(self, key: str, value: Union[DataFrame, str]) -> None:
        if self.__redisClient is None:
            return
        keytype = "{k}.type".format(k=key)
        if str(type(value)) == str(pd.DataFrame):
            self.__redisClient.set(key, pa.serialize(value).to_buffer().to_pybytes())
            if self.config["pickle"]:
                self.redis().set(f"{key}.pickle", pickle.dumps(value))
                # issue should be transient through an upgrade....
                # once switched off data can go away
                self.redis().expire(f"{key}.pickle", 60*60*24)
        elif str(type(value)) == str(type({})):
            self.__redisClient.set(key, json.dumps(value))
        else:
            self.__redisClient.set(key, value)
        self.__redisClient.set(keytype, str(type(value)))

if __name__ == '__main__':
    os.environ["HOST_ENV"] = "LOCAL"
    r = mycache()
    rr = r.redis()
    for k in rr.keys("cache*"):
        print(k.decode(), rr.ttl(k))
        print(rr.get(k.decode()))
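For illustration, a hypothetical round trip with the class above (the key name cache.readings is made up):

import pandas as pd

df = pd.DataFrame({"sensor": ["a", "b"], "reading": [1.2, 3.4]})
cache = mycache()
cache.set("cache.readings", df)         # stores pyarrow bytes plus a "<key>.type" marker
restored = cache.get("cache.readings")  # the type marker routes this back through pyarrow
print(restored.equals(df))              # True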

return all data from a loop

I have code that logs into devices. I can print the information from the devices in the loop just fine, but I can only return (not print) the data from the last device in the list. How can I return all data from all devices in the loop?
from flask import Flask, jsonify, request
import netmiko
from netmiko.ssh_autodetect import SSHDetect
from netmiko.ssh_exception import NetMikoTimeoutException
import time

app = Flask(__name__)

@app.route('/firewall', methods=['GET', 'POST', 'DELETE'])
def firewall():
    # Authentication
    headers = request.headers
    auth = headers.get("xxxxx")
    if auth == 'xxxx':
        data = request.get_json(force=True)
        fw_a = data["DeviceAddressList"]
        src_a = data['SourceAddressList']
        src_p = data['SourcePortList']
        dst_a = data['DestinationAddressList']
        dst_p = data['DestinationPortList']
        policy = data["PolicyAllow"]
        p_col = data['Protocol']
        p_show = data['show']
        p_push = data['push']
        config = data['config']
        # Juniper Normalize the data for command line interface
        juniper_command = '"({})"'.format('|'.join(src_a + src_p + dst_a + dst_p))
        username = "xxxx"
        password = "Pxxxx"
        try:
            ip_list = fw_a
            for ip in ip_list:
                # print(ip)
                device = {"device_type": "autodetect", "username": username, "host": ip, "password": password}
                guesser = SSHDetect(**device)
                best_match = guesser.autodetect()
                print(best_match)
                if "None" in str(best_match):
                    continue
                # else:
                if "true" in str(p_show) and "juniper_junos" in str(best_match):
                    device["device_type"] = best_match
                    connection = netmiko.ConnectHandler(**device)
                    connection.find_prompt(delay_factor=2)
                    time.sleep(1)
                    connection.enable()
                    resp = connection.send_command(
                        'show configuration | display xml | match ' + str(juniper_command), delay_factor=2)
                    print(ip + '\n' + best_match + resp)
                if "true" in str(p_push) and "juniper_junos" in str(best_match):
                    device["device_type"] = best_match
                    connection = netmiko.ConnectHandler(**device)
                    connection.find_prompt(delay_factor=2)
                    time.sleep(1)
                    connection.enable()
                    push_resp = connection.send_command(config, delay_factor=2)
                    connection.disconnect()
                    print(push_resp)
            return ip + '\n' + best_match + resp
        except NetMikoTimeoutException:
            return "This Network Device is not reachable"
    else:
        return jsonify({"message": "ERROR: Unauthorized"}), 401
Code example: loop over the IPs, get the value you want to return for each IP, and push it into a dict. Return the dict to the caller of the function firewall.
def firewall():
    result = dict()
    for ip in ip_list:
        push_resp = dummy_get_push_resp()
        result[ip] = push_resp
    return result
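Applied to the question's firewall() view, that pattern looks roughly like the sketch below; dummy_get_device_output is a hypothetical stand-in for the SSHDetect/ConnectHandler logic, and jsonify turns the dict into a single HTTP response:

from flask import jsonify

def firewall_all(ip_list):
    result = {}
    for ip in ip_list:
        # stand-in for the per-device autodetect + send_command logic
        best_match, resp = dummy_get_device_output(ip)
        result[ip] = best_match + '\n' + resp  # keep this device's output
    return jsonify(result)  # one response holding every device's data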

Ryu controller struct.error when adding a new table flow

I'm writing a Ryu L4 switch application and I am trying to do the following: when a TCP/UDP packet is identified, the application checks a local database to see if the packet parameters are from a known attacker (source IP, destination IP and destination port).
If the packet matches one logged in the attacker database, a flow is added to the switch to drop the specific packet (this flow has a duration of 2 hours); if the packet doesn't match, a flow is added to forward to a specific switch port (this flow has a duration of 5 minutes).
The problem is, when the controller sends the new flow to the switch/datapath, I receive the following error:
SimpleSwitch13: Exception occurred during handler processing. Backtrace from offending handler [_packet_in_handler] servicing event [EventOFPPacketIn] follows.
Traceback (most recent call last):
  File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/base/app_manager.py", line 290, in _event_loop
    handler(ev)
  File "/root/SecAPI/Flasks/Code/SDN/switchL3.py", line 237, in _packet_in_handler
    self.add_security_flow(datapath, 1, match, actions)
  File "/root/SecAPI/Flasks/Code/SDN/switchL3.py", line 109, in add_security_flow
    datapath.send_msg(mod)
  File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/controller/controller.py", line 423, in send_msg
    msg.serialize()
  File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/ofproto/ofproto_parser.py", line 270, in serialize
    self._serialize_body()
  File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/ofproto/ofproto_v1_3_parser.py", line 2738, in _serialize_body
    self.out_group, self.flags)
  File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/lib/pack_utils.py", line 25, in msg_pack_into
    struct.pack_into(fmt, buf, offset, *args)
struct.error: 'H' format requires 0 <= number <= 65535
Here's my full code:
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.lib.packet import in_proto
import sqlite3

class SimpleSwitch13(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}
        self.initial = True
        self.security_alert = False

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch()
        self.initial = True
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)
        self.initial = False

    # Adds a flow into a specific datapath, with a hard_timeout of 5 minutes.
    # Meaning that a certain packet flow ceases existing after 5 minutes.
    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            if self.initial == True:
                mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                        priority=priority, match=match,
                                        instructions=inst)
            elif self.initial == False:
                mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                        priority=priority, match=match,
                                        instructions=inst, hard_timeout=300)
        else:
            if self.initial == True:
                mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                        match=match, instructions=inst)
            elif self.initial == False:
                mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                        match=match, instructions=inst,
                                        hard_timeout=300)
        datapath.send_msg(mod)

    # Adds a security flow into the controlled device. A security flow differs from a normal
    # flow in its duration: a security flow has a duration of 2 hours.
    def add_security_flow(self, datapath, priority, match, actions, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
        #                                      actions)]
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_CLEAR_ACTIONS, [])]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match, command=ofproto.OFPFC_ADD,
                                    instructions=inst, hard_timeout=432000)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst, command=ofproto.OFPFC_ADD,
                                    hard_timeout=432000)
        datapath.send_msg(mod)

    # Deletes an already existing flow that matches a given packet match.
    def del_flow(self, datapath, priority, match, actions, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match, instruction=inst,
                                    command=ofproto.OFPFC_DELETE)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst,
                                    command=ofproto.OFPFC_DELETE)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            # match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)
            # check IP Protocol and create a match for IP
            if eth.ethertype == ether_types.ETH_TYPE_IP:
                conn = sqlite3.connect("database/sdnDatabase.db")
                cursor = conn.cursor()
                ip = pkt.get_protocol(ipv4.ipv4)
                srcip = ip.src
                dstip = ip.dst
                # match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ipv4_src=srcip, ipv4_dst=dstip)
                protocol = ip.proto

                # ICMP Protocol
                if protocol == in_proto.IPPROTO_ICMP:
                    print("WARN - We have a ICMP packet")
                    cursor.execute('select id from knownAttackers where srcaddr = \"{0}\" and dstaddr = \"{1}\" and protocol = "icmp";'.format(srcip, dstip))
                    result = cursor.fetchall()
                    match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ipv4_src=srcip, ipv4_dst=dstip,
                                            ip_proto=protocol)
                    if len(result) == 0:
                        self.security_alert = False
                    else:
                        self.security_alert = True

                # TCP Protocol
                elif protocol == in_proto.IPPROTO_TCP:
                    print("WARN - We have a TCP packet")
                    t = pkt.get_protocol(tcp.tcp)
                    cursor.execute('select id from knownAttackers where srcaddr = \"{0}\" and dstaddr = \"{1}\" and dstport = \"{2}\" and protocol = "tcp";'.format(srcip, dstip, t.dst_port))
                    result = cursor.fetchall()
                    match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ipv4_src=srcip, ipv4_dst=dstip,
                                            ip_proto=protocol, tcp_dst=t.dst_port)
                    if len(result) == 0:
                        self.security_alert = False
                    else:
                        print("We have a register in the database for this specific packet: {0}".format(result))
                        self.security_alert = True

                # UDP Protocol
                elif protocol == in_proto.IPPROTO_UDP:
                    print("WARN - We have a UDP packet")
                    u = pkt.get_protocol(udp.udp)
                    cursor.execute('select id from knownAttackers where srcaddr = \"{0}\" and dstaddr = \"{1}\" and dstport = \"{2}\" and protocol = "udp";'.format(srcip, dstip, u.dst_port))
                    result = cursor.fetchall()
                    match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ipv4_src=srcip, ipv4_dst=dstip,
                                            ip_proto=protocol, udp_dst=u.dst_port)
                    if len(result) == 0:
                        self.security_alert = False
                    else:
                        self.security_alert = True
            else:
                self.security_alert = False
                match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)

            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if self.security_alert == False:
                if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                    self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                    return
                else:
                    self.add_flow(datapath, 1, match, actions)
            elif self.security_alert == True:
                if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                    self.add_security_flow(datapath, 1, match, actions, msg.buffer_id)
                    return
                else:
                    self.add_security_flow(datapath, 1, match, actions)

        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        if self.security_alert == False:
            out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                      in_port=in_port, actions=actions, data=data)
            datapath.send_msg(out)
The above error appears at the end of the add_security_flow() method, when I make a TCP connection that is identified as a known attacker and the method tries to send the flow modification (datapath.send_msg(mod)) to the switch/datapath.
What am I doing wrong? Am I missing some sort of variable?
In the Ryu controller mailing list a user named IWAMOTO told me that my hard_timeout value was too large for the struct packing (2 hours is 7200 seconds; I don't know where my head was for me to come up with 432000, haha). After downsizing the hard_timeout to 7200 seconds, everything worked out just fine.
Always check the size of the values you're trying to send to a datapath; make sure they don't exceed 65535.
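For reference, a minimal sketch of where that limit comes from: OpenFlow 1.3 encodes hard_timeout as an unsigned 16-bit field, which Ryu packs with the struct format character 'H':

import struct

buf = bytearray(2)
struct.pack_into('!H', buf, 0, 7200)    # fine: 2 hours in seconds fits in 16 bits
struct.pack_into('!H', buf, 0, 432000)  # struct.error: 'H' format requires 0 <= number <= 65535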
