Jasmin SMSC Gateway / source connector in db - python

I installed Jasmin SMSC Gateway and it's working perfectly.
Now I'm trying to record the SMS messages in a MySQL database.
For that I'm using the script below to get the messages from the RabbitMQ queues:
# -*- coding: utf-8 -*-
import pickle

from twisted.internet.defer import inlineCallbacks
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.python import log
from txamqp.protocol import AMQClient
from txamqp.client import TwistedDelegate
import txamqp.spec

# MySQL connection pool handler
import PySQLPool

@inlineCallbacks
def gotConnection(conn, username, password):
    # print "Connected to broker."
    yield conn.authenticate(username, password, 'PLAIN')

    print "Authenticated. Ready to receive messages"
    chan = yield conn.channel(1)
    yield chan.channel_open()

    yield chan.queue_declare(queue="someQueueName10")

    # Bind to submit.sm.*, deliver.sm.* and submit.sm.resp.* routes
    yield chan.queue_bind(queue="someQueueName10", exchange="messaging", routing_key='submit.sm.*')
    yield chan.queue_bind(queue="someQueueName10", exchange="messaging", routing_key='deliver.sm.*')
    yield chan.queue_bind(queue="someQueueName10", exchange="messaging", routing_key='submit.sm.resp.*')

    yield chan.basic_consume(queue='someQueueName10', no_ack=True, consumer_tag="someTag")
    queue = yield conn.queue("someTag")

    # Build MySQL connection pool
    PySQLPool.getNewPool().maxActiveConnections = 20  # How many reusable conns to buffer in the pool
    print "Pooling 20 connections"

    # Connection parameters - fill this in with your MySQL server connection parameters
    mysqlconn = PySQLPool.getNewConnection(
        username='jasmin_db',
        password='jasmindb',
        host='127.0.0.1',
        db='jasmin_db')
    print "Connected to MySQL"
    queryp = PySQLPool.getNewQuery(mysqlconn)

    # Wait for messages
    # This can be done through a callback ...
    while True:
        msg = yield queue.get()
        props = msg.content.properties
        pdu = pickle.loads(msg.content.body)

        print '%s' % msg.routing_key

        if msg.routing_key[:15] == 'submit.sm.resp.':
            print 'SubmitSMResp: status: %s, msgid: %s' % (pdu.status, props['message-id'])
            queryp.Query("UPDATE table_name SET status='%s' WHERE messageid='%s'" % (
                pdu.status, props['message-id']))
            PySQLPool.commitPool()
        elif msg.routing_key[:10] == 'submit.sm.':
            print 'SubmitSM: from %s to %s, content: %s, msgid: %s supp %s' % (
                pdu.params['source_addr'],
                pdu.params['destination_addr'],
                pdu.params['short_message'],
                props['message-id'],
                pdu.params['source_addr'])
            queryp.Query(
                "INSERT INTO cdrs (messageid, carrier, date, dst, src, status, accountcode, cost, sale, plan_name, amaflags, content) "
                "VALUES ('%s', '%s', NOW(), '%s', '%s', '%s', '00000', '0.0', '0.0', 'plan_name', 'some_status', '%s')" % (
                    props['message-id'],
                    msg.routing_key.replace("submit.sm.", ""),
                    pdu.params['destination_addr'],
                    pdu.params['source_addr'],
                    pdu.status,
                    pdu.params['short_message']))
            PySQLPool.commitPool()
        else:
            print 'unknown route'

    # A clean way to tear down and stop
    yield chan.basic_cancel("someTag")
    yield chan.channel_close()
    chan0 = yield conn.channel(0)
    yield chan0.connection_close()

    reactor.stop()


if __name__ == "__main__":
    host = '127.0.0.1'
    port = 5672
    vhost = '/'
    username = 'guest'
    password = 'guest'
    spec_file = '/etc/jasmin/resource/amqp0-9-1.xml'

    spec = txamqp.spec.load(spec_file)

    # Connect and authenticate
    d = ClientCreator(reactor,
                      AMQClient,
                      delegate=TwistedDelegate(),
                      vhost=vhost,
                      spec=spec).connectTCP(host, port)
    d.addCallback(gotConnection, username, password)

    def whoops(err):
        if reactor.running:
            log.err(err)
            reactor.stop()

    d.addErrback(whoops)

    reactor.run()
I'm able to save the messages in the database, but I also need a way to get the source connector, or the user that sent the message, and save it in the database as well.
Is there a way to achieve this?

There is a script in the Jasmin SMS gateway GitHub repository; I think it will solve your question.
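For the source connector specifically: the script above already derives a carrier by stripping 'submit.sm.' off the routing key. Assuming Jasmin publishes with routing keys of the form 'submit.sm.<id>', 'submit.sm.resp.<id>' and 'deliver.sm.<id>' (verify this against your Jasmin version), here is a minimal helper sketch to pull that trailing segment out for any of the three routes:

# Hypothetical helper: extract the trailing segment of the routing key,
# assuming Jasmin's 'submit.sm.<id>' / 'deliver.sm.<id>' key convention.
# The 'resp' prefix must be checked first since it also starts with 'submit.sm.'.
def connector_from_routing_key(routing_key):
    for prefix in ('submit.sm.resp.', 'submit.sm.', 'deliver.sm.'):
        if routing_key.startswith(prefix):
            return routing_key[len(prefix):]
    return None

The returned value could then be stored in an extra column alongside the existing cdrs INSERT.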

Related

Python simple DNS resolver: memory leak

This code is a DNS resolver that checks a DB for an entry not older than 5 minutes.
#!/usr/bin/python3
from MySQLdb import _mysql as MySQL
from dnslib import RR, QTYPE, RCODE, A
from dnslib.label import DNSLabel
from dnslib.server import DNSServer, BaseResolver
from time import sleep, time


class MariaResolver(BaseResolver):
    DELTA = 300

    def __init__(self):
        self.password = "********************"

    def resolve(self, request, handler):
        reply = request.reply()
        qname = request.q.qname
        fqdn = str(request.q.qname)
        try:
            if fqdn.find("iut-") == -1:
                reply.header.rcode = RCODE.REFUSED
            else:
                hostname = fqdn.split(".")[0]
                timestamp = int(time()) - self.DELTA
                query = "SELECT ip FROM dns WHERE record='{}' AND timestamp>{}"
                db = MySQL.connect("localhost", "dns", self.password, "salles")
                db.query(query.format(hostname, timestamp))
                result = db.store_result()
                row = result.fetch_row(how=1)
                if row:
                    ip = row[0]["ip"].decode("utf-8")
                    reply.add_answer(RR(qname, QTYPE.A, ttl=0, rdata=A(ip)))
                else:
                    reply.header.rcode = RCODE.REFUSED
                db.close()
        except Exception as e:
            print(e)
            reply.header.rcode = RCODE.REFUSED
        return reply


if __name__ == '__main__':
    resolver = MariaResolver()
    udp_server = DNSServer(resolver, port=53)
    udp_server.start_thread()
    while udp_server.isAlive():
        sleep(0.1)
This code leaks over time and I do not understand why.
In the Proxmox screenshot, you can see the service restarted at the end.
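One thing that stands out in resolve() (an observation, not a confirmed diagnosis of the leak): the connection opened by MySQL.connect is only closed on the happy path. If db.query, store_result or fetch_row raises, control jumps to the except block and db.close() is never reached, so connections and their buffered result sets accumulate. A minimal sketch of the same body with the close moved into a finally:

db = MySQL.connect("localhost", "dns", self.password, "salles")
try:
    db.query(query.format(hostname, timestamp))
    result = db.store_result()
    row = result.fetch_row(how=1)
    if row:
        ip = row[0]["ip"].decode("utf-8")
        reply.add_answer(RR(qname, QTYPE.A, ttl=0, rdata=A(ip)))
    else:
        reply.header.rcode = RCODE.REFUSED
finally:
    db.close()  # runs even when the query raises, so connections cannot pile up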

Python Socket with Multiprocessing and Pickle issue

I am having a Pickle issue with SSL client to server communication using multiprocessing.
I have an SSL client that connects to the server:
SSLClient.py
import socket
import struct
import ssl
import copyreg
import os
import json
from os import path
from pathlib import Path
from loguru import logger as log
from utils.misc import read_py_config
from datetime import datetime
from cryptography.fernet import Fernet

fernetkey = '1234567'
fernet = Fernet(fernetkey)


class SSLclient:
    license = None
    licenseencrypted = None
    uuid = None

    def __init__(self):
        try:
            path = Path(__file__).parent / "/lcl"  # get unique license key
            with path.open() as file:
                self.licenseencrypted = file.read().rstrip()
            self.license = fernet.decrypt(str.encode(self.licenseencrypted)).decode('ascii')
            self.host, self.port = "127.0.0.1", 65416
        except Exception as e:
            log.error("Could not decode license key")

    def connect(self):
        self.client_crt = os.path.join(os.path.dirname(__file__), 'key/c-.crt')
        self.client_key = os.path.join(os.path.dirname(__file__), 'key/ck-.key')
        self.server_crt = os.path.join(os.path.dirname(__file__), 'key/s-.crt')
        self.sni_hostname = "example.com"
        self._context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=self.server_crt)
        self._context.load_cert_chain(certfile=self.client_crt, keyfile=self.client_key)
        self._sock = None
        self._ssock = None

        # ---- Client Communication Setup ----
        HOST = self.host  # The server's hostname or IP address
        PORT = self.port  # The port used by the server
        try:
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._ssock = self._context.wrap_socket(self._sock, server_side=False,
                                                    server_hostname=self.sni_hostname)
            self._ssock.connect((HOST, PORT))
            log.info("Socket successfully created")
        except socket.error as err:
            log.error("socket creation failed with error %s" % (err))
            return False
        log.info('Waiting for connection')
        return True

    def closesockconnection(self):
        self._ssock.close()

    def checkvalidsite(self):
        # check if site is active
        jsonobj = {
            "uuid": self.license,
            "ipaddress": self.external_ip,
            "req": "checkvalidsite"
        }
        send_msg(self._ssock, json.dumps(jsonobj).encode('utf-8'))
        active = False
        while True:
            Response = recv_msg(self._ssock)
            if not Response:
                return False
            if Response is not None:
                Response = Response.decode('utf-8')
                Response = json.loads(Response)
                req = Response['req']
                if req == "checkvalidsite":
                    active = Response['active']
                    self.info1 = Response['info1']
                    self.info2 = Response['info2']
                    return active


# ---- To Avoid Message Boundary Problem on top of TCP protocol ----
def send_msg(sock: socket, msg):  # ---- Use this to send
    try:
        # Prefix each message with a 4-byte length (network byte order)
        msg = struct.pack('>I', len(msg)) + msg
        sock.sendall(msg)
    except Exception as e:
        log.error("Sending message " + str(e))


def recv_msg(sock: socket):  # ---- Use this to receive
    try:
        # Read message length and unpack it into an integer
        raw_msglen = recvall(sock, 4)
        if not raw_msglen:
            return None
        msglen = struct.unpack('>I', raw_msglen)[0]
        # Read the message data
        return recvall(sock, msglen)
    except Exception as e:
        log.error("Receiving message " + str(e))
        return False


def recvall(sock: socket, n: int):
    try:
        # Helper function to receive n bytes or return None if EOF is hit
        data = bytearray()
        while len(data) < n:
            packet = sock.recv(n - len(data))
            if not packet:
                return None
            data.extend(packet)
        return data
    except Exception as e:
        log.error("Receiving all message " + str(e))
        raise Exception(e)
I then have a multithreaded server that accepts the connection and communicates with the client.
Server.py
import socket
import os
from socket import AF_INET, SOCK_STREAM, SO_REUSEADDR, SOL_SOCKET, SHUT_RDWR
import ssl
from os import path
from _thread import *
import struct  # Here to convert Python data types into byte streams (in string) and back
import traceback
from threading import Thread
import json
import mysql.connector as mysql
import time
from loguru import logger as log
import threading
from cryptography.fernet import Fernet

fernetkey = '12213423423'
fernet = Fernet(fernetkey)
threadLocal = threading.local()


# ---- To Avoid Message Boundary Problem on top of TCP protocol ----
def send_msg(sock: socket, msg):  # ---- Use this to send
    try:
        # Prefix each message with a 4-byte length (network byte order)
        msg = struct.pack('>I', len(msg)) + msg
        sock.sendall(msg)
    except Exception as e:
        log.error("Error send_msg " + str(e))


def recv_msg(sock: socket):  # ---- Use this to receive
    try:
        # Read message length and unpack it into an integer
        raw_msglen = recvall(sock, 4)
        if not raw_msglen:
            return None
        msglen = struct.unpack('>I', raw_msglen)[0]
        # Read the message data
        return recvall(sock, msglen)
    except Exception as e:
        log.error("Receiving message " + str(e))
        return False


def recvall(sock: socket, n: int):
    try:
        # Helper function to receive n bytes or return None if EOF is hit
        data = bytearray()
        while len(data) < n:
            packet = sock.recv(n - len(data))
            if not packet:
                return None
            data.extend(packet)
        return data
    except Exception as e:
        log.error("Receiving all message " + str(e))
        raise Exception(e)


# ---- Server Communication Setup
class Newclient:
    def __init__(self):
        self.addr = None
        self.conn = None
        self.uuid = None


class Server:
    def __init__(self):
        self.HOST = '127.0.0.1'  # Standard loopback interface address (localhost)
        self.PORT = 65416        # Port to listen on (non-privileged ports are > 1023)
        self.ThreadCount = 0
        self.threads = []
        self.sock = None

    def checkvalidsite(self, uuid, ipaddress, cursor, db_connection):
        sql = "select * from myexample where uuid ='" + uuid + "'"
        cursor.execute(sql)
        results = cursor.fetchall()
        active = False
        for row in results:
            active = row["active"]
            siteid = row["info1"]
            clientid = row["info2"]
        return active, siteid, clientid

    def Serverthreaded_client(self, newclient):
        conn = newclient.conn
        try:
            while True:
                # data = conn.recv(2048)  # receive message from client
                data = recv_msg(conn)
                uuid = None
                ipaddress = None
                req = None
                if not data:
                    return False
                if data is not None:
                    data = json.loads(data.decode('utf-8'))
                    uuid = data['uuid']
                    req = data['req']
                    if uuid is not None and req is not None:
                        newclient.uuid = uuid
                        cursor, db_connection = setupDBConnection()
                        if req == "checkvalidsite":
                            ipaddress = data['ipaddress']
                            active, info1, info2 = self.checkvalidsite(uuid, ipaddress, cursor, db_connection)
                            data = {
                                "req": "checkvalidsite",
                                "uuid": uuid,
                                "active": active,
                                "info1": info1,
                                "info2": info2
                            }
                        if not data:
                            break
                        # conn.sendall(str.encode(reply))
                        send_msg(conn, json.dumps(data).encode('utf-8'))
                        log.info("Server response sent")
                        # conn.close()
                        closeDBConnection(cursor, db_connection)
                    else:
                        # send no message
                        a = 1
        except Exception as e:
            log.warning(str(e))
            log.warning(traceback.format_exc())
        finally:
            log.info("UUID Closing connection")
            conn.shutdown(socket.SHUT_RDWR)
            conn.close()
            # conn.close()

    def Serverconnect(self):
        try:  # create socket
            self.server_cert = path.join(path.dirname(__file__), "keys/server.crt")
            self.server_key = path.join(path.dirname(__file__), "keys/server.key")
            self.client_cert = path.join(path.dirname(__file__), "keys/client.crt")
            self._context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            self._context.verify_mode = ssl.CERT_REQUIRED
            ###self._context.load_cert_chain(self.server_cert, self.server_key)
            self._context.load_cert_chain(certfile=self.server_cert, keyfile=self.server_key)
            ###self._context.load_verify_locations(self.client_cert)
            self._context.load_verify_locations(cafile=self.client_cert)
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)  ###<-- socket.socket() ???
            log.info("Socket successfully created")
        except socket.error as err:
            log.warning("socket creation failed with error %s" % (err))
        try:  # bind socket to an address
            self.sock.bind((self.HOST, self.PORT))
        except socket.error as e:
            log.warning(str(e))
        log.info('Waiting for a Connection..')
        self.sock.listen(3)

    def Serverwaitforconnection(self):
        while True:
            Client, addr = self.sock.accept()
            conn = self._context.wrap_socket(Client, server_side=True)
            log.info('Connected to: ' + addr[0] + ':' + str(addr[1]))
            log.info("SSL established. Peer: {}".format(conn.getpeercert()))
            newclient = Newclient()
            newclient.addr = addr
            newclient.conn = conn
            thread = Thread(target=self.Serverthreaded_client, args=(newclient,))
            thread.start()
            self.threads.append(newclient)
            self.ThreadCount += 1
            log.info('Thread Number: ' + str(self.ThreadCount))


def startserver():
    server = Server()
    server.Serverconnect()
    server.Serverwaitforconnection()


serverthread = Thread(target=startserver)
serverthread.daemon = False
serverthread.start()
The server accepts the connection with SSL then waits for a message. It investigates the message command, executes the respective function and returns the data from the database as a response (checkvalidsite in this example).
All good so far (as far as I can tell).
I also have the main program that calls the SSLClient and connects.
Main program
remoteclient = SSLclient()
successfulconnection = remoteclient.connect()
siteactive = remoteclient.checkvalidsite()
So far all is well. However, I also have the main program reading in frames from multiple cameras; there can be 20 cameras, for example. To deal with the camera load I use multiprocessing: each camera, or pair of cameras, is assigned to a process, depending on the number of cores in the machine.
(The code below has been stripped down to simplify reading.)
x = range(3, 6)
for n in x:
    processes = multiprocessing.Process(target=activateMainProgram,
                                        args=(queue1, queue2, queue3, queue4, remoteclient, ))
    processes.start()
When I try to pass the remoteclient (SSLclient) as an argument I get the error:
cannot pickle 'SSLContext' object
I then (after reading online) added this code to the SSLClient:
def save_sslcontext(obj):
    return obj.__class__, (obj.protocol,)

copyreg.pickle(ssl.SSLContext, save_sslcontext)
but then I get the error:
cannot pickle 'SSLContext' object
There are two options I experimented with:
1. Trying to get the pickling working (which would be ideal), since the processes each need to communicate with the server and therefore need to call functions from the SSLClient file. But I cannot get past the pickle issue and can't find a solution online.
2. Placing the remoteclient = SSLclient() code outside the main function, hoping it would run first and then be accessible to the processes. This worked; however, what I learnt is that when a process is spawned (since it does not share memory) it re-imports the entire file. So if I have 10 processes, each with 2 cameras, I end up with 10 connections to the server (one per process), and on the server side 10 threads, one per connection. Though it works, it seems significantly inefficient.
Being a noob and self-taught in Python, I am not sure how to resolve the issue, and after 3 days I figured I would reach out for assistance. If I could get help with the pickle issue in the SSLClient, then I would have one connection shared by all processes and one thread on the server to deal with them.
P.S. I have cobbled all of this code together myself, and being new to Python, if you see that I am going down a totally wrong, non-professional track, feel free to yell.
Much appreciated.
Update:
If I change the SSLClient code to:
def save_sslcontext(obj):
    return obj.__class__, (obj.protocol,)

copyreg.pickle(ssl.SSLContext, save_sslcontext)
Then I get the error:
[WinError 10038] An operation was attempted on something that is not a socket
Not sure which is better.
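Since SSLContext objects and live sockets fundamentally can't cross a process boundary by pickling, a common workaround is to give the connection to exactly one process and let everything else talk to it through queues. Below is a minimal sketch of that shape, assuming the SSLclient class from above; the worker/owner split and the queue protocol are hypothetical, and a real version would use per-worker reply queues rather than one shared response queue:

import multiprocessing

def connection_owner(request_q, response_q):
    # The only process that ever touches the unpicklable SSLContext/socket.
    remoteclient = SSLclient()
    remoteclient.connect()
    while True:
        req = request_q.get()
        if req is None:                      # sentinel: shut down
            break
        if req.get("req") == "checkvalidsite":
            response_q.put(remoteclient.checkvalidsite())

def worker(request_q, response_q):
    # Camera workers never hold the SSL connection; they only enqueue requests.
    request_q.put({"req": "checkvalidsite"})
    print(response_q.get())

if __name__ == "__main__":
    request_q = multiprocessing.Queue()
    response_q = multiprocessing.Queue()
    owner = multiprocessing.Process(target=connection_owner, args=(request_q, response_q))
    owner.start()
    w = multiprocessing.Process(target=worker, args=(request_q, response_q))
    w.start()
    w.join()
    request_q.put(None)                      # stop the owner process
    owner.join()

This keeps one SSL connection and one server thread total, which is what the question is after, at the cost of serializing requests through the owner process.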

Socket.io not listening to two connections simultaneously Python

Below is a Python script written with socket.io. Data is continuously emitted from the server to the web browser; however, the client can emit 'closeSSH' in between, at which point the 'nodeID' stream should stop. Can someone help me make socket.io listen to multiple events at the same time?
#!/usr/bin/env python
import sys
# Import 'Flask' libraries for web socket programming with the client/web-browser
from flask import Flask, render_template, request
from flask.ext.socketio import SocketIO, emit
import flask
import pxssh
from socket import *
import socket
from flask.ext.cors import CORS, cross_origin
import time

# INITIALIZATIONS
noOfNodes = 48

# Initialize all the TCP sockets
tcpClientSockets = dict()
for i in range(48):
    tcpClientSockets[i] = socket.socket()

tcpPort = 5000
bufferSizeRecPacket = 102400
nodeIPAddress = '192.168.1.'
startNode = 11
endNode = 58


# Class which provides the web socket communication with the Client/Web-browser
class CORNET_3D_WebSocketConnection:
    # Configure the Flask server
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'secret!'
    # Change the configuration such that requests from external IP addresses are accepted
    cors = CORS(app, resources={r"/foo": {"origins": "*"}})
    app.config['CORS_HEADERS'] = 'Content-Type'
    app.debug = True
    socketio = SocketIO(app)

    def __init__(self):
        # Server to run on port 8888
        self.socketio.run(self.app, host='0.0.0.0', port=8888)

    @app.route('/', methods=['GET', 'POST', 'OPTIONS'])
    @cross_origin(origin='*', headers=['Content-Type', 'Authorization'])
    def index():
        # TODO: Get the node status for all the nodes
        return ''

    @socketio.on('nodeID')
    def startSpectrum(initParams):
        # TODO: Add the client to the list of clients
        # TODO: SSH to the node specified
        current_client_id = request.environ['REMOTE_ADDR']
        print current_client_id
        # Extract the transParams
        transParams = initParams['transParams']
        nodeID = initParams['nodeID']
        # Get the IPv4 address for the node
        nodeIPv4Address = nodeIPAddress + str(nodeID)
        # Validate the IPv4 Address
        try:
            validate = CORNET_3D_ValidateParameters()
            if validate.isIPv4AddressValid(nodeIPv4Address):
                # If parameters are valid in the IPv4 address
                if validate.isParamsValid(transParams):
                    try:
                        # TODO: Add the client IP address to the list of connected clients
                        time.sleep(1)
                        tcpClientSockets[nodeID - startNode].close()
                        spectrumParams = "PARAMS--f:" + str(transParams['f']) + " b:" + str(transParams['b']) + " n:" + str(transParams['n']) + " r:" + str(transParams['r'])
                        print spectrumParams
                        try:
                            print 'Connecting'
                            print nodeIPv4Address
                            tcpClientSockets[nodeID - startNode] = socket.socket()
                            print 'Create Tcp client sockets'
                            tcpClientSockets[nodeID - startNode].connect((nodeIPv4Address, tcpPort))
                            print 'Connected'
                            tcpClientSockets[nodeID - startNode].send(spectrumParams)
                            print 'Sent spectrum data'
                            while True:
                                spectrumData = tcpClientSockets[nodeID - startNode].recv(bufferSizeRecPacket)
                                tcpClientSockets[nodeID - startNode].send('spectrum..........................................')
                                emit('time', spectrumData)
                        except Exception as e:
                            print "Exception caused in TCP connection -- Start spectrum. Error: ", e
                    except Exception as e:
                        print "Exception caused in Validate -- Start spectrum. Error: ", e
                else:
                    emit('error', 'Invalid parameters for the node')
            else:
                emit('error', 'Invalid IP address for the node')
        except Exception as e:
            print "Exception caused in Start Spectrum. Error: ", e

    @socketio.on('getMetrics')
    def getMetrics(message):
        # TODO: Get metrics to be implemented
        print 'Get metrics'

    @socketio.on('closeSSH')
    def closeSSH(nodeID):
        # TODO: Remove the client from the list of connected clients
        try:
            print 'CLOSE SSH'
            # tcpClientSockets[int(nodeID) - startNode].send('exit..............................................')
            print 'Exit sent'
            tcpClientSockets[int(nodeID) - startNode].shutdown(socket.SHUT_RDWR)
        except Exception as e:
            print "Exception caused in Closing the connection with the client. Error: ", e

    @socketio.on('disconnect')
    def disconnect(nodeID):
        # TODO: Remove the client from the list of connected clients
        try:
            print 'DISCONNECT'
            tcpClientSockets[int(nodeID) - startNode].send('exit..............................................')
            tcpClientSockets[nodeID - startNode].close()
        except Exception as e:
            print "Exception caused in Closing the connection with the client. Error: ", e

    @socketio.on('users_req')
    def getClientsConnected():
        # TODO: Get the clients that are connected to the web socket through web browsers
        print 'Users Req'


# Class which validates parameters and IPv4 address
class CORNET_3D_ValidateParameters:
    def isIPv4AddressValid(self, ipAddress):
        try:
            socket.inet_pton(socket.AF_INET, ipAddress)
        except AttributeError:
            try:
                socket.inet_aton(ipAddress)
            except socket.error:
                return False
            return ipAddress.count('.') == 3
        except socket.error:
            return False
        return True

    def isParamsValid(self, parameters):
        try:
            f = int(parameters['f'])
            b = int(parameters['b'])
            n = int(parameters['n'])
            r = int(parameters['r'])
            return True
        except ValueError:
            return False


if __name__ == '__main__':
    webServer = CORNET_3D_WebSocketConnection()
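One common pattern for interrupting a streaming loop like the one in startSpectrum (a sketch, assuming Flask-SocketIO is running in an async mode that services handlers concurrently): give each node a stop flag that the 'closeSSH' handler sets, and make the 'nodeID' loop check that flag on every iteration instead of looping unconditionally.

import threading

# Hypothetical per-node stop flags; not part of the original script.
stopFlags = dict((i, threading.Event()) for i in range(noOfNodes))

# In startSpectrum, replace 'while True:' with a check of the node's flag:
#     stopFlags[nodeID - startNode].clear()
#     while not stopFlags[nodeID - startNode].is_set():
#         spectrumData = tcpClientSockets[nodeID - startNode].recv(bufferSizeRecPacket)
#         emit('time', spectrumData)

# In closeSSH, set the flag so the loop exits before the socket is shut down:
#     stopFlags[int(nodeID) - startNode].set()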

SocketServer rfile.read() very very slow

I am building an HTTPS proxy that can forward all SSL traffic; this is called transparent tunneling. Anyway, I have a problem with Python's socketserver. When I call rfile.read(), it takes a very long time to return. Even though I used select to make sure the I/O is ready, it still takes a very long time, usually 30s, and the client's socket is closed because of the timeout. I cannot make the socket non-blocking, because I need to read the data first and then forward everything I just read to the remote server; the read must return the data first.
My code follows:
import SocketServer
import BaseHTTPServer
import socket
import select
import threading
import httplib
import time
import os
import urllib
import ssl
import copy
from history import *
from http import *
from https import *
from logger import Logger

DEFAULT_CERT_FILE = "./cert/ncerts/proxpy.pem"

proxystate = None


class ProxyHandler(SocketServer.StreamRequestHandler):
    def __init__(self, request, client_address, server):
        self.peer = False
        self.keepalive = False
        self.target = None
        self.tunnel_mode = False
        self.forwardSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Just for debugging
        self.counter = 0
        self._host = None
        self._port = 0
        SocketServer.StreamRequestHandler.__init__(self, request, client_address, server)

    def createConnection(self, host, port):
        global proxystate
        if self.target and self._host == host:
            return self.target
        try:
            # If a SSL tunnel was established, create a HTTPS connection to the server
            if self.peer:
                conn = httplib.HTTPSConnection(host, port)
            else:
                # HTTP Connection
                conn = httplib.HTTPConnection(host, port)
        except HTTPException as e:
            proxystate.log.debug(e.__str__())
        # If we need a persistent connection, add the socket to the dictionary
        if self.keepalive:
            self.target = conn
            self._host = host
            self._port = port
        return conn

    def sendResponse(self, res):
        self.wfile.write(res)

    def finish(self):
        if not self.keepalive:
            if self.target:
                self.target.close()
            return SocketServer.StreamRequestHandler.finish(self)
        # Otherwise keep-alive is True, then go on and listen on the socket
        return self.handle()

    def handle(self):
        global proxystate
        if self.keepalive:
            if self.peer:
                HTTPSUtil.wait_read(self.request)
            else:
                HTTPUtil.wait_read(self.request)
            # Just debugging
            if self.counter > 0:
                proxystate.log.debug(str(self.client_address) + ' socket reused: ' + str(self.counter))
            self.counter += 1
        if self.tunnel_mode:
            HTTPUtil.wait_read(self.request)
            print "++++++++++++++++++++++++++"
            req = self.rfile.read(4096)  # This is the line that takes a very long time!
            print "----------------------------------"
            data = self.doFORWARD(req)
            print "**************************************"
            if len(data) != 0:
                self.sendResponse(data)
            return
        try:
            req = HTTPRequest.build(self.rfile)
        except Exception as e:
            proxystate.log.debug(e.__str__() + ": Error on reading request message")
            return
        if req is None:
            return
        # Delegate request to plugin
        req = ProxyPlugin.delegate(ProxyPlugin.EVENT_MANGLE_REQUEST, req.clone())
        # If you need a persistent connection set the flag in order to save the status
        if req.isKeepAlive():
            self.keepalive = True
        else:
            self.keepalive = False
        # Target server host and port
        host, port = ProxyState.getTargetHost(req)
        if req.getMethod() == HTTPRequest.METHOD_GET:
            res = self.doGET(host, port, req)
            self.sendResponse(res)
        elif req.getMethod() == HTTPRequest.METHOD_POST:
            res = self.doPOST(host, port, req)
            self.sendResponse(res)
        elif req.getMethod() == HTTPRequest.METHOD_CONNECT:
            res = self.doCONNECT(host, port, req)

    def _request(self, conn, method, path, params, headers):
        global proxystate
        conn.putrequest(method, path, skip_host=True, skip_accept_encoding=True)
        for header, v in headers.iteritems():
            # auto-fix content-length
            if header.lower() == 'content-length':
                conn.putheader(header, str(len(params)))
            else:
                for i in v:
                    conn.putheader(header, i)
        conn.endheaders()
        if len(params) > 0:
            conn.send(params)

    def doRequest(self, conn, method, path, params, headers):
        global proxystate
        try:
            self._request(conn, method, path, params, headers)
            return True
        except IOError as e:
            proxystate.log.error("%s: %s:%d" % (e.__str__(), conn.host, conn.port))
            return False

    def doCONNECT(self, host, port, req):
        # global proxystate
        self.tunnel_mode = True
        self.tunnel_host = host
        self.tunnel_port = port
        # socket_req = self.request
        # certfilename = DEFAULT_CERT_FILE
        # socket_ssl = ssl.wrap_socket(socket_req, server_side=True, certfile=certfilename,
        #                              ssl_version=ssl.PROTOCOL_SSLv23, do_handshake_on_connect=False)
        HTTPSRequest.sendAck(self.request)
        # print "Send ack to the peer %s on port %d for establishing SSL tunnel" % (host, port)
        print "into forward mode: %s : %s" % (host, port)
        '''
        host, port = socket_req.getpeername()
        proxystate.log.debug("Send ack to the peer %s on port %d for establishing SSL tunnel" % (host, port))
        while True:
            try:
                socket_ssl.do_handshake()
                break
            except (ssl.SSLError, IOError) as e:
                # proxystate.log.error(e.__str__())
                print e.__str__()
                return
        # Switch to new socket
        self.peer = True
        self.request = socket_ssl
        '''
        self.setup()
        # self.handle()

    def doFORWARD(self, data):
        host, port = self.request.getpeername()
        # print "client_host", host
        # print "client_port", port
        try:
            print "%s:%s===>data read, now sending..." % (host, port)
            self.forwardSocket.connect((self.tunnel_host, self.tunnel_port))
            self.forwardSocket.sendall(data)
            print data
            print "%s:%s===>sent %d bytes to server" % (host, port, len(data))
            select.select([self.forwardSocket], [], [])
            chunk = self.forwardSocket.recv(4096)
            print chunk
            print "%s:%s===>receive %d bytes from server" % (host, port, len(chunk))
            return chunk
        except socket.error as e:
            print e.__str__()
            return ''


class ThreadedHTTPProxyServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    allow_reuse_address = True


class ProxyServer():
    def __init__(self, init_state):
        global proxystate
        proxystate = init_state
        self.proxyServer_port = proxystate.listenport
        self.proxyServer_host = proxystate.listenaddr

    def startProxyServer(self):
        global proxystate
        self.proxyServer = ThreadedHTTPProxyServer((self.proxyServer_host, self.proxyServer_port), ProxyHandler)
        # Start a thread with the server (that thread will then spawn a worker
        # thread for each request)
        server_thread = threading.Thread(target=self.proxyServer.serve_forever)
        # Exit the server thread when the main thread terminates
        server_thread.setDaemon(True)
        proxystate.log.info("Server %s listening on port %d" % (self.proxyServer_host, self.proxyServer_port))
        server_thread.start()
        while True:
            time.sleep(0.1)
This is driving me crazy, because when I forward normal HTTP traffic the read() returns immediately with the data. But every time the client sends SSL (binary) traffic, the read() takes a very long time to return! It seems as if read() only returns when the request socket is closed by the remote client. Does anyone know how to make the read() fast? I tried recv() and readline(), and both are as slow as read() when handling binary traffic!
I had a similar problem on my Python server while trying to send an image to it via HTTP.
rfile.read took 1700 ms for an 800 KB image and 4500 ms for a 4.5 MB image, and I was also working over a limited-speed VPN connection.
It appears that rfile.read spends its time downloading/uploading the file: by improving the connection/network speed between client and server I reduced the 1700 ms read to less than 50 ms.
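A further note on why read() stalls (an explanation of Python's buffered file objects rather than a tested patch for this proxy): self.rfile is a buffered wrapper around the socket, and rfile.read(4096) does not return until it has accumulated the full 4096 bytes or hit EOF. For a tunneled TLS record smaller than 4096 bytes, that means waiting until the client closes or times out. Reading from the raw request socket instead returns as soon as any bytes are available:

# In handle(), tunnel mode: read from the raw request socket, not rfile.
# recv() returns up to 4096 bytes as soon as any data has arrived, while
# rfile.read(4096) blocks until it has all 4096 bytes or the peer closes.
HTTPUtil.wait_read(self.request)   # select() already reported data is ready
req = self.request.recv(4096)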

Receiving extended data with ssh using twisted.conch as client

I'm currently in the process of learning SSH via the brute-force, just-keep-hacking-until-I-understand-it approach. After some trial and error I've been able to successfully send a "pty-req" followed by a "shell" request; I can get the login preamble, send commands and receive stdout, but I'm not exactly sure how to tell the SSH service I want to receive stderr and status messages. Reading through other SSH implementations (paramiko, Net::SSH) hasn't been much of a guide so far.
That said, looking at one of the RFCs for SSH, I believe one of the listed requests might be what I'm looking for: https://www.rfc-editor.org/rfc/rfc4250#section-4.9.3
#!/usr/bin/env python
from twisted.conch.ssh import transport
from twisted.conch.ssh import userauth
from twisted.conch.ssh import connection
from twisted.conch.ssh import common
from twisted.conch.ssh.common import NS
from twisted.conch.ssh import keys
from twisted.conch.ssh import channel
from twisted.conch.ssh import session
from twisted.internet import defer, protocol, reactor
from twisted.python import log
import struct, sys, getpass, os

log.startLogging(sys.stdout)

USER = 'dward'
HOST = '192.168.0.19'  # pristine.local
PASSWD = "password"
PRIVATE_KEY = "~/id_rsa"


class SimpleTransport(transport.SSHClientTransport):

    def verifyHostKey(self, hostKey, fingerprint):
        print 'host key fingerprint: %s' % fingerprint
        return defer.succeed(1)

    def connectionSecure(self):
        self.requestService(
            SimpleUserAuth(USER,
                SimpleConnection()))


class SimpleUserAuth(userauth.SSHUserAuthClient):

    def getPassword(self):
        return defer.succeed(PASSWD)

    def getGenericAnswers(self, name, instruction, questions):
        print name
        print instruction
        answers = []
        for prompt, echo in questions:
            if echo:
                answer = raw_input(prompt)
            else:
                answer = getpass.getpass(prompt)
            answers.append(answer)
        return defer.succeed(answers)

    def getPublicKey(self):
        path = os.path.expanduser(PRIVATE_KEY)
        # this works with rsa too
        # just change the name here and in getPrivateKey
        if not os.path.exists(path) or self.lastPublicKey:
            # the file doesn't exist, or we've tried a public key
            return
        return keys.Key.fromFile(filename=path + '.pub').blob()

    def getPrivateKey(self):
        path = os.path.expanduser(PRIVATE_KEY)
        return defer.succeed(keys.Key.fromFile(path).keyObject)


class SimpleConnection(connection.SSHConnection):
    def serviceStarted(self):
        self.openChannel(SmartChannel(2**16, 2**15, self))


class SmartChannel(channel.SSHChannel):
    name = "session"

    def getResponse(self, timeout=10):
        self.onData = defer.Deferred()
        self.timeout = reactor.callLater(timeout, self.onData.errback, Exception("Timeout"))
        return self.onData

    def openFailed(self, reason):
        print "Failed", reason

    @defer.inlineCallbacks
    def channelOpen(self, ignoredData):
        self.data = ''
        self.oldData = ''
        self.onData = None
        self.timeout = None
        term = os.environ.get('TERM', 'xterm')
        # winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678')
        winSize = (25, 80, 0, 0)  # struct.unpack('4H', winsz)
        ptyReqData = session.packRequest_pty_req(term, winSize, '')
        try:
            result = yield self.conn.sendRequest(self, 'pty-req', ptyReqData, wantReply=1)
        except Exception as e:
            print "Failed with ", e

        try:
            result = yield self.conn.sendRequest(self, "shell", '', wantReply=1)
        except Exception as e:
            print "Failed shell with ", e

        # fetch preamble
        data = yield self.getResponse()
        """
        Welcome to Ubuntu 11.04 (GNU/Linux 2.6.38-8-server x86_64)
        * Documentation: http://www.ubuntu.com/server/doc
        System information as of Sat Oct 29 13:09:50 MDT 2011
        System load: 0.0 Processes: 111
        Usage of /: 48.0% of 6.62GB Users logged in: 1
        Memory usage: 39% IP address for eth1: 192.168.0.19
        Swap usage: 3%
        Graph this data and manage this system at https://landscape.canonical.com/
        New release 'oneiric' available.
        Run 'do-release-upgrade' to upgrade to it.
        Last login: Sat Oct 29 01:23:16 2011 from 192.168.0.17
        """
        print data
        while data != "" and data.strip().endswith("~$") == False:
            try:
                data = yield self.getResponse()
                print repr(data)
                """
                \x1B]0;dward@pristine: ~\x07dward@pristine:~$
                """
            except Exception as e:
                print e
                break

        self.write("false\n")
        # fetch response
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            false
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """

        self.write("true\n")
        # fetch response
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            true
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """

        self.write("echo Hello World\n\x00")
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            echo Hello World
            Hello World
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """

        # Close up shop
        self.loseConnection()
        dbgp = 1

    def request_exit_status(self, data):
        status = struct.unpack('>L', data)[0]
        print 'status was: %s' % status

    def dataReceived(self, data):
        self.data += data
        if self.onData is not None:
            if self.timeout and self.timeout.active():
                self.timeout.cancel()
            if self.onData.called == False:
                self.onData.callback(data)

    def extReceived(self, dataType, data):
        dbgp = 1
        print "Extended Data received! dataType = %s , data = %s " % (dataType, data,)
        self.extendData = data

    def closed(self):
        print 'got data : %s' % self.data.replace("\\r\\n", "\r\n")
        self.loseConnection()
        reactor.stop()


protocol.ClientCreator(reactor, SimpleTransport).connectTCP(HOST, 22)
reactor.run()
Additionally, I tried sending an explicitly bad command to the remote shell:
self.write("ls -alF badPathHere\n\x00")
try:
    data = yield self.getResponse()
except Exception as e:
    print "Failed to catch response?", e
else:
    print data
    """
    ls -alF badPathHere
    ls: cannot access badPathHere: No such file or directory
    \x1B]0;dward@pristine: ~\x07dward@pristine:~$
    """
And it looks like stderr is being mixed into stdout.
Digging through the source code for OpenSSH, channel session logic is handled in session.c (around line 2227, in session_input_channel_req), which, given a pty-req followed by a "shell" request, leads to do_exec_pty, which ultimately calls session_set_fds(s, ptyfd, fdout, -1, 1, 1). The fourth argument would normally be a file descriptor responsible for handling stderr, but since none is supplied, there won't be any extended data for stderr.
Ultimately, even if I modified OpenSSH to provide a stderr FD, the problem resides with the shell. Complete guesswork at this point, but I believe that, as when logging into an SSH service via a terminal like xterm or PuTTY, stderr and stdout are sent together unless explicitly redirected via something like "2> someFile", which is beyond the scope of an SSH service provider.
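If the goal is specifically to receive a command's stderr as extended data, one avenue worth trying (a sketch, untested against this server): skip the pty-req entirely and run the command with an 'exec' request instead of 'shell'. Without a pty, sshd keeps stderr on its own descriptor and delivers it through extReceived() with dataType 1 (SSH_EXTENDED_DATA_STDERR):

# Hypothetical alternative to the 'pty-req' + 'shell' sequence in channelOpen();
# NS() is the netstring helper already imported from twisted.conch.ssh.common.
result = yield self.conn.sendRequest(self, 'exec', NS('ls -alF badPathHere'), wantReply=1)
data = yield self.getResponse()    # stdout arrives via dataReceived()
# stderr should now arrive separately in extReceived(dataType=1, data=...)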
