Measure latency between client and Tor middle node in Python

I have a question related to the Tor middle node. First, what are the possible ways to get all the nodes of a circuit? Second, the method I currently follow is below:
def renew_tor_circuit(): #renewing Tor circuit for every request, default port is 9050. renewing port is 9051
    with Controller.from_port(port = 9051) as controller_handler:
        controller_handler.authenticate('welcome')
        controller_handler.signal(Signal.NEWNYM)
        time.sleep(10)
        for circuit in controller_handler.get_circuits():
            # print(controller_handler.get_circuit(circuit.id))
            entry_node_fingerprint = controller_handler.path[0][0]
            #middle_node_fingerprint = circuit.path[1][0]
            exit_node_fingerprint = circuit.path[2][0]
            entry_node_descriptor = controller_handler.get_network_status(entry_node_fingerprint)
            #middle_node_descriptor = controller_handler.get_network_status(middle_node_fingerprint)
            exit_node_descriptor = controller_handler.get_network_status(exit_node_fingerprint)
            print("exitip|%s" % (entry_node_descriptor.address))
            #print("exitip|%s" % (middle_node_descriptor.address))
            print("exitip|%s" % (exit_node_descriptor.address))
            print('---------------------')
PROBLEM: the problem with this code is that it stops working most of the time; it just starts printing the dashed separator, and when it does run it does not change the IP.
What I want: I want to check the latency between the client and the middle node, along with the RTT, where the RTT should be measured by sending a relay connect cell to a dummy destination, e.g., localhost.
For latency, my idea is:
t1 = time.perf_counter()
middle_node_descriptor = controller_handler.get_network_status(middle_node_fingerprint)
t2 = time.perf_counter()
t3 = t2 - t1
NOTE: I haven't tried this because the code above is not working.
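A minimal sketch of one way to estimate the latency from the client to the middle node, assuming stem is installed and the same control port and password as above. It times a plain TCP handshake to the middle relay's published ORPort, which approximates the network RTT to that relay but is not the in-circuit measurement (relay connect cell to a dummy destination) described above. The length check also matters for the original loop: NEWNYM leaves internal and partially built circuits in get_circuits(), and indexing path[2] on those raises an exception, which may be why the code appears to stop.

import socket
import time

from stem.control import Controller

def tcp_handshake_rtt(host, port, timeout=5.0):
    # Rough RTT estimate: time a plain TCP connect to the relay's ORPort.
    start = time.perf_counter()
    with socket.create_connection((host, port), timeout=timeout):
        pass
    return time.perf_counter() - start

with Controller.from_port(port=9051) as controller:
    controller.authenticate('welcome')
    for circuit in controller.get_circuits():
        if len(circuit.path) < 3:
            continue  # skip internal or partially built circuits
        middle_fingerprint = circuit.path[1][0]
        middle_status = controller.get_network_status(middle_fingerprint)
        rtt = tcp_handshake_rtt(middle_status.address, middle_status.or_port)
        print("middle %s (%s) rtt %.3f s" % (middle_fingerprint, middle_status.address, rtt))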

Related

Issue in calculating frames sent per second with OpenCV

I have the following code. I am calculating the time taken to send frames per second from client to server, essentially measuring the frame drop rate and the time taken to get a response back; the communication is asynchronous.
Now I am facing an issue in calculating the two metrics. For the time taken to respond I have set the delay to more than 5 seconds, because due to the network and the server's processing speed it takes time to send the result back to the client, hence the longer delay. But for frames per second I need to calculate how many frames are sent from the client to the server each second: how would I calculate this in the data_rate method? The two metrics need different time delays, so I can't use the same delay for both. Help on how to define this in the code is highly appreciated.
import base64
import threading
import time

import cv2
import zmq

IMAGE_FOLDER = "videoframe"
FPS = 5
SERVER_A_ADDRESS = "tcp://localhost:5555"
ENDPOINT_HANDLER_ADDRESS = "tcp://*:5553"
SERVER_A_TITLE = "SERVER A"
SERVER_B_TITLE = "SERVER B"

context = zmq.Context()
socket_server_a = context.socket(zmq.PUSH)
socket_server_endpoint = context.socket(zmq.PULL)
socket_server_a.connect(SERVER_A_ADDRESS)
socket_server_endpoint.bind(ENDPOINT_HANDLER_ADDRESS)

destination = {
    "currentSocket": socket_server_a,
    "currentServersTitle": SERVER_A_TITLE,
    "currentEndpoint": SERVER_B_TITLE,
}

running = True
endpoint_responses = 0
frame_requests = 0
filenames = [f"{IMAGE_FOLDER}/frame{i}.jpg" for i in range(1, 2522)]

def handle_endpoint_responses():
    global destination, running, endpoint_responses
    while running:
        endpoint_response = socket_server_endpoint.recv().decode()
        endpoint_responses += 1

def data_rate():
    global destination, running, endpoint_responses, frame_requests
    while running:
        before_received = endpoint_responses  ###
        time.sleep(5)
        after_received = endpoint_responses
        before_sent = frame_requests
        time.sleep(1)
        after_sent = frame_requests  ###
        print(25 * "#")
        print(f"{time.strftime('%H:%M:%S')} ( i ) : receiving model results: {round((after_received - before_received) / 5, 2)} per second.")
        print(f"{time.strftime('%H:%M:%S')} ( i ) : sending frames: {round((after_sent - before_sent) / 1, 2)} per second.")
        print(25 * "#")

def send_frame(frame, frame_requests):
    global destination, running
    try:
        frame = cv2.resize(frame, (224, 224))
        encoded, buffer = cv2.imencode('.jpg', frame)
        jpg_as_text = base64.b64encode(buffer)
        destination["currentSocket"].send(jpg_as_text)
    except Exception as Error:
        running = False

def main():
    global destination, running, frame_requests
    interval = 1 / FPS
    while running:
        for img in filenames:
            frame = cv2.imread(img)
            frame_requests += 1
            threading.Thread(target=send_frame, args=(frame, frame_requests)).start()
            time.sleep(interval)
    destination["currentSocket"].close()

if __name__ == "__main__":
    threading.Thread(target=handle_endpoint_responses).start()
    threading.Thread(target=data_rate).start()
    main()
Not only the server time: opening the image also takes time, and using sleep with interval = 1/FPS may lead to frame drops too, i.e. producing fewer frames than possible (the same as when playing offline). For playback, if done with sleep, the interval could be shorter and the current time could be checked in the loop: if the time is right, send the next frame; if not, just wait. The delay could also be adaptive, running slightly ahead of the nominal schedule to compensate for the transmission delay, if the goal is for the server side to display or process the image at the actual frame time.
I think you have to synchronize, or at least measure, the difference between the client's and the server's clocks with an initial handshake or as part of each transaction, and include and log the send/receive times in the transactions/log.
That way you could measure some average delay of the transmission.
Another thing that may give hints is to first play the images without sending them; that will show how much time cv2.imread(...) and the preprocessing take.
I.e., comment out the send, or add another function that does everything except destination["currentSocket"].send(jpg_as_text).
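A minimal sketch of the pacing idea described above, assuming the FPS, filenames and send_frame names from the question's code. Each frame is scheduled against a fixed timeline instead of sleeping a constant 1/FPS after the variable-cost imread step, so slow reads eat into the waiting time rather than stretching the frame period:

import threading
import time

import cv2

def play_and_send():
    interval = 1.0 / FPS
    next_due = time.perf_counter()
    for i, img in enumerate(filenames, start=1):
        frame = cv2.imread(img)               # variable cost, paid before the wait
        next_due += interval                  # fixed schedule, independent of read time
        delay = next_due - time.perf_counter()
        if delay > 0:
            time.sleep(delay)                 # early: wait for the slot
        # late (delay <= 0): send immediately, or drop the frame if preferred
        threading.Thread(target=send_frame, args=(frame, i)).start()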

Python - Entire PC network gets slower during repeated requests

First of all, I am new to networking (HTTP communication) and Python.
I am currently using the requests and threading modules to periodically send or receive data from a specific site. The target site is 'https://api.coinone.co.kr', but I don't think that matters here.
With the example code below, I let Python fetch data every second. At first it works pretty well: each request takes about 0.07 s on my computer.
import requests
import time
import threading

url0 = 'https://api.coinone.co.kr/ticker/'

class Fetch:
    def __init__(self):
        self.thread = threading.Thread(target=self.fcn)
        self.t0 = time.perf_counter()
        self.period = 1
        self.req0 = None

    def fcn(self):
        while True:
            # headers = None
            headers = {'Connection': 'close'}
            # requests
            t0 = time.perf_counter()
            req0 = requests.get(url0, headers=headers, params={'currency': 'all'})
            resp0 = req0.json()
            self.req0 = req0
            reqTimeInt0 = time.perf_counter() - t0
            # prints
            print('elapsed time: %0.3f' % t0)
            # print(req0.request.headers)
            print(resp0['result'])
            print('requests time interval: %0.3f' % reqTimeInt0)
            print('')
            # timer
            self.t0 += self.period
            now = time.perf_counter()
            sleepInterval = self.t0 - now
            if sleepInterval > 0:
                time.sleep(sleepInterval)
            else:
                while self.t0 - now <= 0:
                    self.t0 += self.period

f1 = Fetch()
f1.thread.start()
But as time passes, the time needed for each HTTP GET increases. After about 3 hours, one request takes about 0.80 s, roughly 10 times longer than in the initial state. Furthermore, not only do the Python requests get slower, but the entire PC network gets slower (including internet browsing) without any increase in CPU, RAM, or network usage. Closing the console does not bring the network speed back to normal and I have to reboot the PC. After rebooting, the network is completely recovered and the internet works fine.
It seems like some burden on the network connection accumulates with each Python request. So I tried adding 'Connection: close' to the headers, but it didn't work. Will requests.Session() fix the problem?
I don't even know what to do to figure out the problem. I have to make the repeated requests for at least several days without breaking the connection.
Thank you.
Use a session: it won't open a new network connection for every request, it reuses a single one for all of them.
Here are the proposed modifications:
class Fetch:
    def __init__(self):
        self.session = requests.Session()
        self.thread = threading.Thread(target=self.fcn)
        self.t0 = time.perf_counter()
        self.period = 1
        self.req0 = None

    def fcn(self):
        while True:
            # headers = None
            headers = {'Connection': 'close'}
            # requests
            t0 = time.perf_counter()
            req0 = self.session.get(url0, headers=headers, params={'currency': 'all'})
            resp0 = req0.json()
            self.req0 = req0
            # ... other code goes there ...
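One detail worth noting, beyond the answer above: the 'Connection': 'close' header from the question asks the server to tear the connection down after every response, which defeats the session's connection pooling. A minimal sketch of the session-based loop without that header, reusing the URL and params from the question:

import time

import requests

url0 = 'https://api.coinone.co.kr/ticker/'
session = requests.Session()  # one pooled connection, reused across requests

while True:
    t0 = time.perf_counter()
    resp = session.get(url0, params={'currency': 'all'}, timeout=10)
    data = resp.json()
    print('request took %0.3f s, result: %s' % (time.perf_counter() - t0, data.get('result')))
    time.sleep(1)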

Sending "random" traffic through a Mininet network

I want to test a data center routing algorithm using Mininet. The traffic needs to conform to certain parameters:
1. It should consist of "files" of various sizes (note that these don't actually have to be files; traffic generated with, e.g., iperf is fine, as long as the size is controllable);
2. The file sizes should be drawn from a particular distribution;
3. The source/destination host pairs between which data is sent should be selected randomly for a given file;
4. The interval between when a file is sent and when its successor is sent should be random; and
5. If a huge file that takes a long time to transfer is sent between two hosts, it should still be possible to send data between other hosts in the network.
Points 1-4 are taken care of. I've been struggling with #5 for days and I can't get it working properly. My initial thought was to spawn subprocesses/threads to send iperf commands to the hosts:
while count < 10:
    if (count % 2) == 0:
        host_pair = net.get("h1", "h2")
    else:
        host_pair = net.get("h3", "h4")
    p = multiprocessing.Process(target=test_custom_iperf, args=(net, host_pair, nbytes))
    p.daemon = True
    p.start()
    time.sleep(random.uniform(0, 1))
The command test_custom_iperf is modeled after the Python Mininet API's version of iperf to include the -n transfer size parameter:
client, server = host_pair
print client, server
output( '*** Iperf: testing TCP bandwidth between',
        client, 'and', server, '\n' )
server.sendCmd( 'iperf -s' )
if not waitListening( client, server.IP(), 5001 ):
    raise Exception( 'Could not connect to iperf on port 5001' )
cliout = client.cmd( 'iperf -c ' + server.IP() + ' -n %d' % nbytes )
print cliout
server.sendInt()
servout = server.waitOutput()
debug( 'Server output: %s\n' % servout )
result = [ net._parseIperf( servout ), net._parseIperf( cliout ) ]
output( '*** Results: %s\n' % result )
Making this non-blocking has been extremely difficult. I need to be able to send the server.sendInt() command, for some reason, and to do this I need to wait for the client's command to finish.
I'd appreciate any advice on what I can try to make this work!
I took a hint from here and used Mininet's host.popen() method to send the data around. Hopefully this helps someone else:
def send_one_file(file_dir, host_pair, files):
    src, dst = host_pair  # a tuple of Mininet node objects
    # choose a random file from files
    rand_fname = random.sample(files, 1)[0]
    rand_path = os.path.join(file_dir, rand_fname)
    port = random.randint(1024, 65535)
    # Start listening at the destination host
    dst_cmd = 'nc -l %d > /home/mininet/sent/%s.out' % (port, rand_fname)
    print os.path.getsize(rand_path)
    dst.popen( dst_cmd, shell=True )
    # Send file from the source host
    src_cmd = 'nc %s %s < %s' % (dst.IP(), port, rand_path)
    src.popen( src_cmd, shell=True )
Then the parent function calls send_one_file() at random intervals:
def test_netcat_subprocess_async(net, duration):
    file_dir = "/home/mininet/sf_mininet_vm/data/MVI_0406_split"
    files = os.listdir(file_dir)
    start_time = time.time()
    end_time = start_time + duration
    # Transfer for the desired duration
    while time.time() < end_time:
        # Choose a pair of hosts
        host_pair = random.sample(net.hosts, 2)
        send_one_file(file_dir, host_pair, files)
        interval = random.uniform(0.01, 0.1)
        print "Initialized transfer; waiting %f seconds..." % interval
        time.sleep(interval)
This works without any of the problems I experienced with multiprocessing or threading (breaking the network after the session is over, blocking when it shouldn't, etc.).
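A possible refinement, not part of the original answer: host.popen() returns a subprocess.Popen-style handle, so keeping the handles around lets the parent reap or terminate any nc processes that are still running once the test duration expires. A sketch under that assumption, mirroring the names in the answer's code:

import os
import random

procs = []

def send_one_file_tracked(file_dir, host_pair, files):
    src, dst = host_pair
    rand_fname = random.sample(files, 1)[0]
    rand_path = os.path.join(file_dir, rand_fname)
    port = random.randint(1024, 65535)
    # keep the handles instead of discarding popen()'s return value
    procs.append(dst.popen('nc -l %d > /home/mininet/sent/%s.out' % (port, rand_fname), shell=True))
    procs.append(src.popen('nc %s %d < %s' % (dst.IP(), port, rand_path), shell=True))

def cleanup_transfers():
    # call this after the while loop in test_netcat_subprocess_async
    for p in procs:
        if p.poll() is None:  # transfer still running when the window closed
            p.terminate()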

python multiprocessing agents and udp listeners

I have a master class (server, for want of a better term) that creates multiple clients (from a client class) using the multiprocessing library.
class mantransact:
    def __init__(self, runMode, f_xml):
        #call the build nodes function
        self.buildNodes()
        sockLisProcess = multiprocessing.Process(target=self.sockListener())
        sockLisProcess.start()
        self.initiateTransactions()

    def buildNodes(self):
        n_source = self.f_xml.getElement("nodeSource")
        self.log.addToLog ("node source is - %s" % n_source)
        self.n_file = load_source.load_source(n_source, "csv")
        #remove header from node list
        del self.n_file.data_list[0]
        self.nodes = [self.mkproc(node, l) for l in self.n_file.data_list]
        self.log.addToLog(self.nodes)

    def mkproc(self, func, line):
        l = "-".join(line)
        p = multiprocessing.Process(target=func, args=(l, self.f_xml))
        p.start()
        return (line[0], p)

    def sockListener(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_addresss = ('localhost', 10099)
        self.sock.bind(server_addresss)
        while True:
            self.log.addToLog("server is waitin")
            data, address = self.sock.recvfrom(1024)
            self.log.addToLog(data, address)

    def sockSender(self, client_address, d):
        self.sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock2.bind(('localhost', 10098))
        recip = ('localhost', int(client_address))
        self.sock2.sendto(str(d), recip)
        self.sock2.close()

    def initiateTransactions(self):
        #loop through transactions and then loop the node list; at a match, transmit the transaction
        #using UDP ports
        for t in self.t_file.data_list:
            for n in self.nodes:
                if t[0] == n[0]:
                    for l in self.n_file.data_list:
                        if l[0] == n[0]:
                            self.log.addToLog ("gonna transmit UDP transaction to node - %s" % n[0])
                            client_address = l[1]
                            pip = n[2]
                            t.insert(0, "nTransaction")
                            self.sockSender(client_address, t)
I am trying to create UDP listeners at both the client and the nodes:
class node:
    def __init__(self, child_conn, line, f_xml):
        l = line.split("-")
        """extract values from array and use f_xml for config"""
        self.proofProcess = multiprocessing.Process(target=self.runProof(self.waitingTransactions))
        self.proofProcess.start()
        self.listenProcess = Multiprocessing.Process(target=self.udpListener())
        self.listenProcess.start()

    def udpListener(self):
        lsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        lsock.bind(("localhost", int(self.IP)))
        while 1 == 1:
            data, addr = lsock.recvfrom(1024)
            print ("received message", data)
            """do some things with data"""
I have two issues:
1. With my server, I want my code to kick off these processes and then continue on, instantiating things or performing other tasks, but the code just hangs waiting for the listener to receive a packet. Am I instantiating the processes incorrectly?
2. With my clients, they perform a task to solve a problem and don't start their listeners until that task is complete. Can they not start their task and listen in parallel? The listener is meant to interrupt the calculation if another client solves it first, after which the client will receive a new task from the server.
I found the solution.
I first tried putting the multiprocessing element into a separate process:
def loopProcesses(self, procedureName):
    processX = multiprocessing.Process(target=procedureName)
    processX.start()
    return processX
and putting the functions to be run into a list that is looped through, calling loopProcesses() on each, so that both processes would be started in parallel:
m_processes = [self.sockListener(), self.initiateTransactions()]
l_processes = [self.loopProcesses(mp) for mp in m_processes]
The above did not work, because the functions being called run in a continuous loop until they have found a number of solutions. The problem was that the first function started executing as soon as it was called, before the start command was ever reached. I later found that I have to pass the function without the '()'; then the function waits to be started. The amended code is:
p = [multiprocessing.Process(target=self.sockListener),
     multiprocessing.Process(target=self.initiateTransactions)]
for prc in p:
    prc.start()
I found this after a lot of searching, and came across this: Socketserver multiprocessing.Process is starting without calling start()
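A minimal, self-contained illustration of the difference described above, using a hypothetical worker function (not from the original code): passing target=worker("a") runs the function immediately in the parent and hands its return value (None) to Process, while target=worker plus args defers the call to the child process.

import multiprocessing
import time

def worker(tag):
    time.sleep(1)
    print("worker %s done" % tag)

if __name__ == "__main__":
    # Wrong: worker("a") executes here, blocking the parent for 1 s;
    # Process then gets its return value (None) as the target and does nothing.
    p_wrong = multiprocessing.Process(target=worker("a"))

    # Right: pass the callable itself; it runs in the child after start().
    p_right = multiprocessing.Process(target=worker, args=("b",))

    p_wrong.start()
    p_right.start()
    p_wrong.join()
    p_right.join()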

Failure in ARP poisoning with Python (Linux)

Okay, so I'm running Ubuntu 14.04 LTS, and I'm trying to poison my own ARP cache by doing this:
my private IP address is 10.0.0.1;
my phone's private IP address is 10.0.0.8;
for this example only, let's say my MAC address is axaxaxaxaxax.
I've written the following Python code:
from binascii import *
from struct import *
import socket

class ethernetframe:
    def __init__(self, destmac, srcmac, ethrtype):
        self.destmac = unhexlify(destmac)
        self.srcmac = unhexlify(srcmac)
        self.ethrtype = unhexlify(ethrtype)

    def uniteframe(self, payload):
        frame = ''
        frame = frame + self.destmac
        frame = frame + self.srcmac
        frame = frame + self.ethrtype
        frame = frame + payload
        frame = frame + unhexlify("00000000")
        return frame

class arppacket:
    def __init__(self, opcode, srcmac, srcip, dstmac, dstip):
        if opcode == 1:
            dstmac = "000000000000"
            opcode = "0001"
        else:
            opcode = "0002"
        self.opcode = unhexlify(opcode)
        self.srcmac = unhexlify(srcmac)
        self.srcip = pack('!4B', srcip[0], srcip[1], srcip[2], srcip[3])
        self.dstmac = unhexlify(dstmac)
        self.dstip = pack('!4B', dstip[0], dstip[1], dstip[2], dstip[3])

    def unitepacket(self):
        packet = ''
        packet = packet + "\x00\x01\x08\x00\x06\x04"
        packet = packet + self.opcode
        packet = packet + self.srcmac
        packet = packet + self.srcip
        packet = packet + self.dstmac
        packet = packet + self.dstip
        return packet

e1 = ethernetframe("axaxaxaxaxax", "axaxaxaxaxax", "0800")
arp1 = arppacket(2, "axaxaxaxaxax", (10, 0, 0, 8), "axaxaxaxaxax", (10, 0, 0, 1))
arpacket = arp1.unitepacket()
fullethframe = e1.uniteframe(arpacket)
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0806))
s.bind(("eth0", 0))
s.send(fullethframe)
Now, I'm monitoring this whole process with Wireshark. The ARP packet is being sent and it is formed correctly; in Wireshark I see the following line:
10.0.0.8 is at axaxaxaxaxax
This means that I have successfully sent an ARP reply to my own computer, stating that the MAC address resolved for 10.0.0.8 is axaxaxaxaxax.
Since the ARP cache updates automatically when a reply is received, REGARDLESS of whether a request was sent, there should now be a line in my NIC driver's ARP cache stating that
10.0.0.8 is resolved with axaxaxaxaxax
However, when I run either of the following in my Ubuntu terminal:
arp -a
or
arp -an
it doesn't show up, which means I've failed to poison my own ARP cache. Any ideas how to fix this?
Just a thought here - did you try
arp -an
Without the -n, arp will try to do a reverse name lookup on the hostname(s).
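One more thing that may be worth checking, beyond the answer above: Linux will typically only update existing neighbor-cache entries from an unsolicited reply, and whether it creates new entries from gratuitous ARP is governed by the arp_accept sysctl (/proc/sys/net/ipv4/conf/*/arp_accept), so an entry for 10.0.0.8 may need to exist already before the spoofed reply shows up. The cache can also be inspected directly from Python by reading /proc/net/arp, which is the same table arp -an prints; a small sketch:

def arp_cache():
    # /proc/net/arp columns: IP address, HW type, Flags, HW address, Mask, Device
    with open('/proc/net/arp') as f:
        f.readline()  # skip the header row
        return [line.split() for line in f if line.strip()]

for entry in arp_cache():
    print("%s is at %s (flags %s)" % (entry[0], entry[3], entry[2]))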
