I have a small program that pings multiple IPs at the same time by sending 10 pings to each. It records the results in a dict and also prints the status to a page.
However, I want the program to ping continuously and let the user stop it, rather than relying on a maximum ping count.
import os
import re
import time
import sys
import subprocess
import Queue
import threading
class pinger:
def __init__(self,hosts):
self.q = Queue.Queue()
self.all_results = []
self.hosts = hosts
def send_ping(self,q,ip):
self.q.put(self.record_results(ip))
def record_results(self,ip):
ping_count = 0
host_results = {
"host" : ip,
"device" : None,
"sent_count" : 0,
"success_count": 0,
"fail_count": 0,
"failed_perc": 0,
"curr_status": None
}
while ping_count < 10:
rc = subprocess.call(['ping', '-c', '1', '-W', '1', ip], stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
ping_count += 1
# update stats
host = host_results['host']
sent_count = host_results['sent_count']
success_count = host_results['success_count']
fail_count = host_results['fail_count']
failed_perc = host_results['failed_perc']
curr_status = host_results['curr_status']
sent_count += 1
if rc == 0:
success_count += 1
curr_status = "Successful Response"
else:
fail_count += 1
curr_status = "Request Timed Out"
failed_perc = (fail_count / float(sent_count)) * 100  # float() avoids Python 2 integer division truncating to 0
host_results.update({'failed_perc': failed_perc, 'fail_count': fail_count, 'success_count': success_count, 'curr_status': curr_status, 'sent_count': sent_count})
time.sleep(0.5)
print host_results
self.all_results.append(host_results)
return True
def go(self):
for i in self.hosts:
t = threading.Thread(target=self.send_ping, args = (self.q,i))
t.daemon = True
t.start()
Thanks,
You could change the while ping_count < 10 condition to while self.should_ping: (initialize that attribute to True in __init__). Then, if there is a main loop where you wait to gather all the results, you can wrap it in try/except KeyboardInterrupt and set pinger.should_ping to False in the exception handler.
Otherwise, you could register a SIGINT handler, as mentioned by @bereal, and set should_ping to False there.
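A minimal sketch of that approach (hypothetical names; the actual ping and stats code from the class above is elided, and the main loop here just waits for Ctrl+C):

import threading
import time

class Pinger(object):
    def __init__(self, hosts):
        self.hosts = hosts
        self.should_ping = True            # replaces the fixed ping_count < 10 limit

    def record_results(self, ip):
        while self.should_ping:            # loop until the user asks us to stop
            # ... send one ping here and update host_results as in the original ...
            time.sleep(0.5)

    def go(self):
        for ip in self.hosts:
            t = threading.Thread(target=self.record_results, args=(ip,))
            t.daemon = True
            t.start()

if __name__ == '__main__':
    p = Pinger(['127.0.0.1'])
    p.go()
    try:
        while True:                        # main loop: just wait for Ctrl+C
            time.sleep(1)
    except KeyboardInterrupt:
        p.should_ping = False              # worker threads exit on their next check

A SIGINT handler registered with signal.signal that flips the same flag works the same way if there is no convenient main loop to wrap.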
For my program, I have a script that writes random integers to a .csv file.
from __future__ import absolute_import, division, print_function
from numpy.random import randint as randrange
import os, argparse, time
from tqdm import tqdm
def write_to_csv(filename, *args, newline = True):
write_string = ''
for arg in args:
if type(arg) == list:
for i in arg:
write_string += str(i) + ','
else:
write_string += str(arg) + ','
if newline:
write_string = write_string.rstrip(',') + '\n'
else:
write_string = write_string.rstrip(',')
with open(filename+'.csv', 'a') as file:
file.write(write_string)
def move_dir(dirname, parent = False):
if not parent:
dirname = str(dirname)
exists = os.path.isfile(dirname)
try:
os.mkdir(dirname)
os.chdir(dirname)
except FileExistsError:
os.chdir(dirname)
else:
os.chdir("..")
def calculate_probability(odds, exitmode = False, low_cpu = 0):
try:
file_count = 0
move_dir('Probability')
move_dir(str(odds))
d = {}
writelist = []
percentlist = []
for i in tqdm(range(odds)):
d[str(i)] = 0
writelist.append(f'Times {i}')
percentlist.append(f'Percent {i}')
while True:
if os.path.isfile(str(file_count)+'.csv'):
file_count += 1
else:
break
filename = str(file_count)
write_to_csv(filename, 'Number', 'Value')
rep = 500 * odds
if rep > 10000:
rep = 10000
for i in tqdm(range(rep)):
ran = randrange(odds)
ran = int(ran)
d[str(ran)] += 1
if i == 999:
write_to_csv(filename, i, ran+1, newline = False)
else:
write_to_csv(filename, i, ran+1)
if low_cpu:
time.sleep(0.01*float(low_cpu))
writelist2 = []
percentlist2 = []
for i in tqdm(range(odds)):
val = d[str(i)]
writelist2.append(val)
percentlist2.append(round(((val/rep)*100), 2))
if os.path.isfile('runs.csv'):
write_to_csv('runs', file_count, writelist2, percentlist2)
else:
write_to_csv('runs', 'Run #', writelist, percentlist)
write_to_csv('runs', file_count, writelist2, percentlist2)
if exitmode:
exit()
except(KeyboardInterrupt, SystemExit):
if exitmode:
os.remove(str(file_count)+'.csv')
exit()
else:
try:
os.system('cls')
print('User/program interrupted, launching shutdown mode...')
os.remove(str(file_count)+'.csv')
print('Finalizing current trial...')
os.chdir("..")
os.chdir("..")
except FileNotFoundError:
exit()
calculate_probability(odds, exitmode = True)
I also have a repetition system to do this multiple times.
def run_tests(times, odds, low_cpu = 0, shutdown = False):
for i in tqdm(range(times)):
calculate_probability(odds, low_cpu = low_cpu)
os.chdir("..")
os.chdir("..")
if shutdown:
os.system('shutdown /S /F /T 0 /hybrid')
However, running something like 30 trials would take forever, so I decided to use the multiprocessing module to speed things up. Because each run needs to write to the same file at the end, I had to collect the data and write it after the processes ended.
def calculate_probability(odds, low_cpu = 0):
try:
file_count = 0
move_dir('Probability')
move_dir(str(odds))
d = {}
writelist = []
percentlist = []
for i in tqdm(range(odds)):
d[str(i)] = 0
writelist.append(f'Times {i}')
percentlist.append(f'Percent {i}')
while True:
if os.path.isfile(str(file_count)+'.csv'):
file_count += 1
else:
break
filename = str(file_count)
write_to_csv(filename, 'Number', 'Value')
rep = 500 * odds
if rep > 10000:
rep = 10000
for i in range(rep):
ran = randrange(odds)
ran = int(ran)
d[str(ran)] += 1
if i == 999:
write_to_csv(filename, i, ran+1, newline = False)
else:
write_to_csv(filename, i, ran+1)
if low_cpu:
time.sleep(0.01*float(low_cpu))
writelist2 = []
percentlist2 = []
for i in range(odds):
val = d[str(i)]
writelist2.append(val)
percentlist2.append(round(((val/rep)*100), 2))
return (writelist, percentlist, writelist2, percentlist2)
except(KeyboardInterrupt, SystemExit):
try:
os.remove(str(file_count)+'.csv')
finally:
exit()
def worker(odds, returndict, num, low_cpu = 0):
returndict[f'write{num}'] = calculate_probability(odds, low_cpu = low_cpu)
os.chdir("..")
os.chdir("..")
os.system('cls')
def run_tests(times, odds, low_cpu = 0, shutdown = False):
print('Starting...')
manager = Manager()
return_dict = manager.dict()
job_list = []
for i in range(times):
p = Process(target=worker, args=(odds,return_dict,i), kwargs = {'low_cpu' : low_cpu})
job_list.append(p)
p.start()
try:
for proc in job_list:
proc.join()
except KeyboardInterrupt:
print('User quit program...')
time.sleep(5)
for proc in job_list:
proc.join()
exit()
else:
move_dir('Probability')
move_dir(str(odds))
if not os.path.isfile('runs.csv'):
write_to_csv('runs', return_dict.values()[0][0], return_dict.values()[0][1])
for value in return_dict.values():
write_to_csv('runs', value[2], value[3])
print('Done!')
finally:
if shutdown:
os.system('shutdown /S /F /T 0 /hybrid')
However, when I run this new code there is only one progress bar, and every process overwrites it, so the bar flashes with random numbers and is effectively useless. I want a stack of bars, one per process, each updating without interrupting the others. The bars do not need to be ordered; I just need an idea of how fast each process is doing its tasks.
STDOUT is just a stream, and all of your processes are attached to the same one, so there's no direct way to tell it to print the output from different processes on different lines.
Probably the simplest way to achieve this is to have a separate process that is responsible for aggregating the status of all the other processes and reporting the results. You can use a multiprocessing.Queue to pass data from the worker processes to the status process, and the status process then prints the status to stdout. If you want a stack of progress bars, you'll have to get a little creative with the formatting (essentially update all the progress bars at the same time and print them in the same order so they appear to stack up).
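A rough sketch of that idea (standalone, not the asker's code: hypothetical worker/reporter names, and a plain one-line text rendering instead of real tqdm bars):

from multiprocessing import Process, Queue
import sys
import time

def worker(num, total, q):
    for i in range(total):
        time.sleep(0.01)                       # stand-in for the real work
        q.put((num, i + 1))                    # report progress to the reporter
    q.put((num, None))                         # sentinel: this worker is finished

def reporter(q, n_workers, total):
    progress = [0] * n_workers
    done = 0
    while done < n_workers:
        num, value = q.get()
        if value is None:
            done += 1
        else:
            progress[num] = value
        # redraw all counters at once, always in the same order
        line = ' | '.join('worker %d: %3d/%d' % (i, p, total)
                          for i, p in enumerate(progress))
        sys.stdout.write('\r' + line)
        sys.stdout.flush()
    sys.stdout.write('\n')

if __name__ == '__main__':
    q = Queue()
    workers = [Process(target=worker, args=(i, 100, q)) for i in range(4)]
    rep = Process(target=reporter, args=(q, len(workers), 100))
    rep.start()
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    rep.join()

For actual stacked bars you would move the cursor up between redraws, or keep tqdm and give each process its own bar via tqdm's position argument.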
So I'm trying to code a really simple Internet Download Manager spoof with Python 2.7.
It is supposed to query a file's HTTP headers, get the byte range, spread the download among a number of threads (I hard-coded 2 for simplicity) according to that range, and later join the file parts together again.
The problem is that my console log tells me only one thread is started.
[EDIT] The problem has been solved. Find the working code below.
Here is my source:
from __future__ import print_function
import threading
import urllib
import urllib2
import time
threads = []
# url to open
url = "http://www.sample-videos.com/video/mp4/720/big_buck_bunny_720p_1mb.mp4"
u = urllib.urlopen(url)
# define file
file_name = "test.mp4"
f = open(file_name, 'wb')
# open url and get header info
def get_file_size(url):
stream_size = u.info()['Content-Length']
end = stream_size
return end
start = 0
#get stream size
end = get_file_size(url)
# specify block size
block_sz = 512
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread1():
full_stream_size = end
first_thread = {'start':0, 'end':(int(full_stream_size)/2)}
print(first_thread)
return first_thread
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread2():
full_stream_size = end
second_thread= {'start':int(full_stream_size)/2,'end': int(full_stream_size)}
print(second_thread)
return second_thread
# download function
def download_thread(url ,id,start,end):
current_size = int(float(start)/1024)
total_size = int(float(end)/1024)
print ("Start at_"+str(current_size) + "Ends at_" + str(total_size))
# specify request range and init stream
req = urllib2.Request(url)
req.headers['Range'] = 'bytes=%s-%s' % (start, end)
data = urllib2.urlopen(req)
while True:
buffer = u.read(block_sz)
if not buffer:
break
start += len(buffer)
f.write(buffer)
thread_id = id
#percentage = (current_size * 100 / total_size)
status = str(thread_id) + "_" + str(current_size) + "_" +str(total_size)
print (status)
#starts 2 threads
def start_threads():
for i in range(2):
#if first loop, start thread 1
if(i==1):
start = calculate_no_of_bytes_for_thread1().get('start')
end = calculate_no_of_bytes_for_thread1().get('end')
print("Thread 1 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
#if second loop, start thread 1
if(i==2):
start = calculate_no_of_bytes_for_thread2().get('start')
end = calculate_no_of_bytes_for_thread2().get('end')
print("Thread 2 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
# Join threads back (order doesn't matter, you just want them all)
for i in threads:
i.join()
#start benchmarking
start_time = time.clock()
start_threads()
print ("Finito!")
end_time = time.clock()
benchmark = str(end_time - start_time)
print ("Download took_" +benchmark)
f.close()
And the output:
{'start': 0, 'end': 527868}
{'start': 0, 'end': 527868}
Thread 1 started
Start at_0Ends at_515
1_0_515
1_0_515
Finito!
Download took_6.97844422658
Working code:
from __future__ import print_function
import threading
import urllib
import urllib2
import time
threads = []
parts = {}
# url to open
url = "http://www.sample-videos.com/audio/mp3/india-national-anthem.mp3"
u = urllib.urlopen(url)
# define file
file_name = "test.mp3"
f = open(file_name, 'wb')
# open url and get header info
def get_file_size(url):
stream_size = u.info()['Content-Length']
file_size = stream_size
return file_size
start = 0
#get stream size
end = get_file_size(url)
# specify block size
block_sz = 512
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread1():
full_stream_size = end
first_thread = {'start':0, 'end':(int(full_stream_size)/2)}
print(first_thread)
return first_thread
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread2():
full_stream_size = end
second_thread= {'start':int(full_stream_size)/2,'end': int(full_stream_size)}
print(second_thread)
return second_thread
# download function
def download_thread(url ,id,start,end):
current_size = int(float(start)/1024)
total_size = int(float(end)/1024)
print ("Start at_"+str(current_size) + "Ends at_" + str(total_size))
# specify request range and init stream
req = urllib2.Request(url)
req.headers['Range'] = 'bytes=%s-%s' % (start, end)
while True:
buffer = u.read(block_sz)
if not buffer:
break
start += len(buffer)
f.write(buffer)
thread_id = id
status = "Thread ID_" +str(thread_id) + "Downloaded_" + str(int(start/1024)) + "Total_" +str(total_size)
print (status)
#starts 2 threads
def start_threads():
for i in range(2):
#if first loop, start thread 1
if(i==0):
start = calculate_no_of_bytes_for_thread1().get('start')
end = calculate_no_of_bytes_for_thread1().get('end')
print("Thread 1 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
#if second loop, start thread 2
if(i==1):
start = calculate_no_of_bytes_for_thread2().get('start')
end = calculate_no_of_bytes_for_thread2().get('end')
print("Thread 2 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
# Join threads back (order doesn't matter, you just want them all)
for i in threads:
i.join()
# Sort parts and you're done
# result = ''
# for i in range(2):
# result += parts[i*block_sz]
#start benchmarking
start_time = time.clock()
start_threads()
print ("Finito!")
end_time = time.clock()
benchmark = str(end_time - start_time)
print ("Download took_" +benchmark)
f.close()
You have:
for i in range(2):
if(i==1):
...
if(i==2):
...
But range(2) iterates over [0, 1], not [1, 2], so the i==2 branch never runs and only the first thread is started.
Save yourself some trouble and just drop the loop and the if checks: the code that starts the two threads can simply run one block after the other (or fix the checks to i==0 and i==1, as in the working code above).
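For example, something along these lines (a sketch that reuses url, threads, download_thread and the two calculate_* helpers from the code above, so it is not standalone):

def start_threads():
    # work out both byte ranges up front, then start the two threads directly
    first = calculate_no_of_bytes_for_thread1()
    second = calculate_no_of_bytes_for_thread2()
    for num, part in enumerate((first, second)):
        t = threading.Thread(target=download_thread,
                             args=(url, num, part['start'], part['end']))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()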
I have a text file which contains proxy IPs.
It looks like the following:
130.14.29.111:80
130.14.29.120:80
130.159.235.31:80
14.198.198.220:8909
141.105.26.183:8000
160.79.35.27:80
164.77.196.75:80
164.77.196.78:45430
164.77.196.78:80
173.10.134.173:8081
174.132.145.80:80
174.137.152.60:8080
174.137.184.37:8080
174.142.125.161:80
After checking these proxies, I want to mark them as follows:
total number of '0' = 8
total number of 'x' = 6
percentage = alive 60% , dead 40%
x 130.14.29.111:80
0 130.14.29.120:80
0 130.159.235.31:80
0 14.198.198.220:8909
0 141.105.26.183:8000
0 160.79.35.27:80
x 164.77.196.75:80
x 164.77.196.78:45430
x 164.77.196.78:80
0 173.10.134.173:8081
0 174.132.145.80:80
0 174.137.152.60:8080
x 174.137.184.37:8080
x 174.142.125.161:80
How can this be done with Python, or is there some sample? If anyone could help me or enlighten me, it would be much appreciated!
EDIT:
This is the script source of what I have. The checked proxies are finally saved to 'proxy_alive.txt'.
In that file I want to mark whether each proxy is alive or not.
import socket
import urllib2
import threading
import sys
import Queue
import socket
socket.setdefaulttimeout(7)
print "Bobng's proxy checker. Using %s second timeout"%(socket.getdefaulttimeout())
#input_file = sys.argv[1]
#proxy_type = sys.argv[2] #options: http,s4,s5
#output_file = sys.argv[3]
input_file = 'proxylist.txt'
proxy_type = 'http'
output_file = 'proxy_alive.txt'
url = "www.seemyip.com" # Don't put http:// in here, or any /'s
check_queue = Queue.Queue()
output_queue = Queue.Queue()
threads = 20
def writer(f,rq):
while True:
line = rq.get()
f.write(line+'\n')
def checker(q,oq):
while True:
proxy_info = q.get() #ip:port
if proxy_info == None:
print "Finished"
#quit()
return
#print "Checking %s"%proxy_info
if proxy_type == 'http':
try:
listhandle = open("proxylist.txt").read().split('\n')
for line in listhandle:
saveAlive = open("proxy_alive.txt", 'a')
details = line.split(':')
email = details[0]
password = details[1].replace('\n', '')
proxy_handler = urllib2.ProxyHandler({'http':proxy_info})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent','Mozilla/5.0')]
urllib2.install_opener(opener)
req = urllib2.Request("http://www.google.com")
sock=urllib2.urlopen(req, timeout= 7)
rs = sock.read(1000)
if '<title>Google</title>' in rs:
oq.put(proxy_info)
print '[+] alive proxy' , proxy_info
saveAlive.write(line)
saveAlive.close()
except urllib2.HTTPError,e:
print 'url open error? slow?'
pass
except Exception,detail:
print '[-] bad proxy' ,proxy_info
else:
# gotta be socks
try:
s = socks.socksocket()
if proxy_type == "s4":
t = socks.PROXY_TYPE_SOCKS4
else:
t = socks.PROXY_TYPE_SOCKS5
ip,port = proxy_info.split(':')
s.setproxy(t,ip,int(port))
s.connect((url,80))
oq.put(proxy_info)
print proxy_info
except Exception,error:
print proxy_info
threading.Thread(target=writer,args=(open(output_file,"wb"),output_queue)).start()
for i in xrange(threads):
threading.Thread(target=checker,args=(check_queue,output_queue)).start()
for line in open(input_file).readlines():
check_queue.put(line.strip('\n'))
print "File reading done"
for i in xrange(threads):
check_queue.put(None)
raw_input("PRESS ENTER TO QUIT")
sys.exit(0)
Is this what you want?
#!/usr/bin/env python
import Queue
import threading
import urllib2
import time
input_file = 'proxylist.txt'
threads = 10
queue = Queue.Queue()
output = []
class ThreadUrl(threading.Thread):
"""Threaded Url Grab"""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
while True:
#grabs host from queue
proxy_info = self.queue.get()
try:
proxy_handler = urllib2.ProxyHandler({'http':proxy_info})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent','Mozilla/5.0')]
urllib2.install_opener(opener)
req = urllib2.Request("http://www.google.com")
sock=urllib2.urlopen(req, timeout= 7)
rs = sock.read(1000)
if '<title>Google</title>' in rs:
output.append(('0',proxy_info))
else:
raise "Not Google"
except:
output.append(('x',proxy_info))
#signals to queue job is done
self.queue.task_done()
start = time.time()
def main():
#spawn a pool of threads, and pass them queue instance
for i in range(5):
t = ThreadUrl(queue)
t.setDaemon(True)
t.start()
hosts = [host.strip() for host in open(input_file).readlines()]
#populate queue with data
for host in hosts:
queue.put(host)
#wait on the queue until everything has been processed
queue.join()
main()
for proxy,host in output:
print proxy,host
print "Elapsed Time: %s" % (time.time() - start)
You can use a queue to hold the list of addresses and their meta information.
After you are done checking these IP addresses, you can write the results back to the same file in 'w' mode, for example as a list of tuples:
[ ( ip-address-1,'x' ), ( ip-address-2, '0'), ...... ]
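A small sketch of that write-back step (the names here are illustrative; results is assumed to hold the (ip, mark) pairs collected above):

results = [('130.14.29.111:80', 'x'),
           ('130.14.29.120:80', '0')]          # example (ip:port, mark) pairs

with open('proxylist.txt', 'w') as f:          # 'w' truncates and rewrites the file
    for ip, mark in results:
        f.write('%s %s\n' % (mark, ip))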
I am trying to figure out how to get my client to send and receive data 'simultaneously', and am using threads. My problem is that, the way it is set up here, the client waits for data from the server in the receiveFromServer function, which runs in its own thread, and I cannot stop it when nothing will be sent. The other way I tried just waits for user input, sends it to the server, and only then calls receiveFromServer, which doesn't allow for fluent communication, and I cannot get it to alternate automatically. How do I release the thread when the client has nothing to send, or when there is nothing more to receive from the server?
It would get too long if I tried to explain everything I have tried. :)
Thanks.
The client:
from socket import *
from threading import *
import thread
import time
from struct import pack,unpack
from networklingo import *
#from exception import *
HOST = '192.168.0.105'
PORT = 21567
BUFFSIZE = 1024
ADDR = (HOST,PORT)
lock = thread.allocate_lock()
class TronClient:
def __init__(self,control=None):
self.tcpSock = socket(AF_INET,SOCK_STREAM)
#self.tcpSock.settimeout(.2)
self.recvBuff = []
def connect(self):
self.tcpSock.connect(ADDR)
self.clientUID = self.tcpSock.recv(BUFFSIZE)
print 'My clientUID is ', self.clientUID
t = Thread(target = self.receiveFromSrv())
t.setDaemon(1)
t.start()
print 'going to main loop'
self.mainLoop()
#t = Thread(target = self.mainLoop())
#t.setName('mainLoop')
#t.setDaemon(1)
#t.start()
def receiveFromSrv(self):
RECIEVING = 1
while RECIEVING:
#print 'Attempting to retrieve more data'
#lock.acquire()
#print 'Lock Aquired in recieveFromSrv'
#try:
data = self.tcpSock.recv(BUFFSIZE)
#except socket.timeout,e:
#print 'Error recieving data, ',e
#continue
#print data
if not data: continue
header = data[:6]
msgType,msgLength,clientID = unpack("hhh",header)
print msgType
print msgLength
print clientID,'\n'
msg = data[6:]
while len(msg) < msgLength:
data = self.tcpSock.recv(BUFFSIZE)
dataLen = len(data)
if dataLen <= msgLength:
msg += data
else:
remLen = msgLength-len(data) #we just need to retrieve first bit of data to complete msg
msg += data[:remLen]
self.recvBuff.append(data[remLen:])
print msg
#else:
#lock.release()
# print 'lock release in receiveFromSrv'
#time.sleep(2)
#RECIEVING = 0
def disconnect(self,data=''):
self.send(DISCONNECT_REQUEST,data)
#self.tcpSock.close()
def send(self,msgType,msg):
header = pack("hhh",msgType,len(msg),self.clientUID)
msg = header+msg
self.tcpSock.send(msg)
def mainLoop(self):
while 1:
try:
#lock.acquire()
#print 'lock aquired in mainLoop'
data = raw_input('> ')
except EOFError: # enter key hit without any data (blank line) so ignore and continue
continue
#if not data or data == '': # no valid data so just continue
# continue
if data=='exit': # client wants to disconnect, so send request to server
self.disconnect()
break
else:
self.send(TRON_CHAT,data)
#lock.release()
#print 'lock released in main loop'
#self.recieveFromSrv()
#data = self.tcpSock.recv(BUFFSIZE)
#t = Thread(target = self.receiveFromSrv())
#t.setDaemon(1)
#t.start()
if __name__ == "__main__":
cli = TronClient()
cli.connect()
#t = Thread(target = cli.connect())
#t.setName('connect')
#t.setDaemon(1)
#t.start()
The server (uses a lock when incrementing or decrementing number of clients):
from socket import *
from threading import *
import thread
from controller import *
from networklingo import *
from struct import pack,unpack
HOST = ''
PORT = 21567
BUFSIZE = 1024
ADDR = (HOST,PORT)
nclntlock = thread.allocate_lock()
class TronServer:
def __init__(self,maxConnect=4,control=None):
self.servSock = socket(AF_INET,SOCK_STREAM)
# ensure that you can restart server quickly when it terminates
self.servSock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
self.servSock.bind(ADDR)
self.servSock.listen(maxConnect)
# keep track of number of connected clients
self.clientsConnected = 0
# give each client a unique identfier for this run of server
self.clientUID = 0
# list of all clients to cycle through for sending
self.allClients = {}
# keep track of threads
self.cliThreads = {}
#reference back to controller
self.controller = control
self.recvBuff = []
def removeClient(self,clientID,addr):
if clientID in self.allClients.keys():
self.allClients[clientID].close()
print "Disconnected from", addr
nclntlock.acquire()
self.clientsConnected -= 1
nclntlock.release()
del self.allClients[clientID]
else:
print 'ClientID is not valid'
def recieve(self,clientsock,addr):
RECIEVING = 1
# loop serving the new client
while RECIEVING: # while PLAYING???
try:
data = clientsock.recv(BUFSIZE)
except:
RECIEVING = 0
continue
# if not data: break #no data was recieved
if data != '':
print 'Recieved msg from client: ',data
header = data[:6]
msgType,msgLength,clientID = unpack("hhh",header)
print msgType
print msgLength
print clientID,'\n'
if msgType == DISCONNECT_REQUEST: #handle disconnect request
self.removeClient(clientID,addr)
else: #pass message type and message off to controller
msg = data[6:]
while len(msg) < msgLength:
data = self.tcpSock.recv(BUFSIZE)
dataLen = len(data)
if dataLen <= msgLength:
msg += data
else:
remLen = msgLength-len(data) #we just need to retrieve first bit of data to complete msg
msg += data[:remLen]
self.recvBuff.append(data[remLen:])
print msg
# echo back the same data you just recieved
#clientsock.sendall(data)
self.send(TRON_CHAT,msg,-1) #send to client 0
for k in self.allClients.keys():
if self.allClients[k] == clientsock:
self.removeClient(k,addr)
print 'deleted after hard exit from clientID ', k
#self.cliThreads[k].join()
#del self.cliThreads[k]
# then tell controller to delete player with k
break
def send(self,msgType,msg,clientID=-1):
header = pack("hhh",msgType,len(msg),clientID)
msg = header+msg
if clientID in self.allClients:
self.allClients[clientID].send(msg)
elif clientID==ALL_PLAYERS:
for k in self.allClients.keys():
self.allClients[k].send(msg)
def mainLoop(self):
global nclntlock
try:
while self.controller != None and self.controller.state == WAITING:
print 'awaiting connections'
clientsock, caddy = self.servSock.accept()
nclntlock.acquire()
self.clientsConnected += 1
nclntlock.release()
print 'Client ',self.clientUID,' connected from:',caddy
clientsock.setblocking(0)
clientsock.send(str(self.clientUID))
self.allClients[self.clientUID] = clientsock
t = Thread(target = self.recieve, args = [clientsock,caddy])
t.setName('recieve-' + str(self.clientUID))
self.cliThreads[self.clientUID] = t
self.clientUID += 1
# t.setDaemon(1)
t.start()
finally:
self.servSock.close()
if __name__ == "__main__":
serv = TronServer(control = LocalController(nPlayers = 3, fWidth = 70, fHeight = 10))
t = Thread(target = serv.mainLoop())
t.setName('mainLoop')
# t.setDaemon(1)
t.start()
I think you want to try and set the socket to non-blocking mode:
http://docs.python.org/library/socket.html#socket.socket.setblocking
Set blocking or non-blocking mode of the socket: if flag is 0, the socket is set to non-blocking, else to blocking mode. Initially all sockets are in blocking mode. In non-blocking mode, if a recv() call doesn’t find any data, or if a send() call can’t immediately dispose of the data, an error exception is raised; in blocking mode, the calls block until they can proceed. s.setblocking(0) is equivalent to s.settimeout(0); s.setblocking(1) is equivalent to s.settimeout(None).
Also, instead of using raw sockets, have you considered using the multiprocessing module? It is a higher-level abstraction for doing network IO. The section on Pipes & Queues is specific to sending and receiving data between a client/server.
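As a rough, self-contained sketch of the non-blocking idea (hypothetical names, Unix-only because of socketpair; a short timeout lets the receive loop check a stop flag instead of blocking in recv forever):

import socket
import threading
import time

def receiver(sock, stop_event):
    sock.settimeout(0.5)                 # close to non-blocking, but with a short wait
    while not stop_event.is_set():
        try:
            data = sock.recv(1024)
        except socket.timeout:
            continue                     # nothing arrived; re-check the flag and retry
        if not data:
            break                        # peer closed the connection
        print 'received:', data

if __name__ == '__main__':
    a, b = socket.socketpair()           # stand-in for a real client/server connection
    stop = threading.Event()
    t = threading.Thread(target=receiver, args=(a, stop))
    t.start()
    b.send('hello')
    time.sleep(1)
    stop.set()                           # releases the receiver even if nothing more is sent
    t.join()

The same flag-plus-timeout pattern would slot into receiveFromSrv in the client above, so the receive thread can be released when there is nothing left to read.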