Bloomberg API for Python: Parts of result missing in response - python

I'm using the Bloomberg API for Python to get option data. First, I get all the symbols in the option chain; then I use them to request the bid and ask prices. Through the getOptionChain function there are more than 400 options, and when I checked that result it was fine. However, when I run the getPX function, I get only 10 results in the end. Could anyone help me look into this? Thanks in advance!
import blpapi
import pandas
import csv

options = blpapi.SessionOptions()
options.setServerHost('localhost')
options.setServerPort(8194)

SECURITY_DATA = blpapi.Name("securityData")
SECURITY = blpapi.Name("security")
FIELD_DATA = blpapi.Name("fieldData")
FIELD_ID = blpapi.Name("fieldId")
OPT_CHAIN = blpapi.Name("OPT_CHAIN")
SECURITY_DES = blpapi.Name("Security Description")

def getOptionChain(sec_list):
    session = blpapi.Session(options)
    session.start()
    session.openService('//blp/refdata')
    refDataService = session.getService("//blp/refdata")
    request = refDataService.createRequest("ReferenceDataRequest")
    for s in sec_list:
        request.append("securities", s)
    request.append("fields", "OPT_CHAIN")
    cid = session.sendRequest(request)
    try:
        # Process received events
        while True:
            # We provide timeout to give the chance to Ctrl+C handling:
            ev = session.nextEvent(500)
            response = []
            for msg in ev:
                if cid in msg.correlationIds():
                    securityDataArray = msg.getElement(SECURITY_DATA)
                    for securityData in securityDataArray.values():
                        fieldData = securityData.getElement(FIELD_DATA)
                        for field in fieldData.elements():
                            for n in range(field.numValues()):
                                fld = field.getValueAsElement(n)
                                response.append(fld.getElement(SECURITY_DES).getValueAsString())
            # Response completely received, so we could exit
            if ev.eventType() == blpapi.Event.RESPONSE:
                break
    finally:
        # Stop the session
        session.stop()
    return response

def getPX(sec_list, fld_list):
    opt_chain_list = getOptionChain(sec_list)
    session = blpapi.Session(options)
    session.start()
    session.openService('//blp/refdata')
    refDataService = session.getService("//blp/refdata")
    request = refDataService.createRequest("ReferenceDataRequest")
    for s in opt_chain_list:
        request.append("securities", s)
    for f in fld_list:
        request.append("fields", f)
    cid = session.sendRequest(request)
    try:
        # Process received events
        while True:
            # We provide timeout to give the chance to Ctrl+C handling:
            ev = session.nextEvent(500)
            response = {}
            for msg in ev:
                if cid in msg.correlationIds():
                    securityDataArray = msg.getElement(SECURITY_DATA)
                    for securityData in securityDataArray.values():
                        secName = securityData.getElementAsString(SECURITY)
                        fieldData = securityData.getElement(FIELD_DATA)
                        response[secName] = {}
                        for field in fieldData.elements():
                            response[secName][field.name()] = field.getValueAsFloat()
            # Response completely received, so we could exit
            if ev.eventType() == blpapi.Event.RESPONSE:
                break
    finally:
        # Stop the session
        session.stop()
    tempdict = {}
    for r in response:
        tempdict[r] = pandas.Series(response[r])
    data = pandas.DataFrame(tempdict)
    return data

sec = ["IBM US Equity"]
fld = ["PX_ASK", "PX_BID"]
getPX(sec, fld)

It looks like you've got the "response = {}" in the wrong place.
Currently you're clearing it at each iteration of your loop, so each incoming event refills it.
If you shift the "response = {}" to just before "while(True):", each iteration will append to it rather than clearing and refilling it.
The same is true of the first function, but there the bulk data comes back in a single event. If you were using multiple securities you would see the same issue: a single Bloomberg refdata (partial) response contains data for at most 10 securities.
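For illustration, a minimal sketch of the corrected event loop in getPX (only the placement of response = {} changes; the variable names are as in the original):

response = {}  # initialise once, BEFORE the event loop, so partial responses accumulate
try:
    while True:
        ev = session.nextEvent(500)
        for msg in ev:
            if cid in msg.correlationIds():
                securityDataArray = msg.getElement(SECURITY_DATA)
                for securityData in securityDataArray.values():
                    secName = securityData.getElementAsString(SECURITY)
                    fieldData = securityData.getElement(FIELD_DATA)
                    response[secName] = {}
                    for field in fieldData.elements():
                        response[secName][str(field.name())] = field.getValueAsFloat()
        # PARTIAL_RESPONSE events arrive first; a RESPONSE event marks the end
        if ev.eventType() == blpapi.Event.RESPONSE:
            break
finally:
    session.stop()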

Related

Python sockets: my recv protocol receives all the data when I debug it, but not when I run it

So my project is that I need to send a jpg image from one computer to another computer on the same network. To send the data I split it into chunks of at most 9999 bytes, and then I create a length header that gives the length of the data and attach it to the start of the message. Here is the code:
the protocol:
import os.path

LENGTH_FIELD_SIZE = 4
PORT = 8820
COMANDS_LIST = "TAKE_SCREENSHOT\nSEND_PHOTO\nDIR\nDELETE\nCOPY\nEXECUTE\nEXIT".split("\n")

def check_cmd(data):
    """
    Check if the command is defined in the protocol, including all parameters
    For example, DELETE c:\work\file.txt is good, but DELETE alone is not
    """
    command = ""
    file_location = ""
    splited_data = data.split(maxsplit=1)
    if len(splited_data) == 2:
        command, file_location = splited_data
        return (command in COMANDS_LIST) and (file_location is not None)
    elif len(splited_data) == 1:
        command = splited_data[0]
        return command in ["TAKE_SCREENSHOT", "EXIT", "SEND_PHOTO"]
    return False

# (3)
def create_msg(data):
    """
    Create a valid protocol message, with length field
    """
    data_len = len(str(data))
    if data_len > 9999 or data_len == 0:
        print(f"data len is bigger then 9999 or is 0, data len = {data_len} ")
        return False
    len_field = str(data_len).zfill(4)
    # (4)
    print(len_field)
    return True, f"{len_field}{data}"

def get_msg(my_socket):
    """
    Extract message from protocol, without the length field
    If length field does not include a number, returns False, "Error"
    """
    lenght_field = ""
    data = ""
    try:
        while len(lenght_field) < 4:
            lenght_field += my_socket.recv(4).decode()
    except RuntimeError as exc_run:
        return False, "header wasnt sent properly"
    if not lenght_field.isdigit():
        return False, "error, length header is not valid"
    lenght_field = lenght_field.lstrip("0")
    while len(data) < int(lenght_field):
        data += my_socket.recv(int(lenght_field)).decode()
    return True, data
Now, the protocol works fine when I use the same computer as both server and client, and also when I debug get_msg on the other computer. When I'm not debugging, the problem seems to be that the part that recvs the header receives something else after a few successful recvs and returns an error message.
Here are the server parts:
import socket
import pyautogui as pyautogui
import protocol
import glob
import os.path
import shutil
import subprocess
import base64

IP = "0.0.0.0"
PORT = 8820
PHOTO_PATH = r"C:\Users\Innon\Pictures\Screenshots\screenShot.jpg"  # The path + filename where the screenshot at the server should be saved

def check_client_request(cmd):
    """
    Break cmd to command and parameters
    Check if the command and params are good.
    For example, the filename to be copied actually exists
    Returns:
        valid: True/False
        command: The requested cmd (ex. "DIR")
        params: List of the cmd params (ex. ["c:\\cyber"])
    """
    # Use protocol.check_cmd first
    cmd_arr = cmd.split(maxsplit=1)
    command = cmd_arr[0]
    file_location = None
    if len(cmd_arr) == 2:
        file_location = cmd_arr[1]
    if file_location == None:
        return protocol.check_cmd(cmd), command, file_location
    else:
        file_location = tuple(str(file_location).split())
        if os.path.exists(file_location[0]):
            return protocol.check_cmd(cmd), command, file_location
        return False, command, file_location
    # Then make sure the params are valid
    # (6)

def handle_client_request(command, params):
    """Create the response to the client, given the command is legal and params are OK
    For example, return the list of filenames in a directory
    Note: in case of SEND_PHOTO, only the length of the file will be sent
    Returns:
        response: the requested data
    """
    # (7)
    response = "no server response"
    if command == "DIR":
        response = glob.glob(f"{params[0]}\\*.*")
    if command == "DELETE":
        os.remove(params[0])
        response = f"{params[0]} was deleted"
    if command == "COPY":
        try:
            shutil.copy(params[0], params[1])
            response = f"{params[0]} was copyed to {params[1]}"
        except FileNotFoundError as ex1:
            response = ex1
        except IndexError as ex2:
            response = ex2
    if command == "EXECUTE":
        subprocess.call(params[0])
        response = f"{params[0]} was executed"
    if command == "TAKE_SCREENSHOT":
        # todo find a way to know and create the location of screen shot to be saved
        myScreenshot = pyautogui.screenshot()
        myScreenshot.save(PHOTO_PATH)
        response = f"screen shot have been taken and been saved at {PHOTO_PATH}"
    if command == "SEND_PHOTO":
        with open(PHOTO_PATH, "rb") as file:
            file_data = base64.b64encode(file.read()).decode()
        print(file_data)
        is_vaild_response, img_length = protocol.create_msg(len(file_data))
        print(img_length)
        img_data = ""
        if not is_vaild_response:
            response = "img length data isnt valid"
            return response
        while len(file_data) > 0:
            chunk_data = file_data[:9999]
            is_vaild_response, data = protocol.create_msg(chunk_data)
            if not is_vaild_response:
                response = "img data isnt valid"
                return response
            img_data += data
            file_data = file_data[9999:]
        response = f"{img_length}{img_data}"
    return response

def main():
    # open socket with client
    server_socket = socket.socket()
    server_socket.bind((IP, PORT))
    server_socket.listen(1)
    # (1)
    client_socket, addr = server_socket.accept()
    # handle requests until user asks to exit
    while True:
        # Check if protocol is OK, e.g. length field OK
        valid_protocol, cmd = protocol.get_msg(client_socket)
        print(f"got message {valid_protocol}")
        if valid_protocol:
            # Check if params are good, e.g. correct number of params, file name exists
            valid_cmd, command, params = check_client_request(cmd)
            print(f"check_client_request {valid_cmd}")
            if valid_cmd:
                # (6)
                if command == 'EXIT':
                    break
                if command == 'SEND_PHOTO':
                    data = handle_client_request(command, params)
                    client_socket.sendall(data.encode())
                    continue
                # prepare a response using "handle_client_request"
                data = handle_client_request(command, params)
                # add length field using "create_msg"
                is_vaild_response, response = protocol.create_msg(data)
                print(f"creat_msg {is_vaild_response}")
                # send to client
                if is_vaild_response:
                    client_socket.sendall(response.encode())
            else:
                # prepare proper error to client
                resp = 'Bad command or parameters'
                is_vaild_response, response = protocol.create_msg(resp)
                # send to client
                client_socket.sendall(response.encode())
        else:
            # prepare proper error to client
            resp = 'Packet not according to protocol'
            is_vaild_response, response = protocol.create_msg(resp)
            # send to client
            client_socket.sendall(response.encode())
            # Attempt to clean garbage from socket
            client_socket.recv(1024)
    # close sockets
    resp = "Closing connection"
    print(resp)
    is_vaild_response, response = protocol.create_msg(resp)
    client_socket.sendall(response.encode())
    client_socket.close()
    server_socket.close()

if __name__ == '__main__':
    main()
and the client:
import socket
import base64
import protocol

IP = "127.0.0.1"
SAVED_PHOTO_LOCATION = r'C:\Users\Innon\Pictures\Saved Pictures\screenShot.jpg'  # The path + filename where the copy of the screenshot at the client should be saved

def handle_server_response(my_socket, cmd):
    """
    Receive the response from the server and handle it, according to the request
    For example, DIR should result in printing the contents to the screen,
    Note- special attention should be given to SEND_PHOTO as it requires an extra receive
    """
    # (8) treat all responses except SEND_PHOTO
    if "SEND_PHOTO" not in cmd:
        vaild_data, data = protocol.get_msg(my_socket)
        if vaild_data:
            return data
    # (10) treat SEND_PHOTO
    else:
        pic_data = ""
        vaild_pick_len, pic_len = protocol.get_msg(my_socket)
        if pic_len.isdigit() == False:
            print(f"picture length is not valid. got massage: {pic_len}")
            return
        with open(SAVED_PHOTO_LOCATION, "wb") as file:
            while len(pic_data) < int(pic_len):
                vaild_data, data = protocol.get_msg(my_socket)
                if not vaild_data:
                    return f"img data isnt valid. {data}"
                pic_data += data
                print(pic_data)
            file.write(base64.b64decode(pic_data.encode()))
        return "img was recived succesfully "

def main():
    # open socket with the server
    my_socket = socket.socket()
    my_socket.connect((IP, 8820))
    # (2)
    # print instructions
    print('Welcome to remote computer application. Available commands are:\n')
    print('TAKE_SCREENSHOT\nSEND_PHOTO\nDIR\nDELETE\nCOPY\nEXECUTE\nEXIT')
    # loop until user requested to exit
    while True:
        cmd = input("Please enter command:\n")
        if protocol.check_cmd(cmd):
            valid_pack, packet = protocol.create_msg(cmd)
            if valid_pack:
                my_socket.sendall(packet.encode())
                print(handle_server_response(my_socket, cmd))
                if cmd == 'EXIT':
                    break
        else:
            print("Not a valid command, or missing parameters\n")
    my_socket.close()

if __name__ == '__main__':
    main()
Here is what the problem looks like: [screenshot of the incorrect output]
And here is how it needs to look: [screenshot of the expected output]
thank you.
The solution was to change the get_msg function in the protocol to:
while len(data) < int(lenght_field):
    data += my_socket.recv(int(lenght_field) - len(data)).decode()
instead of:
while len(data) < int(lenght_field):
    data += my_socket.recv(int(lenght_field)).decode()
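The underlying issue is that recv(n) returns at most n bytes but may return fewer, so the old loop, which asked for the full message length on every pass, could read past the end of the current message and consume the next message's header. A general-purpose helper for this (a minimal sketch, independent of the protocol above) reads exactly n bytes:

def recv_exact(sock, n):
    """Read exactly n bytes from sock; raise if the peer closes early."""
    chunks = []
    remaining = n
    while remaining > 0:
        chunk = sock.recv(remaining)  # never ask for more than is still owed
        if not chunk:
            raise ConnectionError("socket closed before full message arrived")
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)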

How to paginate asyncio requests in python where total is provided in header

*Just to clarify in advance: I use Postman to test my requests, and they return the results I'm looking for.
I'm connecting to an API using Python. The API will only return 500 records per request, and it provides the total number of records in the first response header, 'x-test-count'.
I'm obviously not Python savvy and feel that I'm handling pagination completely wrong. Take a look at the async get function. Basically, it takes the total count from the first response and loops through, running:
async with session.get(paging_url) as response:
    page_results = await response.json()
    pages.extend(page_results)
It does return results, but only 500, so it would seem that it's not capturing each iteration.
class Queue:
    def __init__(self, id, type):
        self.id = id
        self.type = type
        self.requests = []

class Test:
    def __init__(self):
        self.queue = []
        self.queue_list = []
        self.coroutines = []
        self.headers = {
            'Content-Type': 'application/json',
            'x-test-token': self.token,
        }

    def get_id(self, type=''):
        id = datetime.now().strftime('%Y%m-%d%H-%M%S-') + str(uuid4())
        if type != '':
            id = type + '-' + id
        return id

    def url_encode(self, url):
        # doesn't like encoding urls using yarl. I'm manually handling them below with UTF-8 encode
        url = url.replace(' ', '%20')
        #url = url.replace('?', '%3F')
        return url

    def queue_create(self, type=''):
        id = self.get_id(type='queue')
        if type == '':
            self.debug('Error: queue_create was not given a type')
            return
        id = Queue(id=id, type=type)
        self.debug('queue_create instantiated new queue class named: ' + id)
        # TODO: Add to list of active queues to track for create and destroy
        # Return name of new object
        return id

    def queue_run(self, name=''):
        self.debug('Starting queue_run')
        if name == '':
            self.debug('Error: queue_run asked to run without providing a name')
            #return

        async def get(url, headers):
            async with aiohttp.ClientSession(headers=headers, connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
                async with session.get(url) as response:
                    self.debug('HTTP Response: ' + str(response.status))
                    # Set pagination vars to 1
                    current_page = 1
                    page_range = 1
                    # Check the status code. If other than 200, stop
                    assert response.status == 200
                    # Get the count of records. If not provided, set last_page to 1
                    try:
                        page_range = int(response.headers['x-test-count'])
                        self.debug(response.headers['x-test-count'])
                    except:
                        self.debug('x-test-count not provided, defaulted to 1')
                    first_page_results = await response.json()
                    if page_range == 1:
                        self.debug('Returning first page results only')
                        return first_page_results
                    else:
                        self.debug('Total results: ' + str(page_range) + '. Performing additional requests.')
                        pages = []
                        for records in range(1, page_range, 500):
                            remaining_records = page_range - records
                            if remaining_records > 500:
                                paging_size = 500
                            else:
                                paging_size = remaining_records
                            # Create the paging URL
                            paging_url = url + '&size=' + str(paging_size) + '&from=' + str(records)
                            # Run paged requests
                            async with session.get(paging_url) as response:
                                page_results = await response.json()
                                # combine paged requests
                                pages.extend(page_results)
                            # Clear paging URL
                            paging_url = ''
                        return pages

        # Establish the loop
        loop = asyncio.get_event_loop()
        # Establish coroutines and populate with queries from queue
        coroutines = []
        for query in self.queue:
            # Removed a lot of the actual code here. Basically, this establishes the URL and appends coroutines
            coroutines.append(get(url, headers=headers))
        # Start the asyncio loop
        results = loop.run_until_complete(asyncio.gather(*coroutines))
        return results

    def add_request(self, type, endpoint, query='', deleted=False, data='', full=False, paging_size='', paging_from=''):
        self.debug('Starting add_request')
        self.debug('Queue before append: ', item=self.queue)
        self.queue.append([type, endpoint, query, deleted, data, full, paging_size, paging_from])
        self.debug('Queue after append: ', item=self.queue)
        return self.queue
So to run it, it looks something like this:
Test = Test()
Test.add_request('read', 'personnel', '', full=True, deleted=False)
response = Test.queue_run()
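For reference, a minimal, self-contained sketch of the pattern being attempted here: read the total from the first response's x-test-count header, build one coroutine per remaining page, and fan them out with asyncio.gather. The size/from query parameters, the header name, and the assumption that url already contains a query string are carried over from the question, not from a known API.

import asyncio
import aiohttp

PAGE_SIZE = 500  # the API's per-request cap, per the question

async def fetch_all(url, headers):
    async with aiohttp.ClientSession(headers=headers) as session:
        # First request: get the first page plus the total record count
        async with session.get(url) as response:
            total = int(response.headers.get('x-test-count', 0))
            pages = [await response.json()]
        # One coroutine per remaining page, all awaited concurrently
        async def fetch_page(offset):
            async with session.get(f"{url}&size={PAGE_SIZE}&from={offset}") as resp:
                return await resp.json()
        tasks = [fetch_page(offset) for offset in range(PAGE_SIZE, total, PAGE_SIZE)]
        for page in await asyncio.gather(*tasks):
            pages.append(page)
    # Flatten the list of pages into a single list of records
    return [record for page in pages for record in page]

# usage sketch (hypothetical URL):
# records = asyncio.run(fetch_all('https://example.invalid/personnel?full=true', headers={}))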

Error Accessing Data in Message Element

I have an issue trying to process a ReferenceDataRequest. Here is all the code I am using to set up the session.
global options
options = parseCmdLine()
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(options.host)
sessionOptions.setServerPort(options.port)
session = blpapi.Session(sessionOptions)
if not session.start():
    print("Failed to start session.")
    return False
if not session.openService("//blp/refdata"):
    print("Failed to open //blp/refdata")
    return False
refDataService = session.getService("//blp/refdata")
request = refDataService.createRequest("ReferenceDataRequest")
request.getElement("securities").appendValue("UX1 Index")
request.getElement("fields").appendValue("FUT_DAYS_EXPIRE")
cid = session.sendRequest(request)
while True:
    ev = session.nextEvent(500)
    for msg in ev:
        if cid in msg.correlationIds():
            # print(msg)
            data = msg.getElement("securityData").getElement("fieldData")
    if ev.eventType() == blpapi.Event.RESPONSE:
        break
print(data)
session.stop()
I am getting this error:
blpapi.exception.UnknownErrorException: Attempt access name 'fieldData' on array element 'securityData' (0x00000003)
This is what the received msg looks like:
ReferenceDataResponse = {
    securityData[] = {
        securityData = {
            security = "UX1 Index"
            eidData[] = {
            }
            fieldExceptions[] = {
            }
            sequenceNumber = 0
            fieldData = {
                FUT_DAYS_EXPIRE = 27
            }
        }
    }
}
Is there any way to fix this issue?
The solution is to just narrow down the msg elements and values. It took a lot of testing, but this is the solution to my specific problem.
refDataService = session.getService("//blp/refdata")
request = refDataService.createRequest("ReferenceDataRequest")
request.getElement("securities").appendValue("UX1 Index")
request.getElement("fields").appendValue("FUT_DAYS_EXPIRE")
cid = session.sendRequest(request)
while True:
    ev = session.nextEvent(500)
    for msg in ev:
        if cid in msg.correlationIds():
            data = msg.getElement("securityData").getValue().getElement("fieldData").getElement("FUT_DAYS_EXPIRE").getValue()
    if ev.eventType() == blpapi.Event.RESPONSE:
        break
print(data)
session.stop()
Looking at the provided examples in the DAPI blpapi directory helped me.
You could also do:
data = msg.getElement("securityData").getElement("securityData").getElement("fieldData").getElement("FUT_DAYS_EXPIRE").getValue()
I believe getValue() worked because there was only one value in the securityData[] array.
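More generally, securityData is an array with one entry per requested security, so the robust pattern is to iterate over it rather than assume a single value. A minimal sketch of the message-processing part (same ev and cid variables as above):

SECURITY_DATA = blpapi.Name("securityData")
FIELD_DATA = blpapi.Name("fieldData")

for msg in ev:
    if cid not in msg.correlationIds():
        continue
    # one entry per security in the request
    for securityData in msg.getElement(SECURITY_DATA).values():
        fieldData = securityData.getElement(FIELD_DATA)
        if fieldData.hasElement("FUT_DAYS_EXPIRE"):
            print(fieldData.getElementAsInteger("FUT_DAYS_EXPIRE"))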

How to properly debug ThreadPool?

I'm trying to get some data from a web page. To speed up this process (they allow me to make 1000 requests per minute), I use ThreadPool.
Since there is a huge amount of data, the process is quite vulnerable to connection failures etc., so I try to log everything I can in order to detect each mistake I made in the code.
The problem is that the program sometimes just stops without any exception (it acts like it is running but with no effect; I use PyCharm). I log caught exceptions everywhere I can, but I can't see any exception in any log.
I assume that if a timeout were reached, the exception would be raised and logged.
I've found out where the problem could be. Here is the code:
As a pool, I use: from multiprocessing.pool import ThreadPool as Pool
And the lock: from threading import Lock
The download_category function is used in a loop.
def download_category(url):
    # some code
    #
    # ...
    try:
        log('Create pool...')
        _pool = Pool(_workers_number)
        with open('database/temp_produkty.txt') as f:
            log('Spracovavanie produktov... vytvaranie vlakien...')  # "Processing products... creating threads..." - I see this in the log
            for url_product in f:
                x = _pool.apply_async(process_product, args=(url_product.strip('\n'), url))
        _pool.close()
        _pool.join()
        log('Presuvanie produktov z temp export do export.csv...')  # "Moving products from temp export to export.csv..." - I can't see this in the log
        temp_export_to_export_csv()
        set_spracovanie_kategorie(url)
    except Exception as e:
        logging.exception('Got exception on download_one_category: {}'.format(url))
And the process_product function:
def process_product(url, cat):
    try:
        data = get_product_data(url)
    except:
        log('{}: {} exception while getting product data... #')  # I don't see this in the log
        return
    try:
        print_to_temp_export(data, cat)  # I don't see this in the log
    except:
        log('{}: {} exception while printing to csv... #')  # I don't see this in the log
        raise
LOG function:
def log(text):
    now = datetime.now().strftime('%d.%m.%Y %H:%M:%S')
    _lock.acquire()
    mLib.printToFile('logging/log.log', '{} -> {}'.format(now, text))
    _lock.release()
I use the logging module too. In that log, I can see that the request was sent probably 8 times (the number of workers), but no answer was received.
EDIT1:
def get_product_data(url):
    data = defaultdict(lambda: '-')
    root = load_root(url)
    try:
        nazov = root.xpath('//h1[@itemprop="name"]/text()')[0]
    except:
        nazov = root.xpath('//h1/text()')[0]
    under_block = root.xpath('//h2[@id="lowest-cost"]')
    if len(under_block) < 1:
        under_block = root.xpath('//h2[contains(text(),"Naj")]')
    if len(under_block) < 1:
        return False
    data['nazov'] = nazov
    data['url'] = url
    blocks = under_block[0].xpath('./following-sibling::div[@class="shp"]/div[contains(@class,"shp")]')
    i = 0
    for block in blocks:
        i += 1
        data['dat{}_men'.format(i)] = block.xpath('.//a[@class="link"]/text()')[0]
    del root
    return data
LOAD ROOT:
class RedirectException(Exception):
    pass

def load_url(url):
    r = requests.get(url, allow_redirects=False)
    if r.status_code == 301:
        raise RedirectException
    if r.status_code == 404:
        if '-q-' in url:
            url = url.replace('-q-', '-')
            mLib.printToFileWOEncoding('logging/neexistujuce.txt', 'Skusanie {} kategorie...'.format(url))  # "Trying {} category..."
            return load_url(url)  # THIS IS NOT LOOPING
        else:
            mLib.printToFileWOEncoding('logging/neexistujuce.txt', '{}'.format(url))
    html = r.text
    return html

def load_root(url):
    try:
        html = load_url(url)
    except Exception as e:
        logging.exception('load_root_exception')
        raise
    return etree.fromstring(html, etree.HTMLParser())
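One standard way to make such silent ThreadPool failures visible is to keep the AsyncResult objects returned by apply_async and call get() on each with a timeout: any exception raised inside a worker is then re-raised in the parent, and a hung task shows up as a timeout. A minimal sketch, independent of the scraper above (worker is a stand-in name):

import logging
from multiprocessing.pool import ThreadPool

def worker(n):
    if n == 3:
        raise ValueError('boom')  # simulated failure inside a worker
    return n * n

pool = ThreadPool(4)
results = [pool.apply_async(worker, args=(n,)) for n in range(6)]
pool.close()
for r in results:
    try:
        # get() re-raises any exception from the worker; the timeout
        # also exposes tasks that hang instead of finishing
        print(r.get(timeout=30))
    except Exception:
        logging.exception('worker failed or timed out')
pool.join()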

Python client hangs when there is no data to receive from the server, and hangs in that thread without letting the client send

I am trying to figure out how to get my client to send and receive data 'simultaneously', and am using threads. My problem is that, the way I set it up here, it waits for data from the server in the receiveFromSrv function, which is in its own thread, and I cannot stop it when nothing will be sent. The other way, it just waits for user input, sends to the server, and then I'd call receiveFromSrv after the client sends a message; that doesn't allow fluent communication, and I cannot get it to alternate automatically. How do I release the thread when the client has nothing to send, or there is no more to be received from the server?
It would get too long if I tried to explain everything I have tried. :)
Thanks.
The client:
from socket import *
from threading import *
import thread
import time
from struct import pack, unpack
from networklingo import *
#from exception import *

HOST = '192.168.0.105'
PORT = 21567
BUFFSIZE = 1024
ADDR = (HOST, PORT)
lock = thread.allocate_lock()

class TronClient:
    def __init__(self, control=None):
        self.tcpSock = socket(AF_INET, SOCK_STREAM)
        #self.tcpSock.settimeout(.2)
        self.recvBuff = []

    def connect(self):
        self.tcpSock.connect(ADDR)
        self.clientUID = self.tcpSock.recv(BUFFSIZE)
        print 'My clientUID is ', self.clientUID
        t = Thread(target = self.receiveFromSrv())
        t.setDaemon(1)
        t.start()
        print 'going to main loop'
        self.mainLoop()
        #t = Thread(target = self.mainLoop())
        #t.setName('mainLoop')
        #t.setDaemon(1)
        #t.start()

    def receiveFromSrv(self):
        RECIEVING = 1
        while RECIEVING:
            #print 'Attempting to retrieve more data'
            #lock.acquire()
            #print 'Lock Aquired in recieveFromSrv'
            #try:
            data = self.tcpSock.recv(BUFFSIZE)
            #except socket.timeout,e:
            #    print 'Error recieving data, ',e
            #    continue
            #print data
            if not data: continue
            header = data[:6]
            msgType, msgLength, clientID = unpack("hhh", header)
            print msgType
            print msgLength
            print clientID, '\n'
            msg = data[6:]
            while len(msg) < msgLength:
                data = self.tcpSock.recv(BUFFSIZE)
                dataLen = len(data)
                if dataLen <= msgLength:
                    msg += data
                else:
                    remLen = msgLength - len(data)  # we just need to retrieve first bit of data to complete msg
                    msg += data[:remLen]
                    self.recvBuff.append(data[remLen:])
            print msg
            #else:
            #    lock.release()
            #    print 'lock release in receiveFromSrv'
            #time.sleep(2)
            #RECIEVING = 0

    def disconnect(self, data=''):
        self.send(DISCONNECT_REQUEST, data)
        #self.tcpSock.close()

    def send(self, msgType, msg):
        header = pack("hhh", msgType, len(msg), self.clientUID)
        msg = header + msg
        self.tcpSock.send(msg)

    def mainLoop(self):
        while 1:
            try:
                #lock.acquire()
                #print 'lock aquired in mainLoop'
                data = raw_input('> ')
            except EOFError:  # enter key hit without any data (blank line) so ignore and continue
                continue
            #if not data or data == '':  # no valid data so just continue
            #    continue
            if data == 'exit':  # client wants to disconnect, so send request to server
                self.disconnect()
                break
            else:
                self.send(TRON_CHAT, data)
            #lock.release()
            #print 'lock released in main loop'
            #self.recieveFromSrv()
            #data = self.tcpSock.recv(BUFFSIZE)
            #t = Thread(target = self.receiveFromSrv())
            #t.setDaemon(1)
            #t.start()

if __name__ == "__main__":
    cli = TronClient()
    cli.connect()
    #t = Thread(target = cli.connect())
    #t.setName('connect')
    #t.setDaemon(1)
    #t.start()
The server (uses a lock when incrementing or decrementing the number of clients):
from socket import *
from threading import *
import thread
from controller import *
from networklingo import *
from struct import pack, unpack

HOST = ''
PORT = 21567
BUFSIZE = 1024
ADDR = (HOST, PORT)
nclntlock = thread.allocate_lock()

class TronServer:
    def __init__(self, maxConnect=4, control=None):
        self.servSock = socket(AF_INET, SOCK_STREAM)
        # ensure that you can restart server quickly when it terminates
        self.servSock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.servSock.bind(ADDR)
        self.servSock.listen(maxConnect)
        # keep track of number of connected clients
        self.clientsConnected = 0
        # give each client a unique identfier for this run of server
        self.clientUID = 0
        # list of all clients to cycle through for sending
        self.allClients = {}
        # keep track of threads
        self.cliThreads = {}
        # reference back to controller
        self.controller = control
        self.recvBuff = []

    def removeClient(self, clientID, addr):
        if clientID in self.allClients.keys():
            self.allClients[clientID].close()
            print "Disconnected from", addr
            nclntlock.acquire()
            self.clientsConnected -= 1
            nclntlock.release()
            del self.allClients[clientID]
        else:
            print 'ClientID is not valid'

    def recieve(self, clientsock, addr):
        RECIEVING = 1
        # loop serving the new client
        while RECIEVING:  # while PLAYING???
            try:
                data = clientsock.recv(BUFSIZE)
            except:
                RECIEVING = 0
                continue
            # if not data: break  # no data was recieved
            if data != '':
                print 'Recieved msg from client: ', data
                header = data[:6]
                msgType, msgLength, clientID = unpack("hhh", header)
                print msgType
                print msgLength
                print clientID, '\n'
                if msgType == DISCONNECT_REQUEST:  # handle disconnect request
                    self.removeClient(clientID, addr)
                else:  # pass message type and message off to controller
                    msg = data[6:]
                    while len(msg) < msgLength:
                        data = self.tcpSock.recv(BUFSIZE)
                        dataLen = len(data)
                        if dataLen <= msgLength:
                            msg += data
                        else:
                            remLen = msgLength - len(data)  # we just need to retrieve first bit of data to complete msg
                            msg += data[:remLen]
                            self.recvBuff.append(data[remLen:])
                    print msg
                    # echo back the same data you just recieved
                    #clientsock.sendall(data)
                    self.send(TRON_CHAT, msg, -1)  # send to client 0
        for k in self.allClients.keys():
            if self.allClients[k] == clientsock:
                self.removeClient(k, addr)
                print 'deleted after hard exit from clientID ', k
                #self.cliThreads[k].join()
                #del self.cliThreads[k]
                # then tell controller to delete player with k
                break

    def send(self, msgType, msg, clientID=-1):
        header = pack("hhh", msgType, len(msg), clientID)
        msg = header + msg
        if clientID in self.allClients:
            self.allClients[clientID].send(msg)
        elif clientID == ALL_PLAYERS:
            for k in self.allClients.keys():
                self.allClients[k].send(msg)

    def mainLoop(self):
        global nclntlock
        try:
            while self.controller != None and self.controller.state == WAITING:
                print 'awaiting connections'
                clientsock, caddy = self.servSock.accept()
                nclntlock.acquire()
                self.clientsConnected += 1
                nclntlock.release()
                print 'Client ', self.clientUID, ' connected from:', caddy
                clientsock.setblocking(0)
                clientsock.send(str(self.clientUID))
                self.allClients[self.clientUID] = clientsock
                t = Thread(target = self.recieve, args = [clientsock, caddy])
                t.setName('recieve-' + str(self.clientUID))
                self.cliThreads[self.clientUID] = t
                self.clientUID += 1
                # t.setDaemon(1)
                t.start()
        finally:
            self.servSock.close()

if __name__ == "__main__":
    serv = TronServer(control = LocalController(nPlayers = 3, fWidth = 70, fHeight = 10))
    t = Thread(target = serv.mainLoop())
    t.setName('mainLoop')
    # t.setDaemon(1)
    t.start()
I think you want to try and set the socket to non-blocking mode:
http://docs.python.org/library/socket.html#socket.socket.setblocking
Set blocking or non-blocking mode of the socket: if flag is 0, the socket is set to non-blocking, else to blocking mode. Initially all sockets are in blocking mode. In non-blocking mode, if a recv() call doesn't find any data, or if a send() call can't immediately dispose of the data, an error exception is raised; in blocking mode, the calls block until they can proceed. s.setblocking(0) is equivalent to s.settimeout(0); s.setblocking(1) is equivalent to s.settimeout(None).
Also, instead of using raw sockets, have you considered using the multiprocessing module? It is a higher-level abstraction for doing network IO. The section on Pipes & Queues is specific to sending and receiving data between a client and server.
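As a rough illustration of that suggestion, a sketch of a receive loop that uses settimeout so the thread wakes up periodically instead of blocking forever (stop_flag is assumed to be a threading.Event set by the main thread, e.g. when the user types exit):

import socket

def receive_loop(sock, stop_flag):
    sock.settimeout(0.5)  # recv now raises socket.timeout instead of blocking forever
    while not stop_flag.is_set():
        try:
            data = sock.recv(1024)
        except socket.timeout:
            continue  # nothing arrived yet; re-check the stop flag and try again
        if not data:
            break  # peer closed the connection
        print('received:', data)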
