I have an Arduino Nano 33 BLE that is updating a few Bluetooth characteristics with a string representation of BNO055 sensor calibration and quaternion data. On the Arduino side, I see the calibration and quaternion data getting updated in a nice orderly sequence, as expected.
I have a Python (3.9) program running on Windows 10 that uses asyncio to subscribe to the characteristics on the Arduino and read the updates. Everything works fine when the Arduino's update rate is 1/second. By "works fine" I mean I see the orderly sequence of updates: quaternion, calibration, quaternion, calibration, .... The problem is that when I changed the update rate to 10/second (100 ms delay on the Arduino), I now get, for example, 100 updates for quaternion data but only 50 updates for calibration data, when the counts should be equal. Somehow I'm not handling the updates properly on the Python side.
The Python code is listed below:
import asyncio
import pandas as pd
from bleak import BleakClient
from bleak import BleakScanner
ardAddress = ''
found = False
exit_flag = False
temperaturedata = []
timedata = []
calibrationdata=[]
quaterniondata=[]
# loop: asyncio.AbstractEventLoop
tempServiceUUID = '0000290c-0000-1000-8000-00805f9b34fb' # Temperature Service UUID on Arduino 33 BLE
stringUUID = '00002a56-0000-1000-8000-00805f9b34fb' # Characteristic of type String [Write to Arduino]
inttempUUID = '00002a1c-0000-1000-8000-00805f9b34fb' # Characteristic of type Int [Temperature]
longdateUUID = '00002a08-0000-1000-8000-00805f9b34fb' # Characteristic of type Long [datetime millis]
strCalibrationUUID = '00002a57-0000-1000-8000-00805f9b34fb' # Characteristic of type String [BNO055 Calibration]
strQuaternionUUID = '9e6c967a-5a87-49a1-a13f-5a0f96188552' # Characteristic of type Long [BNO055 Quaternion]
async def scanfordevices():
devices = await BleakScanner.discover()
for d in devices:
print(d)
if (d.name == 'TemperatureMonitor'):
global found, ardAddress
found = True
print(f'{d.name=}')
print(f'{d.address=}')
ardAddress = d.address
print(f'{d.rssi=}')
return d.address
async def readtemperaturecharacteristic(client, uuid: str):
val = await client.read_gatt_char(uuid)
intval = int.from_bytes(val, byteorder='little')
print(f'readtemperaturecharacteristic: Value read from: {uuid} is: {val} | as int={intval}')
async def readdatetimecharacteristic(client, uuid: str):
val = await client.read_gatt_char(uuid)
intval = int.from_bytes(val, byteorder='little')
print(f'readdatetimecharacteristic: Value read from: {uuid} is: {val} | as int={intval}')
async def readcalibrationcharacteristic(client, uuid: str):
# Calibration characteristic is a string
val = await client.read_gatt_char(uuid)
strval = val.decode('UTF-8')
print(f'readcalibrationcharacteristic: Value read from: {uuid} is: {val} | as string={strval}')
async def getservices(client):
svcs = await client.get_services()
print("Services:")
for service in svcs:
print(service)
ch = service.characteristics
for c in ch:
print(f'\tCharacteristic Desc:{c.description} | UUID:{c.uuid}')
def notification_temperature_handler(sender, data):
"""Simple notification handler which prints the data received."""
intval = int.from_bytes(data, byteorder='little')
    # TODO: review speed of append vs extend. extend takes an iterable and can be faster
temperaturedata.append(intval)
#print(f'Temperature: Sender: {sender}, and byte data= {data} as an Int={intval}')
def notification_datetime_handler(sender, data):
"""Simple notification handler which prints the data received."""
intval = int.from_bytes(data, byteorder='little')
timedata.append(intval)
#print(f'Datetime: Sender: {sender}, and byte data= {data} as an Int={intval}')
def notification_calibration_handler(sender, data):
"""Simple notification handler which prints the data received."""
strval = data.decode('UTF-8')
numlist=extractvaluesaslist(strval,':')
#Save to list for processing later
calibrationdata.append(numlist)
print(f'Calibration Data: {sender}, and byte data= {data} as a List={numlist}')
def notification_quaternion_handler(sender, data):
"""Simple notification handler which prints the data received."""
strval = data.decode('UTF-8')
numlist=extractvaluesaslist(strval,':')
#Save to list for processing later
quaterniondata.append(numlist)
print(f'Quaternion Data: {sender}, and byte data= {data} as a List={numlist}')
def extractvaluesaslist(raw, separator=':'):
# Get everything after separator
s1 = raw.split(sep=separator)[1]
s2 = s1.split(sep=',')
return list(map(float, s2))
async def runmain():
# Based on code from: https://github.com/hbldh/bleak/issues/254
global exit_flag
print('runmain: Starting Main Device Scan')
await scanfordevices()
print('runmain: Scan is done, checking if found Arduino')
if found:
async with BleakClient(ardAddress) as client:
print('runmain: Getting Service Info')
await getservices(client)
# print('runmain: Reading from Characteristics Arduino')
# await readdatetimecharacteristic(client, uuid=inttempUUID)
# await readcalibrationcharacteristic(client, uuid=strCalibrationUUID)
print('runmain: Assign notification callbacks')
await client.start_notify(inttempUUID, notification_temperature_handler)
await client.start_notify(longdateUUID, notification_datetime_handler)
await client.start_notify(strCalibrationUUID, notification_calibration_handler)
await client.start_notify(strQuaternionUUID, notification_quaternion_handler)
while not exit_flag:
await asyncio.sleep(1)
# TODO: This does nothing. Understand why?
print('runmain: Stopping notifications.')
await client.stop_notify(inttempUUID)
print('runmain: Write to characteristic to let it know we plan to quit.')
await client.write_gatt_char(stringUUID, 'Stopping'.encode('ascii'))
else:
        print("runmain: Arduino not found. Check that it's on")
print('runmain: Done.')
def main():
# get main event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(runmain())
except KeyboardInterrupt:
global exit_flag
print('\tmain: Caught keyboard interrupt in main')
exit_flag = True
finally:
pass
print('main: Getting all pending tasks')
# From book Pg 26.
pending = asyncio.all_tasks(loop=loop)
print(f'\tmain: number of tasks={len(pending)}')
for task in pending:
task.cancel()
group = asyncio.gather(*pending, return_exceptions=True)
print('main: Waiting for tasks to complete')
loop.run_until_complete(group)
loop.close()
# Display data recorded in Dataframe
if len(temperaturedata)==len(timedata):
print(f'Temperature data len={len(temperaturedata)}, and len of timedata={len(timedata)}')
df = pd.DataFrame({'datetime': timedata,
'temperature': temperaturedata})
#print(f'dataframe shape={df.shape}')
#print(df)
df.to_csv('temperaturedata.csv')
else:
print(f'No data or lengths different: temp={len(temperaturedata)}, time={len(timedata)}')
if len(quaterniondata)==len(calibrationdata):
print('Processing Quaternion and Calibration Data')
#Load quaternion data
dfq=pd.DataFrame(quaterniondata,columns=['time','qw','qx','qy','qz'])
print(f'Quaternion dataframe shape={dfq.shape}')
#Add datetime millis data
#dfq.insert(0,'Time',timedata)
#Load calibration data
dfcal=pd.DataFrame(calibrationdata,columns=['time','syscal','gyrocal','accelcal','magcal'])
print(f'Calibration dataframe shape={dfcal.shape}')
#Merge two dataframes together
dffinal=pd.concat([dfq,dfcal],axis=1)
dffinal.to_csv('quaternion_and_cal_data.csv')
else:
print(f'No data or lengths different. Quat={len(quaterniondata)}, Cal={len(calibrationdata)}')
if len(quaterniondata)>0:
dfq = pd.DataFrame(quaterniondata, columns=['time', 'qw', 'qx', 'qy', 'qz'])
dfq.to_csv('quaterniononly.csv')
if len(calibrationdata)>0:
dfcal = pd.DataFrame(calibrationdata, columns=['time','syscal', 'gyrocal', 'accelcal', 'magcal'])
dfcal.to_csv('calibrationonly.csv')
print("main: Done.")
if __name__ == "__main__":
'''Starting Point of Program'''
main()
So, my first question is: can anyone help me understand why I do not seem to be getting all the updates in my Python program? I should be seeing notification_quaternion_handler() and notification_calibration_handler() called the same number of times, but I am not. I assume I am not using asyncio properly, but I am at a loss as to how to debug it at this point.
My second question is: are there best practices for receiving relatively high-frequency updates over Bluetooth, for example every 10-20 ms? I am trying to read IMU sensor data, and it needs to be done at a fairly high rate.
This is my first attempt at bluetooth and asyncio so clearly I have a lot to learn.
Thank You for the help
Fantastic answer by @ukBaz.
In summary, for others who may have a similar issue:
On the Arduino side I ended up with something like this (important parts only shown):
typedef struct __attribute__ ((packed)) {
unsigned long timeread;
int qw; //float Quaternion values will be scaled to int by multiplying by constant
int qx;
int qy;
int qz;
uint8_t cal_system;
uint8_t cal_gyro;
uint8_t cal_accel;
uint8_t cal_mag;
} sensordata;
//Declare struct and populate
sensordata datareading;
datareading.timeread=tnow;
datareading.qw=(int) (quat.w()*10000);
datareading.qx=(int) (quat.x()*10000);
datareading.qy=(int) (quat.y()*10000);
datareading.qz=(int) (quat.z()*10000);
datareading.cal_system=system;
datareading.cal_gyro=gyro;
datareading.cal_accel=accel;
datareading.cal_mag=mag;
//Write values to Characteristics.
structDataChar.writeValue((uint8_t *)&datareading, sizeof(datareading));
Then on the Python (Windows Desktop) side I have this to unpack the data being sent:
def notification_structdata_handler(sender, data):
"""Simple notification handler which prints the data received."""
    # NOTE: IT IS CRITICAL THAT THE UNPACK FORMAT MATCHES THE STRUCT
    # LAYOUT IN THE ARDUINO C PROGRAM (this assumes `import struct` at the top of the file).
    # '<5i4b': '<' = little-endian, '5i' = five 4-byte ints (the unsigned long
    # timestamp plus the four scaled quaternion values), '4b' = four 1-byte calibration flags.
#Scale factor used in Arduino to convert floats to ints.
scale=10000
# Main Sensor struct
t,qw,qx,qy,qz,cs,cg,ca,cm= struct.unpack('<5i4b', data)
sensorstructdata.append([t,qw/scale,qx/scale,qy/scale,qz/scale,cs,cg,ca,cm])
print(f'--->Struct Decoded. time={t}, qw={qw/scale}, qx={qx/scale}, qy={qy/scale}, qz={qz/scale},'
f'cal_s={cs}, cal_g={cg}, cal_a={ca}, cal_m={cm}')
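For reference, this handler plugs into bleak the same way as the string handlers in the original listing. Below is a minimal sketch; the UUID is a hypothetical placeholder for whatever structDataChar is assigned in the Arduino sketch, and it assumes import struct at the top of the file:

import asyncio
import struct
from bleak import BleakClient

structUUID = '00000000-0000-0000-0000-000000000000'  # hypothetical; use structDataChar's UUID
sensorstructdata = []

async def record(address, seconds=10.0):
    # Subscribe to the single packed characteristic and let the handler
    # above collect decoded rows for `seconds` seconds.
    async with BleakClient(address) as client:
        await client.start_notify(structUUID, notification_structdata_handler)
        await asyncio.sleep(seconds)
        await client.stop_notify(structUUID)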
Thanks for all the help and as promised the performance is MUCH better than what I started with!
You have multiple characteristics that are being updated at the same frequency. It is more efficient in Bluetooth Low Energy (BLE) to transmit those values in the same characteristic. The other thing I noticed is that you appear to be sending the values as strings. It looks like the string format might be "key:value", judging by the way you are extracting information from the string. This is also an inefficient way to send data via BLE.
The data that is transmitted over BLE is always a list of bytes, so if a float is required, it needs to be changed into an integer to be sent as bytes. As an example, if we wanted to send a value with two decimal places, multiplying it by 100 would always remove the decimal places. To go the other way, divide by 100. e.g.:
>>> value = 12.34
>>> send = int(value * 100)
>>> print(send)
1234
>>> send / 100
12.34
The struct library allows integers to be easily packed into a series of bytes to send. As an example:
>>> import struct
>>> value1 = 12.34
>>> value2 = 67.89
>>> send_bytes = struct.pack('<hh', int(value1 * 100), int(value2 * 100))
>>> print(send_bytes)
b'\xd2\x04\x85\x1a'
To then unpack that:
>>> r_val1, r_val2 = struct.unpack('<hh', send_bytes)
>>> print(f'Value1={r_val1/100} : Value2={r_val2/100}')
Value1=12.34 : Value2=67.89
Using a single characteristic with the minimum number of bytes being transmitted should allow for faster notifications.
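Applied to the data in this question, the round trip for one combined reading might look like the sketch below (the field layout and the 10000 scale factor are assumptions that mirror the struct the asker eventually used):

import struct

scale = 10000
# One packed reading: unsigned long timestamp, four quaternion floats scaled
# to 4-byte ints, and four single-byte calibration flags - 24 bytes in total.
payload = struct.pack('<L4i4B', 123456,
                      int(round(0.7071 * scale)), 0,
                      int(round(0.7071 * scale)), 0,
                      3, 3, 2, 3)
print(len(payload))  # 24

t, qw, qx, qy, qz, cs, cg, ca, cm = struct.unpack('<L4i4B', payload)
print(t, qw / scale, qx / scale, qy / scale, qz / scale, cs, cg, ca, cm)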
To see how other characteristics do this, look at the following document from the Bluetooth SIG:
https://www.bluetooth.com/specifications/specs/gatt-specification-supplement-5/
A good example might be the Blood Pressure Measurement characteristic.
I've read the Google documentation and looked at their examples, but I have not managed to get this working correctly in my particular use case. The problem is that the packets of the audio stream are broken up into smaller chunks (frame size), base64-encoded and sent over MQTT - meaning that the generator approach is likely to stop part way through, despite the sender not having finished. My MicrophoneSender component will send the final part of the message with segment_num = -1, so this is the flag that the complete message has been sent and that a full/final processing of the stream can be done. Prior to that point the buffer may not hold the complete stream, so it's difficult to get either a) the generator to stop yielding, or b) the Google API to return a partial transcription. A partial transcription is required once every 10 or so frames.
To illustrate this better here is my code.
Inside the receiver:
STREAMFRAMETHRESHOLD = 10
def mqttMsgCallback(self, client, userData, msg):
if msg.topic.startswith("MicSender/stream"):
msgDict = json.loads(msg.payload)
streamBytes = b64decode(msgDict['audio_data'].encode('utf-8'))
frameNum = int(msgDict['segment_num'])
if frameNum == 0:
self.asr_time_start = time.time()
self.asr.endOfStream = False
if frameNum >= 0:
self.asr.store_stream_bytes(streamBytes)
self.asr.endOfStream = False
if frameNum % STREAMFRAMETHRESHOLD == 0:
self.asr.get_intermediate_and_print()
else:
                # FINAL, received -1
trans = self.asr.finish_stream()
self.send_message(trans)
self.frameCount=0
Inside the Google Speech class implementation:
class GoogleASR(ASR):
def __init__(self, name):
super().__init__(name)
# STREAMING
self.stream_buf = queue.Queue()
self.stream_gen = self.getGenerator(self.stream_buf)
self.endOfStream = True
self.requests = (types.StreamingRecognizeRequest(audio_content=chunk) for chunk in self.stream_gen)
self.streaming_config = types.StreamingRecognitionConfig(config=self.config)
self.current_transcript = ''
self.numCharsPrinted = 0
def getGenerator(self, buff):
while not self.endOfStream:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = buff.get(block=False)
data.append(chunk)
except queue.Empty:
self.endOfStream = True
yield b''.join(data)
break
yield b''.join(data)
def store_stream_bytes(self, bytes):
self.stream_buf.put(bytes)
def get_intermediate_and_print(self):
self.get_intermediate()
def get_intermediate(self):
if self.stream_buf.qsize() > 1:
print("stream buf size: {}".format(self.stream_buf.qsize()))
responses = self.client.streaming_recognize(self.streaming_config, self.requests)
# print(responses)
try:
# Now, put the transcription responses to use.
if not self.numCharsPrinted:
self.numCharsPrinted = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
self.current_transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * (self.numCharsPrinted - len(self.current_transcript))
sys.stdout.write(self.current_transcript + overwrite_chars + '\r')
sys.stdout.flush()
self.numCharsPrinted = len(self.current_transcript)
        except Exception as err:
            # Surface streaming failures here (e.g. google.api_core.exceptions.OutOfRange
            # when audio is streamed slower than real time).
            print('get_intermediate error: {}'.format(err))

    def finish_stream(self):
self.endOfStream = False
self.get_intermediate()
self.endOfStream = True
final_result = self.current_transcript
self.stream_buf= queue.Queue()
self.allBytes = bytearray()
self.current_transcript = ''
self.requests = (types.StreamingRecognizeRequest(audio_content=chunk) for chunk in self.stream_gen)
self.streaming_config = types.StreamingRecognitionConfig(config=self.config)
return final_result
Currently this outputs nothing on the transcription side.
stream buf size: 21
stream buf size: 41
stream buf size: 61
stream buf size: 81
stream buf size: 101
stream buf size: 121
stream buf size: 141
stream buf size: 159
But the response/transcript is empty. If I put a breakpoint on the for response in responses line inside the get_intermediate function, it never runs, which means the responses iterator is empty (nothing is returned from Google). However, if I put a breakpoint in the generator and take too long (> 5 seconds) to continue yielding the data, Google tells me that the data is probably being sent to the server too slowly: google.api_core.exceptions.OutOfRange: 400 Audio data is being streamed too slow. Please stream audio data approximately at real time.
Maybe someone can spot the obvious here...
The way you have organized your code, the generator you give to the Google API is initialized exactly once - on line 10, using a generator expression: self.requests = (...). As constructed, this generator will also run exactly once and become 'exhausted'. The same applies to the generator function that the (for ...) generator itself calls (self.getGenerator()). It will run only once and stop when it has retrieved 10 chunks of data (which are very small, from what I can see). Then the outer generator (what you assigned to self.requests) will also stop forever - giving the ASR only a short bit of data (10 times 20 bytes, looking at the printed debug output). There's nothing recognizable in that, most likely.
BTW, note you have a redundant yield b''.join(data) in your function; the data will be sent twice.
You will need to redo the (outer) generator so it does not return until all data is received. If you want to use another generator, as you do, to gather each bigger chunk for the 'outer' generator from which the Google API is reading, you will need to re-make it every time you begin a new loop with it.
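To illustrate that last point, here is a sketch (not a drop-in replacement for the class above) of a request generator that blocks on the queue and terminates only on an explicit None sentinel, so the request stream stays open until the sender is finished:

import queue

def request_generator(buff):
    # Block until at least one chunk is available. Only a None sentinel - put
    # on the queue by the code that sees segment_num == -1 - ends the stream.
    while True:
        chunk = buff.get()
        if chunk is None:
            return
        data = [chunk]
        # Drain whatever else is already buffered without ending the generator.
        while True:
            try:
                chunk = buff.get(block=False)
                if chunk is None:
                    yield b''.join(data)
                    return
                data.append(chunk)
            except queue.Empty:
                break
        yield b''.join(data)

The MQTT callback would then put None on the queue when it receives segment_num == -1, and a fresh generator (plus a fresh streaming_recognize call) would be created for each new utterance.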
I am trying to import the snmpSessionBaseClass python module in a script I am running, but I do not have the module installed and I can't seem to find where to download it. Does anyone know the pip or yum command to download and install this module? Thanks!
import netsnmp
sys.path.insert(1, os.path.join(sys.path[0], os.pardir))
from snmpSessionBaseClass import add_common_options, get_common_options, verify_host, get_data
from pynag.Plugins import PluginHelper,ok,critical
The following code needs to be added to a file called snmpSessionBaseClass.py, and that file needs to be placed in a directory that is on Python's path.
#!/usr/bin/env python
# Copyright (C) 2016 rsmuc <rsmuc@mailbox.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with health_monitoring_plugins. If not, see <http://www.gnu.org/licenses/>.
import pynag
import netsnmp
import os
import sys
dev_null = os.open(os.devnull, os.O_WRONLY)
tmp_stdout = os.dup(sys.stdout.fileno())
def dev_null_wrapper(func, *a, **kwargs):
"""
Temporarily swap stdout with /dev/null, and execute given function while stdout goes to /dev/null.
    This is useful because netsnmp writes to stdout and disturbs the Icinga result in some cases.
"""
os.dup2(dev_null, sys.stdout.fileno())
return_object = func(*a, **kwargs)
sys.stdout.flush()
os.dup2(tmp_stdout, sys.stdout.fileno())
return return_object
def add_common_options(helper):
# Define the common command line parameters
helper.parser.add_option('-H', help="Hostname or ip address", dest="hostname")
helper.parser.add_option('-C', '--community', dest='community', help='SNMP community of the SNMP service on target host.', default='public')
helper.parser.add_option('-V', '--snmpversion', dest='version', help='SNMP version. (1 or 2)', default=2, type='int')
def get_common_options(helper):
# get the common options
host = helper.options.hostname
version = helper.options.version
community = helper.options.community
return host, version, community
def verify_host(host, helper):
if host == "" or host is None:
helper.exit(summary="Hostname must be specified"
, exit_code=pynag.Plugins.unknown
, perfdata='')
netsnmp_session = dev_null_wrapper(netsnmp.Session,
DestHost=helper.options.hostname,
Community=helper.options.community,
Version=helper.options.version)
try:
# Works around lacking error handling in netsnmp package.
if netsnmp_session.sess_ptr == 0:
helper.exit(summary="SNMP connection failed"
, exit_code=pynag.Plugins.unknown
, perfdata='')
except ValueError as error:
helper.exit(summary=str(error)
, exit_code=pynag.Plugins.unknown
, perfdata='')
# make a snmp get, if it fails (or returns nothing) exit the plugin
def get_data(session, oid, helper, empty_allowed=False):
var = netsnmp.Varbind(oid)
varl = netsnmp.VarList(var)
data = session.get(varl)
value = data[0]
if value is None:
helper.exit(summary="snmpget failed - no data for host "
+ session.DestHost + " OID: " +oid
, exit_code=pynag.Plugins.unknown
, perfdata='')
if not empty_allowed and not value:
helper.exit(summary="snmpget failed - no data for host "
+ session.DestHost + " OID: " +oid
, exit_code=pynag.Plugins.unknown
, perfdata='')
return value
# make a snmp get, but do not exit the plugin, if it returns nothing
# be careful! This function does not exit the plugin, if snmp get fails!
def attempt_get_data(session, oid):
var = netsnmp.Varbind(oid)
varl = netsnmp.VarList(var)
data = session.get(varl)
value = data[0]
return value
# make a snmp walk, if it fails (or returns nothing) exit the plugin
def walk_data(session, oid, helper):
tag = []
var = netsnmp.Varbind(oid)
varl = netsnmp.VarList(var)
data = list(session.walk(varl))
if len(data) == 0:
helper.exit(summary="snmpwalk failed - no data for host " + session.DestHost
+ " OID: " +oid
, exit_code=pynag.Plugins.unknown
, perfdata='')
for x in range(0, len(data)):
tag.append(varl[x].tag)
return data, tag
# make a snmp walk, but do not exit the plugin, if it returns nothing
# be careful! This function does not exit the plugin, if snmp walk fails!
def attempt_walk_data(session, oid):
tag = []
var = netsnmp.Varbind(oid)
varl = netsnmp.VarList(var)
data = list(session.walk(varl))
for x in range(0, len(data)):
tag.append(varl[x].tag)
return data, tag
def state_summary(value, name, state_list, helper, ok_value = 'ok', info = None):
"""
Always add the status to the long output, and if the status is not ok (or ok_value),
we show it in the summary and set the status to critical
"""
# translate the value (integer) we receive to a human readable value (e.g. ok, critical etc.) with the given state_list
state_value = state_list[int(value)]
summary_output = ''
long_output = ''
if not info:
info = ''
if state_value != ok_value:
summary_output += ('%s status: %s %s ' % (name, state_value, info))
helper.status(pynag.Plugins.critical)
long_output += ('%s status: %s %s\n' % (name, state_value, info))
return (summary_output, long_output)
def add_output(summary_output, long_output, helper):
"""
if the summary output is empty, we don't add it as summary, otherwise we would have empty spaces (e.g.: '. . . . .') in our summary report
"""
if summary_output != '':
helper.add_summary(summary_output)
helper.add_long_output(long_output)
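Once snmpSessionBaseClass.py is on the path, a check script would use it roughly as follows. This is a sketch: the hostname and community come from the command line via the common options, and the OID (sysDescr.0) is just a placeholder:

#!/usr/bin/env python
import netsnmp
from pynag.Plugins import PluginHelper
from snmpSessionBaseClass import add_common_options, get_common_options, verify_host, get_data

helper = PluginHelper()
add_common_options(helper)
helper.parse_arguments()
host, version, community = get_common_options(helper)
verify_host(host, helper)

session = netsnmp.Session(DestHost=host, Community=community, Version=version)
value = get_data(session, '.1.3.6.1.2.1.1.1.0', helper)  # sysDescr.0 as a placeholder OID
helper.add_summary(value)
helper.exit()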
I am trying to change bus loads in PSS/E from a Python program. Specifically, I want to write a Python script that changes the loads at two buses to different values.
You can use the API routine called "LOAD_CHNG_4" (search for this routine in the API.pdf documentation). This routine belongs to the set of load data specification functions; it can be used to modify the data of an existing load in the working case.
Look at chapter II of the PSSE Python API; it completely covers changing power flow data. Load data can be referenced with the bus number and load ID. You can change all the parameters you would see when you manually enter the load data itself. Use the function below to change the load:
ierr = psspy.load_chang(bus_number, load_id, intgar,realar)
See page 728 of the API manual for more info.
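As a sketch, with a placeholder bus number and load ID (the first intgar entry is the load status, and the first two realar entries are PL in MW and QL in MVAr; verify the exact array layout against the manual for your PSSE version):

import psspy

_i = psspy.getdefaultint()
_f = psspy.getdefaultreal()
bus_number = 101  # placeholder bus
load_id = '1'     # placeholder load ID
# Set the load to 25 MW / 10 MVAr, leaving the remaining fields at their defaults.
ierr = psspy.load_chng_4(bus_number, load_id,
                         [1, _i, _i, _i, _i],
                         [25.0, 10.0, _f, _f, _f, _f])
if ierr != 0:
    print 'load_chng_4 returned error code %d' % ierr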
I wrote this class to handle loads in PSSE... It originally shipped with SQLAlchemy integration, so it had some extras, which I removed:
class Load():
def __init__(self,busnumber,loadID):
# Initialize the Branch Class Instance Variables to a Default 0; Except for the From,To and Circuit ID
self.bus = Bus(busnumber)
self.busI = busnumber
self.loadID = loadID
self.status = 0
self.Pload = 0.0
self.Qload = 0.0
self.errorList = []
self.init()
def init(self):
# Setup the load from the case
# Check to see if bus exists
busOk = self.bus.init()
if not busOk[0]:
return (False,self.bus.number,"The bus number %d is invalid or does not exist..." % self.bus.number)
if psspy.loddt2(self.bus.number,self.loadID,'TOTAL','ACT')[0] != 0:
return (False,False)
Scomplex = self.check(psspy.loddt2(self.bus.number,self.loadID,'TOTAL','ACT'),'loddt2','ACT') # Grab the S vector/phasor from the power flow case for the load specified at the BUS number
self.Pload = Scomplex.real
self.Qload = Scomplex.imag
self.status = self.check(psspy.lodint(self.bus.number,self.loadID,'STATUS'),'lodint','STATUS') # Grab the Status of the Load
return (True,self.bus.number)
def check(self,tuple,funName,subFun):
        # Define error messages that should be accessible by the end-user and developer
loddt2 = {
0:'No error; P and Q or CMPVAL returned',
1:'Bus not found; P and Q or CMPVAL unchanged.',
2:'Load not found; P and Q or CMPVAL unchanged.',
3:'Bus type code is not 1, 2 or 3; P and Q or CMPVAL returned.',
4:'Load out-of-service; P and Q or CMPVAL returned.',
5:'Invalid value of STRING1 or STRING2; P and Q or CMPVAL unchanged.'
}
lodint = {
0:'No error; IVAL returned.',
1:'Bus not found; IVAL unchanged.',
2:'Load not found; IVAL unchanged.',
3:'Bus type code is not 1, 2 or 3; IVAL returned.',
4:'Load out-of-service; IVAL returned.',
5:'Invalid value of STRING; IVAL unchanged.'
}
funDic = {
'loddt2':loddt2,
'lodint':lodint
}
# Retrieve the Right Error Message
list = funDic[funName]
msg = list[tuple[0]]
if tuple[0] > 0:
logging.debug("Function Name: %s Sub Function Name: %s Error Message: %s"%(funName,subFun,msg))
return tuple[1]
def setLoad(self,loadP):
self.Pload = loadP
def updateCase(self):
# Setup Defaults
_i = psspy.getdefaultint()
_f = psspy.getdefaultreal()
cDef = psspy.getdefaultchar()
psspy.load_data_3(self.bus.number,self.loadID,[self.status,_i,_i,_i,_i],[self.Pload, self.Qload,_f,_f,_f,_f])
# To String Method
def __repr__(self):
return "%d %s %d %d" %(self.bus.number,self.loadID,self.Pload,self.Qload)
    # printf statement that dumps information to the CMD line... Homage to C and C++
def printf(self):
print "\n Bus: %d \n Load ID: %s \n MW Value: %d \n MVAR Value: %d \n" % (self.bus.number,self.loadID,self.Pload,self.Qload)
If you call the method named "updateCase", it will take whatever values are stored in the load object and refresh the PSSE case.
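A hypothetical session with the class, using a placeholder bus number and load ID:

ld = Load(101, '1')   # placeholder bus number and load ID
ld.setLoad(25.0)      # store a new MW value on the object
ld.updateCase()       # push the stored values back into the PSSE case
ld.printf()           # dump the load's current state to the console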
http://www.whit.com.au/blog/2011/07/run-psse-from-python-and-not-other-way/
I found this blog post about how to run Python command from PSSE. I don't think that your question has anything to do with ASP.NET though.
Using urllib (or urllib2), getting what I want seems hopeless.
Any solution?
I'm not sure how the C# implementation works, but, as internet streams are generally not seekable, my guess would be it downloads all the data to a local file or in-memory object and seeks within it from there. The Python equivalent of this would be to do as Abafei suggested and write the data to a file or StringIO and seek from there.
However, if, as your comment on Abafei's answer suggests, you want to retrieve only a particular part of the file (rather than seeking backwards and forwards through the returned data), there is another possibility. urllib2 can be used to retrieve a certain section (or 'range' in HTTP parlance) of a webpage, provided that the server supports this behaviour.
The range header
When you send a request to a server, the parameters of the request are given in various headers. One of these is the Range header, defined in section 14.35 of RFC2616 (the specification defining HTTP/1.1). This header allows you to do things such as retrieving all data starting from the 10,000th byte, or the data between bytes 1,000 and 1,500.
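In header form, those two examples would look like this:

Range: bytes=10000-      (everything from byte 10,000 onwards)
Range: bytes=1000-1500   (bytes 1,000 through 1,500, inclusive)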
Server support
There is no requirement for a server to support range retrieval. Some servers will return the Accept-Ranges header (section 14.5 of RFC2616) along with a response to report whether they support ranges. This could be checked using a HEAD request. However, there is no particular need to do this; if a server does not support ranges, it will return the entire page and we can then extract the desired portion of data in Python as before.
Checking if a range is returned
If a server returns a range, it must send the Content-Range header (section 14.16 of RFC2616) along with the response. If this is present in the headers of the response, we know a range was returned; if it is not present, the entire page was returned.
Implementation with urllib2
urllib2 allows us to add headers to a request, thus allowing us to ask the server for a range rather than the entire page. The following script takes a URL, a start position, and (optionally) a length on the command line, and tries to retrieve the given section of the page.
import sys
import urllib2
# Check command line arguments.
if len(sys.argv) < 3:
sys.stderr.write("Usage: %s url start [length]\n" % sys.argv[0])
sys.exit(1)
# Create a request for the given URL.
request = urllib2.Request(sys.argv[1])
# Add the header to specify the range to download.
if len(sys.argv) > 3:
start, length = map(int, sys.argv[2:])
request.add_header("range", "bytes=%d-%d" % (start, start + length - 1))
else:
request.add_header("range", "bytes=%s-" % sys.argv[2])
# Try to get the response. This will raise a urllib2.URLError if there is a
# problem (e.g., invalid URL).
response = urllib2.urlopen(request)
# If a content-range header is present, partial retrieval worked.
if "content-range" in response.headers:
print "Partial retrieval successful."
# The header contains the string 'bytes', followed by a space, then the
# range in the format 'start-end', followed by a slash and then the total
# size of the page (or an asterisk if the total size is unknown). Let's get
# the range and total size from this.
range, total = response.headers['content-range'].split(' ')[-1].split('/')
# Print a message giving the range information.
if total == '*':
print "Bytes %s of an unknown total were retrieved." % range
else:
print "Bytes %s of a total of %s were retrieved." % (range, total)
# No header, so partial retrieval was unsuccessful.
else:
print "Unable to use partial retrieval."
# And for good measure, let's check how much data we downloaded.
data = response.read()
print "Retrieved data size: %d bytes" % len(data)
Using this, I can retrieve the final 2,000 bytes of the Python homepage:
blair@blair-eeepc:~$ python retrieverange.py http://www.python.org/ 17387
Partial retrieval successful.
Bytes 17387-19386 of a total of 19387 were retrieved.
Retrieved data size: 2000 bytes
Or 400 bytes from the middle of the homepage:
blair@blair-eeepc:~$ python retrieverange.py http://www.python.org/ 6000 400
Partial retrieval successful.
Bytes 6000-6399 of a total of 19387 were retrieved.
Retrieved data size: 400 bytes
However, the Google homepage does not support ranges:
blair@blair-eeepc:~$ python retrieverange.py http://www.google.com/ 1000 500
Unable to use partial retrieval.
Retrieved data size: 9621 bytes
In this case, it would be necessary to extract the data of interest in Python prior to any further processing.
It may work best just to write the data to a file (or even to a string, using StringIO), and to seek in that file (or string).
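A minimal sketch of that approach, using urllib2 and StringIO (Python 2, matching the scripts above):

import urllib2
from StringIO import StringIO

# Fetch the whole resource once, then seek freely within the in-memory copy.
buf = StringIO(urllib2.urlopen('http://www.python.org/').read())
buf.seek(6000)         # jump to byte 6,000
chunk = buf.read(400)  # read 400 bytes from there
print 'Read %d bytes' % len(chunk)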
I did not find any existing implementations of a file-like interface with seek() to HTTP URLs, so I rolled my own simple version: https://github.com/valgur/pyhttpio. It depends on urllib.request but could probably easily be modified to use requests, if necessary.
The full code:
import cgi
import time
import urllib.request
from io import IOBase
from sys import stderr
class SeekableHTTPFile(IOBase):
def __init__(self, url, name=None, repeat_time=-1, debug=False):
"""Allow a file accessible via HTTP to be used like a local file by utilities
that use `seek()` to read arbitrary parts of the file, such as `ZipFile`.
Seeking is done via the 'range: bytes=xx-yy' HTTP header.
Parameters
----------
url : str
A HTTP or HTTPS URL
name : str, optional
The filename of the file.
Will be filled from the Content-Disposition header if not provided.
repeat_time : int, optional
In case of HTTP errors wait `repeat_time` seconds before trying again.
Negative value or `None` disables retrying and simply passes on the exception (the default).
"""
super().__init__()
self.url = url
self.name = name
self.repeat_time = repeat_time
self.debug = debug
self._pos = 0
self._seekable = True
with self._urlopen() as f:
if self.debug:
print(f.getheaders())
self.content_length = int(f.getheader("Content-Length", -1))
if self.content_length < 0:
self._seekable = False
if f.getheader("Accept-Ranges", "none").lower() != "bytes":
self._seekable = False
if name is None:
header = f.getheader("Content-Disposition")
if header:
value, params = cgi.parse_header(header)
self.name = params["filename"]
def seek(self, offset, whence=0):
if not self.seekable():
raise OSError
if whence == 0:
self._pos = 0
elif whence == 1:
pass
elif whence == 2:
self._pos = self.content_length
self._pos += offset
return self._pos
def seekable(self, *args, **kwargs):
return self._seekable
def readable(self, *args, **kwargs):
return not self.closed
def writable(self, *args, **kwargs):
return False
def read(self, amt=-1):
if self._pos >= self.content_length:
return b""
if amt < 0:
end = self.content_length - 1
else:
end = min(self._pos + amt - 1, self.content_length - 1)
byte_range = (self._pos, end)
self._pos = end + 1
with self._urlopen(byte_range) as f:
return f.read()
def readall(self):
return self.read(-1)
def tell(self):
return self._pos
def __getattribute__(self, item):
attr = object.__getattribute__(self, item)
if not object.__getattribute__(self, "debug"):
return attr
if hasattr(attr, '__call__'):
def trace(*args, **kwargs):
a = ", ".join(map(str, args))
if kwargs:
a += ", ".join(["{}={}".format(k, v) for k, v in kwargs.items()])
print("Calling: {}({})".format(item, a))
return attr(*args, **kwargs)
return trace
else:
return attr
def _urlopen(self, byte_range=None):
header = {}
if byte_range:
header = {"range": "bytes={}-{}".format(*byte_range)}
while True:
try:
r = urllib.request.Request(self.url, headers=header)
return urllib.request.urlopen(r)
except urllib.error.HTTPError as e:
if self.repeat_time is None or self.repeat_time < 0:
raise
print("Server responded with " + str(e), file=stderr)
print("Sleeping for {} seconds before trying again".format(self.repeat_time), file=stderr)
time.sleep(self.repeat_time)
A potential usage example:
url = "https://www.python.org/ftp/python/3.5.0/python-3.5.0-embed-amd64.zip"
f = SeekableHTTPFile(url, debug=True)
zf = ZipFile(f)
zf.printdir()
zf.extract("python.exe")
Edit: There is actually a mostly identical, if slightly more minimal, implementation in this answer: https://stackoverflow.com/a/7852229/2997179