When I execute this program I get an empty list:
I am expecting it to create an Address object for each line and append each object to the obj_list_addresses list.
Then, when I call get_good_addresses(), I expect it to go back through that list and run code on each object, but the list comes back empty [], almost like it's getting overwritten.
I am fairly new to Python and know that I am missing something important.
Main:
from address import Address
from address_processor import AddressProcessor
addresses=[]
addresses = open('test.txt').read().splitlines()
process_addresses = AddressProcessor(addresses)
process_addresses.create_addresses_obj()
process_addresses.get_good_addresses()
AddressProcessor Class:
import multiprocessing
from address import Address

class AddressProcessor(object):
    """AddressProcessor will process a txt file with addresses"""
    def __init__(self, addresses):
        self.addresses = addresses
        self.return_addresses = []
        self.obj_list_addresses = []

    def create_addresses_obj(self):
        jobs = []
        for address in self.addresses:
            process = multiprocessing.Process(target=self.worker, args=(address,))
            jobs.append(process)
            process.start()
        for job in jobs:
            job.join()
        print('created objects for addresses in text file')

    def worker(self, address):
        obj = Address(address)
        self.obj_list_addresses.append(obj)

    def get_good_addresses(self):
        print self.obj_list_addresses
Address Class:
import requests
from string import replace
from pprint import pprint

class Address(object):
    """
    Address class: give it an address and it will look up
    the address, store the JSON of the parcels that matched,
    then set status: good if exactly one parcel was returned, bad otherwise.
    """
    def __init__(self, address):
        self.address = address
        self.status = ''
        self.json_string = ''
        self.set_json_string()

    def get_address(self):
        return self.address

    def set_json_string(self):
        r = requests.get('urlbasegoeshere' + replace(self.address, " ", "+") + '&pagesize=40&page=1')
        self.json_string = r.json()
        self.set_status()

    def set_status(self):
        if len(self.json_string) == 1:
            self.status = 1
        else:
            self.status = 0

    def get_status(self):
        return self.status
Why are you using multiprocessing to create the address objects? Different processes don't share memory, i.e. they don't share objects. This is not a Python thing; it's the same in whatever language you use. Each worker appends to its own copy of obj_list_addresses in the child process, so the list in the parent process stays empty.
Replace these three lines
process = multiprocessing.Process(target=self.worker, args=(address,))
jobs.append(process)
process.start()
with
self.worker(address)
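If the address lookups are slow and you do want them to run in parallel, one option is a multiprocessing.Pool, which pickles the workers' return values back to the parent process instead of relying on shared state. A minimal sketch, assuming Address objects hold only plain data (strings, numbers) so they can be pickled; the function names here are just for illustration:
import multiprocessing

from address import Address


def build_address(address):
    # Runs in a worker process; the finished Address is pickled back to the parent.
    return Address(address)


def create_addresses(addresses):
    pool = multiprocessing.Pool()
    try:
        # map() collects the return values in the parent process, so the list
        # is populated even though the processes don't share memory.
        return pool.map(build_address, addresses)
    finally:
        pool.close()
        pool.join()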
It is probably the wrong title, but here is my problem.
I have a system composed of a microcontroller (MCU), a serial interface (SPI), a DAC (digital-to-analog converter), and an electrode (E). Each element is defined as a class in my Python model.
As a first step, I want to monitor the output on the electrode as I input something in the microcontroller.
Let's consider the following:
Input: 2 mA on the electrode for 1 ms.
The MCU sends the new DAC value via the SPI: 30 us
The DAC updates its register and output: 400 us
The MCU sends a switch-on command to the electrode: 1 us
The electrode is now outputting.
1 ms later, the MCU sends a switch-off command to the electrode: 1 us
The electrode no longer outputs.
My two biggest issues are: 1. how to take this time component into account, and 2. how to monitor the SPI line to determine whether something has to be done.
class Electrode:
    def __init__(self, id):
        self.id = id
        self.switch = False
        self.value = 0

    def output(self):
        if self.switch:
            return self.value
        else:
            return 0

class SPI:
    def __init__(self):
        self.msg = None

class MCU:
    def __init__(self):
        self.name = "MicroController"

    def send_SPI_msg(self, SPI, msg):
        SPI.msg = msg

class DAC:
    def __init__(self, id):
        self.id = id
        self.cs = 1
        self.register = None
        self.output = None

    def read_SPI_msg(self, SPI):
        message = SPI.msg
        # update register and output
My system actually has 16 DACs and electrodes and a field-programmable gate array which are all listening to the same SPI. What I described above is a fairly simplified version.
Question is: How to have the components check the value in SPI.msg regularly and act accordingly?
In reality, each component lives its own life, so actions are performed in parallel. Since I'm trying to simulate the timeline and the actions performed, I don't mind doing everything serially with a timeline variable (attribute) for each element. I just have trouble figuring out how to make my classes interact.
i.e. I can't do the following in Python, or I will get stuck:
class DAC:
    def __init__(self, id):
        # init
        pass

    def read_SPI_msg(self, SPI):
        while True:
            message = SPI.msg
            # update register and output if needed
Maybe event triggering could be used, but I don't know how.
Maybe multithreading, defining one thread per element?
EDIT: Current state:
class SPI:
    def __init__(self):
        self.attached_dacs = []
        self.attached_fpga = []
        self.attached_mcu = []

    def attach_device(self, device):
        if type(device) == DAC:
            self.attached_dacs.append(device)
        elif type(device) == FPGA:
            self.attached_fpga.append(device)
        elif type(device) == MCU:
            self.attached_mcu.append(device)

    def send_message(self, msg):
        for device in self.attached_dacs + self.attached_fpga:
            device.on_spi_message(self, msg)

class SpiAttachableDevice:
    def on_spi_message(self, SPI, message):
        if self.cs:
            self.execute_SPI_message(message)
        else:
            return None

class DAC(SpiAttachableDevice):
    def __init__(self, id):
        self.id = id
        self.cs = False  # Not listening

    def execute_SPI_message(self, message):
        # Do stuff
        pass

class FPGA(SpiAttachableDevice):
    def __init__(self):
        self.electrodes = list()
        self.cs = False  # Not listening

    def execute_SPI_message(self, message):
        # Do stuff
        pass

class MCU:
    def __init__(self):
        self.electrodes = list()
I'm assuming you want to keep it single-threaded and you don't use asyncio. In this case, you might want to employ the observer or pub/sub pattern when implementing the SPI:
class SPI:
    def __init__(self):
        self.attached_devices = []

    def attach_device(self, device):
        self.attached_devices.append(device)

    def send_message(self, msg):
        for device in self.attached_devices:
            device.on_spi_message(self, msg)

class SpiAttachableDevice:
    def on_spi_message(self, spi_instance, message):
        raise NotImplementedError('subclass me!')
So you can use it like this:
spi = SPI()
device_1 = Device()
device_2 = Device()
spi.attach_device(device_1)
spi.attach_device(device_2)
spi.send_message('hello')
I haven't done anything to be able to send SPI messages from Device objects, but you can update the abstraction accordingly.
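For example, a concrete subscriber could override on_spi_message like this. This is only a sketch: the (device_id, value) message format and the register attribute are assumptions, not part of your protocol:
class DAC(SpiAttachableDevice):
    def __init__(self, dac_id):
        self.dac_id = dac_id
        self.register = None

    def on_spi_message(self, spi_instance, message):
        # React only to messages addressed to this DAC.
        device_id, value = message
        if device_id == self.dac_id:
            self.register = value


spi = SPI()
dac = DAC(3)
spi.attach_device(dac)
spi.send_message((3, 1023))  # dac.register is now 1023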
You could simply move the while loop outside:
class SPI:
    def __init__(self):
        self.msg = None

class Component:
    def __init__(self, spi):
        self.spi = spi

    def tick(self, t):
        msg = self.spi.msg
        if msg == "...":
            ...

spi = SPI()
components = [Component(spi), ...]

for t in range(TOTAL_TIME):
    for component in components:
        component.tick(t)
As stated in your comment, you want more of a timeline view of what is happening. You can have an explicit timeline with which your components interact. External input (state changes) can be set up beforehand in the same manner. To order the timeline I'll just sort it each time, but it would probably be more performant to use something like a priority queue.
This differs from Vovanrock2002's answer mainly in that it does not recurse in each timestep and has an explicit timeline.
class Component:
    def __init__(self, timeline):
        self._timeline = timeline
        self._out = []  # all connected components

    def poke(self, time, changed_object, msg):
        return []

class Clock(Component):
    def __init__(self, timeline):
        Component.__init__(self, timeline)
        self._out.append(self)
        self.msg = "tick"
        self._timeline.append((200, self, self.msg))

    def poke(self, time, changed_object, msg):
        self._timeline.append((time + 200, self, self.msg))
timeline = []
spi = SPI(timeline)
components = [spi, Clock(timeline), ComponentA(timeline), ...]
timeline.append((500, spi, "new DAC value"))

while timeline:
    timeline.sort(key=lambda event: event[0], reverse=True)
    event = timeline.pop()
    time, changed_component, msg = event
    for connected_component in changed_component._out:
        connected_component.poke(time, changed_component, msg)
This way you have an explicit timeline (which you could also "record", just add each popped event to some list) and you can have arbitrarily connected components (e.g. if you want to have multiple SPIs).
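If the repeated sort ever becomes a bottleneck, the same loop works with heapq as the priority queue. A sketch, reusing spi and the components from above; the counter is only a tie-breaker so that two events with the same time never compare the component objects themselves:
import heapq
import itertools

counter = itertools.count()  # tie-breaker for events with equal times
timeline = []
heapq.heappush(timeline, (500, next(counter), spi, "new DAC value"))

while timeline:
    time, _, changed_component, msg = heapq.heappop(timeline)
    for connected_component in changed_component._out:
        connected_component.poke(time, changed_component, msg)
Components that schedule events (like Clock above) would then push the same 4-tuples via heapq.heappush instead of appending 3-tuples.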
I'm new to Python and trying to use class inheritance, and haven't been able to wrap my head around sharing variables. I have two classes so far, Scan and Ping:
scan.py
import random
import time

class Scan(object):
    """ Super class for scans """
    identifier = str(random.getrandbits(128))
    timestamp = int(time.time())
    results_dir = "/tmp/{}/".format(identifier)
    total_hosts = 0

    def __init__(self, target_hosts=None, target_ports=None):
        self.__target_hosts = target_hosts
        self.__target_ports = target_ports
        self.scan_string = "-sT -O --script auth,vuln"

    @property
    def target_hosts(self):
        return self.__target_hosts

    @target_hosts.setter
    def target_hosts(self, hosts):
        """ Sets target hosts for the scan.
        Nmap expects them single-spaced: '1 2 3' """
        self.__target_hosts = hosts.replace(", ", " ")
ping.py
import nmap
from .scan import Scan
class Ping(Scan):
    """ Ping sweep """
    def __init__(self, ping_string, hosts):
        super(Ping, self).__init__()
        self.ping_string = ping_string
        self.hosts = hosts
In my script that pretty much calls everything, I'm attempting:
from models.scan import Scan
from models.ping import Ping
s = Scan()
hosts = "192.168.0.0/24"
s.target_hosts = hosts
pinger = Ping(ping_string, s.target_hosts)
This doesn't make sense to me... if Ping inherits from Scan, why does this only work when I call s.target_hosts? Shouldn't I be able to access target_hosts from my Ping class, like Ping.target_hosts?
What might make this hard to understand is that it's an odd example: the hosts argument needed to create a Ping instance has to come from a property that is only accessible on an instance of Ping (or its parent, Scan).
Any method (or property) that takes self as a parameter relies on a specific instance of that class, which needs to be created first. If there were a staticmethod or classmethod, it would be callable directly on the class.
You can only get and set target_hosts from a specific instance of the class (in this case either Scan or Ping). If you call Scan.target_hosts or Ping.target_hosts, it will return something like <property at 0x51cd188>. This is basically returning an unusable function from the class. It's saying, "The class dictionary contains instructions here on how to return some useful stuff from AN INSTANCE of <class>."
If you make an instance of Ping or Scan, you now have access to your target_hosts property.
>>> scan = Scan()
>>> scan.target_hosts = 'host1, host2, host3'
>>> scan.target_hosts
'host1 host2 host3'
>>> ping = Ping('stuff', 'nonsense')
>>> ping.hosts
'nonsense'
>>> ping.target_hosts = 'host4, host5, host6'
>>> ping.target_hosts
'host4 host5 host6'
You could run your script with a dummy Ping instance. This should work.
from models.scan import Scan
from models.ping import Ping
dummy = Ping('ignore', 'this')
hosts = "192.168.0.0/24"
dummy.target_hosts = hosts
pinger = Ping(ping_string, dummy.target_hosts)
Or, if Scan had a staticmethod, Ping could use it as well.
class Scan(object):
    """ Super class for scans """
    identifier = str(random.getrandbits(128))
    timestamp = int(time.time())
    results_dir = "/tmp/{}/".format(identifier)
    total_hosts = 0

    def __init__(self, target_hosts=None, target_ports=None):
        self.__target_hosts = target_hosts
        self.__target_ports = target_ports
        self.scan_string = "-sT -O --script auth,vuln"

    @staticmethod
    def prep_hosts(hosts):
        return hosts.replace(", ", " ")

    ...
and then
from models.scan import Scan
from models.ping import Ping
hosts = "192.168.0.0/24"
input_hosts = Ping.prep_hosts(hosts) # or Scan.prep_hosts(hosts)
pinger = Ping(ping_string, input_hosts)
I'm coding a virtual assistant in Python, and I want to store each request, response and possible error in a database.
I'm using one class for request, another class for response and another class for error.
How can I create an ID variable that is shared across the respective classes' instances? For example:
First run of the program (the normal and correct running of the program):
request_id = 1
response_id = 1
Second run (an error occurred and stopped the program to proceed to the response class):
request_id = 2
error_id = 2
Third run (the program ran fine and the response class skipped id 2, which is the behavior that I want):
request_id = 3
response_id = 3
Note that in the third run response_id received the id 3, and response_id = 2 will never exist, because in the second run the process started with the request and stopped at the error.
The ID variable must always be unique, even when my program crashes and I have to restart it. I know I could grab the last id from the database when my program starts, but is there a way to do it without involving the database?
Since you are using a database to store the requests and responses, why don't you let the database generate this id for you?
This can be done by creating the table with an auto-incrementing integer primary key. Every request/response is inserted into the database, and the database generates a unique id for each record inserted.
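For example, with the sqlite3 module from the standard library (just a sketch; the table and column names are made up). In SQLite, an INTEGER PRIMARY KEY AUTOINCREMENT column guarantees that an id is never reused, even after crashes or deleted rows:
import sqlite3

conn = sqlite3.connect("assistant.db")
conn.execute(
    "CREATE TABLE IF NOT EXISTS requests ("
    "id INTEGER PRIMARY KEY AUTOINCREMENT, text TEXT)"
)

cur = conn.execute("INSERT INTO requests (text) VALUES (?)",
                   ("turn on the lights",))
conn.commit()
request_id = cur.lastrowid  # the unique id generated by the database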
A possible solution, if you don't want a DB, would be to use Pyro4 instead. You can use the following code:
Tracker.py
import Pyro4

@Pyro4.expose
@Pyro4.behavior(instance_mode="single")
class Tracker(object):
    def __init__(self):
        self._id = None

    def setId(self, value):
        print "set well :)", value
        self._id = value
        print self._id

    def getId(self):
        print "returning", self._id
        return self._id

daemon = Pyro4.Daemon()
uri = daemon.register(Tracker)
print("URI: ", uri)
daemon.requestLoop()
Status.py
import Pyro4

class Status(object):
    def __init__(self, id):
        self._id = id
        self._pyro = None

    def connect(self, target):
        self._pyro = Pyro4.Proxy(target)

    def updateId(self):
        if self._pyro is not None:
            self._pyro.setId(self._id)
            print "updated"
        else:
            print "please connect"

    def getId(self):
        if self._pyro is not None:
            return self._pyro.getId()
        else:
            print "please connect"
Success.py
from Status import *

class Success(Status):
    def __init__(self):
        super(Success, self).__init__(1)

Wait.py
from Status import *

class Wait(Status):
    def __init__(self):
        super(Wait, self).__init__(2)

Error.py
from Status import *

class Error(Status):
    def __init__(self):
        super(Error, self).__init__(3)
run.py
from Success import *
from Wait import *
from Error import *

# just an example
s = Success()
w = Wait()
e = Error()

s.connect("PYRO:obj_c98931f8b95d486a9b52cf0edc61b9d6@localhost:51464")
s.updateId()
print s.getId()

w.connect("PYRO:obj_c98931f8b95d486a9b52cf0edc61b9d6@localhost:51464")
w.updateId()
print s.getId()

e.connect("PYRO:obj_c98931f8b95d486a9b52cf0edc61b9d6@localhost:51464")
e.updateId()
print s.getId()
Of course you would need to use a different URI, but you should have a good idea now. Using Pyro you could also specify a static URI name if needed.
The output should be:
$ c:\Python27\python.exe run.py
updated
1
updated
2
updated
3
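If you would rather have the Tracker hand out the incrementing ids itself instead of having the clients push them, a small variation is possible (a sketch; nextId is a made-up method name, and the counter only survives as long as the Pyro daemon process keeps running):
@Pyro4.expose
@Pyro4.behavior(instance_mode="single")
class Tracker(object):
    def __init__(self):
        self._id = 0

    def nextId(self):
        # One shared counter for requests, responses and errors.
        self._id += 1
        return self._id
Each run of the assistant would then call nextId() once over the proxy and reuse that value for its request, response or error record.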
HTH
I'm trying to pass a string in Python that was obtained from netstat to awk for use in building a new object.
Any clue why this isn't working? Please excuse the horrible coding; I just started using Python today and am trying to learn the language.
import subprocess

class NetstatGenerator():
    def __init__(self):
        self.results = subprocess.Popen("netstat -anbp tcp", shell=True,
                                        stdout=subprocess.PIPE).stdout.read()
        self.list = []
        self.parse_results()

    def parse_results(self):
        lines = self.results.splitlines(True)
        for line in lines:
            if not str(line).startswith("tcp"):
                print("Skipping")
                continue
            line_data = line.split(" ")
            self.list.append(NetstatData(line_data[0], line_data[15],
                                         line_data[18], line_data[23]))

    def get_results(self):
        return self.list

class NetstatData():
    def __init__(self, protocol, local_address, foreign_address, state):
        self.protocol = protocol
        self.local_address = local_address
        self.foreign_address = foreign_address
        self.state = state

    def get_protocol(self):
        return str(self.protocol)

    def get_local_address(self):
        return str(self.local_address)

    def get_foreign_address(self):
        return str(self.foreign_address)

    def get_state(self):
        return str(self.state)

data = NetstatGenerator()
Sorry, netstat does not support -b on Linux, and I don't have a BSD box lying around.
Let's assume you have a list of lines, called netstat_output, with items like this:
tcp 0 0 127.0.0.1:9557 127.0.0.1:56252 ESTABLISHED -
To parse a single line, you split() it and pick elements at indexes 0, 3, 4, 5.
To store the items, you don't need to define a boilerplate holding class; namedtuple does what you want:
from collections import namedtuple
NetstatInfo = namedtuple('NetstatInfo',
                         ['protocol', 'local_address', 'remote_address', 'state'])
Now you can parse a line:
def parseLine(line):
    fields = line.split()
    if len(fields) == 7 and fields[0] in ('tcp', 'udp'):
        # alter this condition to taste;
        # remember that netstat injects column headers.
        # consider other checks, too.
        return NetstatInfo(fields[0], fields[3], fields[4], fields[5])
    # otherwise, this function implicitly returns None
Now something like this must be possible:
result = []
proc = subprocess.Popen(...)  # with stdout=subprocess.PIPE
for line in proc.stdout:
    item = parseLine(line)
    if item:  # parsed successfully
        result.append(item)
# now result is what you wanted; e.g. access result[0].remote_address
When I create an object in some method, I can't use it in any other method, so its use is limited to that one method. But I would like to create the object in such a way that I can use it in my whole module.
Here is the code of the module in which I want to create the object so I can use it in every method. (What it should do is not so important, but for those who care, it will be a network configurator that uses a netlink socket to communicate with the kernel.)
In the method configureBridge() (the fourth method from the beginning) I tried to create an object and use it (ip = IPRoute() ... ip.release()), and it worked, but I couldn't use the variable ip in any other function apart from configureBridge(). Could someone help me with that?
class PyrouteTwo(Configurator):
    def __init__(self, inRollback=False):
        super(PyrouteTwo, self).__init__(ConfigApplier(), inRollback)
        self.runningConfig = RunningConfig()
        logging.debug("testmark.PyR2.init")

    def begin(self):
        if self.configApplier is None:
            self.configApplier = ConfigApplier()
        if self.runningConfig is None:
            self.runningConfig = RunningConfig()
        logging.debug("testmark.PyR2.begin")

    def commit(self):
        self.configApplier = None
        self.runningConfig.save()
        self.runningConfig = None
        logging.debug("testmark.PyR2.commit")

    def configureBridge(self, bridge, **opts):
        self.configApplier.addBridge(bridge)
        if bridge.port:
            bridge.port.configure(**opts)
        self.configApplier.addBridgePort(bridge)
        self.configApplier.setIfaceConfigAndUp(bridge)
        logging.debug("testmark.PyR2.confBridge..")

        # I am using the object here:
        ip = IPRoute(fork=True)
        dev = ip.link_lookup(ifname='em1')[0]
        logging.debug("pyroute2 link_lookup output: %d", dev)
        ip.release()

    # there are some similar functions like configureVAN etc. in which I want
    # to use the object


class ConfigApplier(object):
    def _setIpConfig(self, iface):
        ipConfig = iface.ipConfig
        logging.debug("testmark.PyR2.ConfApplier.setIpConf.")
        if ipConfig.ipaddr:
            self.removeIpConfig(iface)
            ipwrapper.addrAdd(iface.name, ipConfig.ipaddr,
                              ipConfig.netmask)
            if ipConfig.gateway and ipConfig.defaultRoute:
                ipwrapper.routeAdd(['default', 'via', ipConfig.gateway])

    def removeIpConfig(self, iface):
        ipwrapper.addrFlush(iface.name)

    def setIfaceMtu(self, iface, mtu):
        ipwrapper.linkSet(iface, ['mtu', str(mtu)])

    def ifup(self, iface):
        ipwrapper.linkSet(iface.name, ['up'])
        if iface.ipConfig.bootproto == 'dhcp':
            dhclient = DhcpClient(iface.name)
            dhclient.start(iface.ipConfig.async)

    def ifdown(self, iface):
        ipwrapper.linkSet(iface.name, ['down'])
        dhclient = DhcpClient(iface.name)
        dhclient.shutdown()

    def setIfaceConfigAndUp(self, iface):
        if iface.ip:
            self._setIpConfig(iface)
        if iface.mtu:
            self.setIfaceMtu(iface.name, iface.mtu)
        self.ifup(iface)

    def addBridge(self, bridge):
        rc, _, err = execCmd([EXT_BRCTL, 'addbr', bridge.name])
        if rc != 0:
            raise ConfigNetworkError(ERR_FAILED_IFUP, err)

    def addBridgePort(self, bridge):
        rc, _, err = execCmd([EXT_BRCTL, 'addif', bridge.name,
                              bridge.port.name])
        if rc != 0:
            raise ConfigNetworkError(ERR_FAILED_IFUP, err)

    def removeBridge(self, bridge):
        rc, _, err = execCmd([EXT_BRCTL, 'delbr', bridge.name])
        if rc != 0:
            raise ConfigNetworkError(ERR_FAILED_IFDOWN, err)
        logging.debug("testmarkPyR2.ConfApplier.remBridge")

    # ...

    def createLibvirtNetwork(self, network, bridged, iface, qosInbound=None,
                             qosOutbound=None):
        netXml = libvirtCfg.createNetworkDef(network, bridged, iface,
                                             qosInbound, qosOutbound)
        libvirtCfg.createNetwork(netXml)
        logging.debug("testmarkPyR2.ConfApplier.createLibvirtNetwork")

    def removeLibvirtNetwork(self, network):
        libvirtCfg.removeNetwork(network)
        logging.debug("testmarkPyR2.ConfApplier.remLibvirtNetwork")
You can either declare it as an object-specific (instance) attribute by doing
self.ip = value  # now it's an instance attribute
or make it a class variable by assigning it a value in the class body:
class PyrouteTwo(Configurator):
    ip = None  # now it's a class variable
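Applied to the code above, a minimal sketch (reusing only the pyroute2 calls already shown in configureBridge(); where you create and release the handle depends on the lifetime you want):
class PyrouteTwo(Configurator):
    def __init__(self, inRollback=False):
        super(PyrouteTwo, self).__init__(ConfigApplier(), inRollback)
        self.runningConfig = RunningConfig()
        self.ip = IPRoute(fork=True)  # created once, shared by every method

    def configureBridge(self, bridge, **opts):
        # ... same as before, but using the shared handle:
        dev = self.ip.link_lookup(ifname='em1')[0]
        logging.debug("pyroute2 link_lookup output: %d", dev)

    def commit(self):
        self.ip.release()  # release the handle when you are done
        self.ip = None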