How to parse results from a subprocess - python

I'm trying to pass a string in Python that was obtained from netstat to awk for use in building a new object.
Any clue why this isn't working? Please excuse the horrible coding; I just started using Python today and am trying to learn how to use the language.
class NetstatGenerator():
    def __init__(self):
        import subprocess
        self.results = subprocess.Popen("netstat -anbp tcp", shell=True, stdout=subprocess.PIPE).stdout.read()
        self.list = []
        self.parse_results()

    def parse_results(self):
        lines = self.results.splitlines(True)
        for line in lines:
            if not str(line).startswith("tcp"):
                print("Skipping")
                continue
            line_data = line.split(" ")
            self.list.append(NetstatData(self.line_data[0], self.line_data[15], self.line_data[18], self.line_data[23]))

    def get_results(self):
        return self.list


class NetstatData():
    def __init__(self, protocol, local_address, foreign_address, state):
        self.protocol = protocol
        self.local_address = local_address
        self.foreign_address = foreign_address
        self.state = state

    def get_protocol(self):
        return str(self.protocol)

    def get_local_address(self):
        return str(self.local_address)

    def get_foreign_address(self):
        return str(self.foreign_address)

    def get_state(self):
        return str(self.state)


data = NetstatGenerator()

Sorry, netstat does not support -b on Linux, and I don't have a BSD box lying around.
Let's assume you have a list of lines, called netstat_output, with items like this:
tcp 0 0 127.0.0.1:9557 127.0.0.1:56252 ESTABLISHED -
To parse a single line, you split() it and pick elements at indexes 0, 3, 4, 5.
To store the items, you don't need to define a boilerplate holding class; namedtuple does what you want:
from collections import namedtuple

NetstatInfo = namedtuple('NetstatInfo',
                         ['protocol', 'local_address', 'remote_address', 'state'])
Now you can parse a line:
def parseLine(line):
    fields = line.split()
    if len(fields) == 7 and fields[0] in ('tcp', 'udp'):
        # alter this condition to taste;
        # remember that netstat injects column headers.
        # consider other checks, too.
        return NetstatInfo(fields[0], fields[3], fields[4], fields[5])
    # otherwise, this function implicitly returns None
Now something like this becomes possible:
result = []
for line in netstat_output:
    item = parseLine(line)
    if item:  # parsed successfully
        result.append(item)
# now result is what you wanted; e.g. access result[0].remote_address
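Putting the pieces together, here is a minimal end-to-end sketch. The subprocess plumbing, the decode step, and the get_connections name are my additions rather than part of the answer above, and the netstat flags are chosen to match the 7-column example line shown earlier; adjust them for your platform:
import subprocess
from collections import namedtuple

NetstatInfo = namedtuple('NetstatInfo',
                         ['protocol', 'local_address', 'remote_address', 'state'])

def parseLine(line):
    fields = line.split()
    if len(fields) == 7 and fields[0] in ('tcp', 'udp'):
        return NetstatInfo(fields[0], fields[3], fields[4], fields[5])
    return None  # headers and anything else we don't care about

def get_connections():
    # run netstat and capture its output as text
    proc = subprocess.Popen(["netstat", "-antp"], stdout=subprocess.PIPE)
    output = proc.communicate()[0].decode("utf-8", errors="replace")
    result = []
    for line in output.splitlines():
        item = parseLine(line)
        if item is not None:
            result.append(item)
    return result

connections = get_connections()
if connections:
    print(connections[0].remote_address)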

Related

Create objects in Python based on file

I'm coding a game in Python 3 and I need to create an unknown number of objects with each objects properties based on the contents of a file.
To explain, I'll dump some code here:
class attack(object):
    def __init__(self, name, power):
        self.name = name
        self.element = int(power)

from linecache import getline

Attacks = []
count = 1
while 1 == 1:
    line = getline("Attacks.txt", count)
    line = line.rstrip()
    if line == "":
        break
    else:
        linelist = line.split()
        #something involving "attack(linelist[1], linelist[2])"
        Attacks.append(item)
        count += 1
"Attacks.txt" contains this:
0 Punch 2
1 Kick 3
2 Throw 4
3 Dropkick 6
4 Uppercut 8
When the code is done, the list "Attacks" should contain 5 attack objects, one for each line of "Attacks.txt" with the listed name and power. The name is for the user only; in the code, each object will only be called for by its place in its list.
The idea is that the end user can change "Attacks.txt" (and other similar files) to add, remove or change entries; that way, they can modify my game without digging around in the actual code.
The issue is I have no idea how to create objects on the fly like this or if I even can. I already have working code that builds a list from a file; the only problem is the object creation.
My question, simply put, is how do I do this?
I had the same problem once:
How to call class constructor having its name in text variable? [Python]
You obviously have to define the classes whose names appear in the file; I assume that is done. You also need to have them in the current module namespace, globals():
from somelib import Punch, Kick, Throw, Dropkick, Uppercut
globals()[class_name](x, y)
line = getline("Attacks.txt", count)
line = line.rstrip()
linelist = line.split()
class_name = linelist[1]
value = linelist[2]
class_object = globals()[class_name]
item = class_object(value)
# or shortly in one line:
# item = globals()[linelist[1]](linelist[2])
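Putting that together with the loop from the question, a minimal sketch might look like this; it assumes classes named Punch, Kick, and so on exist in the current module and each takes a single power argument (that layout is my assumption, not something stated above):
from linecache import getline

Attacks = []
count = 1
while True:
    line = getline("Attacks.txt", count).rstrip()
    if line == "":
        break
    linelist = line.split()                 # e.g. ['0', 'Punch', '2']
    class_object = globals()[linelist[1]]   # look the class up by its name
    Attacks.append(class_object(linelist[2]))
    count += 1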
You could create a class like so, overloading operators to support these operations:
class Operation:
    def __init__(self, *header):
        self.__dict__ = dict(zip(['attack', 'power'], header))

class Attack:
    def __init__(self, *headers):
        self.__dict__ = {"attack{}".format(i): Operation(*a) for i, a in enumerate(headers, start=1)}

    def __setitem__(self, attack_type, new_power):
        self.__dict__ = {a: Operation(attack_type, new_power) if b.attack == attack_type else b for a, b in self.__dict__.items()}

    def __getitem__(self, attack):
        return [b.power for _, b in self.__dict__.items() if b.attack == attack]

    @property
    def power_listings(self):
        return '\n'.join(['{} {}'.format(*[b.attack, b.power]) for _, b in self.__dict__.items()])

with open('filename.txt') as f:
    f = [i.strip('\n').split() for i in f]

a = Attack(*f)
print(a.power_listings)
a['Throw'] = 6  # updating the power of any occurrence of Throw
Output:
Throw 6
Kick 3
Punch 2
Uppercut 8
Dropkick 6

Python losing list of objects

When I execute this program I get an empty list:
I am expecting it to create the list of objects and append the objects to the obj_list_addresses list.
Then when I call get_good_addresses() I expect it to go back through that list and execute code on each object in the list, but the list comes back empty [], almost like it's getting overwritten.
I am fairly new to Python and know that I am missing something important.
Main:
from address import Address
from address_processor import AddressProcessor
addresses=[]
addresses = open('test.txt').read().splitlines()
proccess_addresses = AddressProcessor(addresses)
proccess_addresses.create_addresses_obj()
proccess_addresses.get_good_addresses()
AddressProcessor Class:
import multiprocessing
from address import Address

class AddressProcessor(object):
    """AddressProcessor will process a txt file with addresses"""
    def __init__(self, addresses):
        self.addresses = addresses
        self.return_addresses = []
        self.obj_list_addresses = []

    def create_addresses_obj(self):
        jobs = []
        for address in self.addresses:
            process = multiprocessing.Process(target=self.worker, args=(address,))
            jobs.append(process)
            process.start()
        for job in jobs:
            job.join()
        print('created objects for addresses in text file')

    def worker(self, address):
        obj = Address(address)
        self.obj_list_addresses.append(obj)

    def get_good_addresses(self):
        print self.obj_list_addresses
Address Class:
import requests
from string import replace
from pprint import pprint

class Address(object):
    """
    This is address class send it an address it will look up
    the addy and return json string of the parcels that matched the address
    then update status if it was the only one returned its good if not its bad
    """
    def __init__(self, address):
        self.address = address
        self.status = ''
        self.json_string = ''
        self.set_json_string()

    def get_address(self):
        return self.address

    def set_json_string(self):
        r = requests.get('urlbasegoeshere'+replace(self.address," ","+")+'&pagesize=40&page=1')
        self.json_string = r.json
        self.set_status()

    def set_status(self):
        if len(self.json_string) == 1:
            self.status = 1
        elif len(self.json_string) != 1:
            self.status = 0

    def get_status(self):
        return self.status
Why are you using 'multiprocessing' to create address objects? Different processes don't share memory, i.e. they don't share objects. This is not a Python thing; it's the same whatever language you use.
Replace these three lines
process = multiprocessing.Process(target=self.worker, args=(address,))
jobs.append(process)
process.start()
with
self.worker(address)
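If the point of using multiprocessing was to parallelize the HTTP lookups, one alternative (my sketch, not part of the answer above, and it assumes Address instances can be pickled) is a multiprocessing.Pool, which sends each worker's return value back to the parent process:
import multiprocessing
from address import Address

def build_address(address):
    # runs in a worker process; the returned Address is pickled
    # back to the parent, which is how results cross process boundaries
    return Address(address)

class AddressProcessor(object):
    def __init__(self, addresses):
        self.addresses = addresses
        self.obj_list_addresses = []

    def create_addresses_obj(self):
        pool = multiprocessing.Pool()
        try:
            # map() collects the returned objects in the parent process
            self.obj_list_addresses = pool.map(build_address, self.addresses)
        finally:
            pool.close()
            pool.join()

    def get_good_addresses(self):
        print(self.obj_list_addresses)
Note that build_address is a module-level function because Pool must be able to pickle the callable it is given.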

Set with expiration not working inside running Python script

I have this class called DecayingSet which is a deque with expiration
from time import time

class DecayingSet:
    def __init__(self, timeout):  # timeout in seconds
        from collections import deque
        self.timeout = timeout
        self.d = deque()
        self.present = set()

    def add(self, thing):
        # Return True if `thing` not already in set,
        # else return False.
        result = thing not in self.present
        if result:
            self.present.add(thing)
            self.d.append((time(), thing))
        self.clean()
        return result

    def clean(self):
        # forget stuff added >= `timeout` seconds ago
        now = time()
        d = self.d
        while d and now - d[0][0] >= self.timeout:
            _, thing = d.popleft()
            self.present.remove(thing)
I'm trying to use it inside a running script, that connects to a streaming api.
The streaming api is returning urls that I am trying to put inside the deque to limit them from entering the next step of the program.
class CustomStreamListener(tweepy.StreamListener):
    def on_status(self, status, include_entities=True):
        longUrl = status.entities['urls'][0]['expanded_url']
        limit = DecayingSet(86400)
        l = limit.add(longUrl)
        print l
        if l == False:
            pass
        else:
            r = requests.get("http://api.some.url/show?url=%s" % longUrl)
When I use this class in an interpreter, everything is good.
But when the script is running and I repeatedly send in the same URL, l returns True every time, indicating that the URL is not inside the set when it's supposed to be. What gives?
Copying my comment ;-) I think the indentation is screwed up, but it looks like you're creating a brand new limit object every time on_status() is called. Then of course it would always return True: you'd always be starting with an empty limit.
Regardless, change this:
l = limit.add(longUrl)
print l
if l == False:
    pass
else:
    r = requests.get("http://api.some.url/show?url=%s" % longUrl)
to this:
if limit.add(longUrl):
    r = requests.get("http://api.some.url/show?url=%s" % longUrl)
Much easier to follow. It's usually the case that when you're comparing something to a literal True or False, the code can be made more readable.
Edit
I just saw in the interpreter that the var assignment is the culprit.
How would I use the same obj?
You could, for example, create the limit object at the module level. Cut and paste ;-)
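For example, a minimal sketch of that suggestion, reusing the names from the question (tweepy, requests, DecayingSet, and the API URL all come from the code above):
# create the DecayingSet once, at module level, so every
# on_status() call shares the same instance
limit = DecayingSet(86400)

class CustomStreamListener(tweepy.StreamListener):
    def on_status(self, status, include_entities=True):
        longUrl = status.entities['urls'][0]['expanded_url']
        if limit.add(longUrl):  # True only the first time within the timeout
            r = requests.get("http://api.some.url/show?url=%s" % longUrl)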

Error accessing class objects in python

I am having a problem accessing class instances. I am calling the class from a procedure, and the name of the instance is defined in a variable. I want the instance to be created with that name and then to access it, but when I access it I get an error. Can someone please help me resolve this issue?
class myclass:
    def __init__(self, object):
        self.name = object

def mydef():
    global a1
    b = "a1"
    b = myclass(b)

mydef()
print a1.name
Second Problem:
In my actual script, I have to create a large number of such instances from this function (around 100). Defining their names as globals would be painful; is there a way I could access those instances outside the function without having to declare them as global?
Modification:
class myclass:
    def __init__(self, object, typename):
        self.name = object
        self.typeid = typename

def mydef():
    file_han = open(file, "r")
    while True:
        line = file_han.readline()
        if not line:
            break
        start = line.find('"')
        end = line.find('"', start+1)
        string_f = line[start+1:end]
        myclass(string_f, 'a11')

mydef(file)
print def.name
print def.typeid
File Contents are :
a11 "def"
a11 "ghi"
a11 "eff"
Here's how I'd do it. I don't know why you're messing around with globals, if you'd care to explain, I'll update my answer.
class Myclass(object):
    def __init__(self, name):
        self.name = name

def mydef():
    return Myclass("a1")

a1 = mydef()
print a1.name
Gather your instances in a list:
instances = []
for x in range(1000):
    instances.append(Myclass("Instance {0}".format(x)))
print instances[42].name
Note the changes:
Class names should be capitalized
Use object as the base class of your classes (since python 2.2, but no longer necessary in 3.x)
Don't shadow the built-in object with your parameter name
Just use the string "a1" directly as a parameter instead of assigning it to a variable
Return something from the function instead of passing the result by global variable
RE: Comment
You haven't said anything about the format of these files, so I'll just give an example where the file to be read contains one class name per line, and nothing else:
def mydef(filename):
    ret = []
    with open(filename) as f:
        for line in f:
            # Call `strip` on line to remove newline and surrounding whitespace
            ret.append(Myclass(line.strip()))
    return ret
So if you have several files and wish to add all your instances from all your files to a large list, do it like this:
instances = []
for filename in ["myfile1", "myfile2", "myfile3"]:
    instances.extend(mydef(filename))
RE: OP Edit
def mydef(filename):
    ret = []
    with open(filename, "r") as file_han:
        for line in file_han:
            string_f = line.split('"')[1]
            ret.append(Myclass(string_f))
    return ret

i = mydef("name_of_file")
RE: Comment
Oh, you want to access them by name. Then return a dict instead:
def mydef(filename):
    ret = {}
    with open(filename, "r") as file_han:
        for line in file_han:
            string_f = line.split('"')[1]
            ret[string_f] = Myclass(string_f)
    return ret

i = mydef("name_of_file")
print i["ghi"].name  # should print "ghi"
RE: Comment
If I understand you correctly, you want to have it both ways -- index by both line number and name. Well then why don't you return both a list and a dictionary?
def mydef(filename):
    d = {}
    L = []
    with open(filename, "r") as file_han:
        for line in file_han:
            string_f = line.split('"')[1]
            instance = Myclass(string_f)
            d[string_f] = instance
            L.append(instance)
    return L, d

L, d = mydef("name_of_file")
print d["ghi"].name
print L[3]
print L.index(d["ghi"])
You could use the class itself as a repository for your instances, for example:
class Named(object):
    def __init__(self, name):
        self.name = name

    def __new__(cls, name):
        instance = super(Named, cls).__new__(cls)
        setattr(cls, name, instance)
        return instance

    def __repr__(self):
        return 'Named[%s]' % self.name

Named('hello')
Named('x123')
Named('this is not valid attribute name, but also working')
print(Named.hello, Named.x123, getattr(Named, 'this is not valid attribute name, but also working'))

How can I refer to a function not by name in its definition in python?

I am maintaining a little library of useful functions for interacting with my company's APIs and I have come across (what I think is) a neat question that I can't find the answer to.
I frequently have to request large amounts of data from an API, so I do something like:
class Client(object):
    def __init__(self):
        self.data = []

    def get_data(self, offset=0):
        done = False
        while not done:
            data = get_more_starting_at(offset)
            self.data.extend(data)
            offset += 1
            if not data:
                done = True
This works fine and allows me to restart the retrieval where I left off if something goes horribly wrong. However, since python functions are just regular objects, we can do stuff like:
def yo():
    yo.hi = "yo!"
    return None
and then we can interrogate yo about its properties later, like:
yo.hi => "yo!"
My question is: can I rewrite my class-based example to pin the data to the function itself, without referring to the function by name? I know I can do this by:
def get_data(offset=0):
    done = False
    get_data.data = []
    while not done:
        data = get_more_starting_from(offset)
        get_data.data.extend(data)
        offset += 1
        if not data:
            done = True
    return get_data.data
but I would like to do something like:
def get_data(offset=0):
    done = False
    self.data = []  # <===== this is the bit I can't figure out
    while not done:
        data = get_more_starting_from(offset)
        self.data.extend(data)  # <====== also this!
        offset += 1
        if not data:
            done = True
    return self.data  # <======== want to refer to the "current" object
Is it possible to refer to the "current" object by anything other than its name?
Something like "this", "self", or "memememe!" is what I'm looking for.
I don't understand why you want to do this, but it's what a fixed point combinator allows you to do:
import functools

def Y(f):
    @functools.wraps(f)
    def Yf(*args):
        return inner(*args)
    inner = f(Yf)
    return Yf

@Y
def get_data(f):
    def inner_get_data(*args):
        # This is your real get data function
        # define it as normal
        # but just refer to it as 'f' inside itself
        print 'setting get_data.foo to', args
        f.foo = args
    return inner_get_data

get_data(1, 2, 3)
print get_data.foo
So you call get_data as normal, and it "magically" knows that f means itself.
You could do this, but (a) the data is not per-function-invocation, but per function (b) it's much easier to achieve this sort of thing with a class.
If you had to do it, you might do something like this:
def ybother(a, b, c, yrselflambda=lambda: ybother):
    yrself = yrselflambda()
    #other stuff
The lambda is necessary, because you need to delay evaluation of the term ybother until something has been bound to it.
Alternatively, and increasingly pointlessly:
from functools import partial

def ybother(a, b, c, yrself=None):
    #whatever
    yrself.data = []  # this will blow up if the default argument is used
    #more stuff

bothered = partial(ybother, yrself=ybother)
Or:
def unbothered(a, b, c):
    def inbothered(yrself):
        #whatever
        yrself.data = []
    return inbothered, inbothered(inbothered)
This last version gives you a different function object each time, which you might like.
There are almost certainly introspective tricks to do this, but they are even less worthwhile.
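For completeness, one such introspective trick (my sketch, not something from the answer above, and only worth it as a curiosity) is to look the currently executing function up from its own stack frame, so its name never appears hard-coded in the body:
import inspect

def get_data(offset=0):
    # find the function object for the currently executing frame;
    # this works for plain module-level functions reachable via globals()
    frame = inspect.currentframe()
    me = frame.f_globals[frame.f_code.co_name]
    if not hasattr(me, 'data'):
        me.data = []
    me.data.append(offset)
    return me.data

print(get_data(1))  # [1]
print(get_data(2))  # [1, 2]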
Not sure what doing it like this gains you, but what about using a decorator?
import functools

def add_self(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not getattr(f, 'content', None):
            f.content = []
        return f(f, *args, **kwargs)
    return wrapper

@add_self
def example(self, arg1):
    self.content.append(arg1)
    print self.content

example(1)
example(2)
example(3)
OUTPUT
[1]
[1, 2]
[1, 2, 3]
