I'm new to Python and trying to use class inheritance, and haven't been able to wrap my head around sharing variables. I have two classes so far, Scan and Ping:
scan.py
import random
import time

class Scan(object):
    """ Super class for scans """
    identifier = str(random.getrandbits(128))
    timestamp = int(time.time())
    results_dir = "/tmp/{}/".format(identifier)
    total_hosts = 0

    def __init__(self, target_hosts=None, target_ports=None):
        self.__target_hosts = target_hosts
        self.__target_ports = target_ports
        self.scan_string = "-sT -O --script auth,vuln"

    @property
    def target_hosts(self):
        return self.__target_hosts

    @target_hosts.setter
    def target_hosts(self, hosts):
        """ Sets target hosts for the scan.
        Nmap expects hosts to be single-space separated: '1 2 3'. """
        self.__target_hosts = hosts.replace(", ", " ")
ping.py
import nmap
from .scan import Scan
class Ping(Scan):
    """ Ping sweep """
    def __init__(self, ping_string, hosts):
        super(Ping, self).__init__()
        self.ping_string = ping_string
        self.hosts = hosts
In my script that pretty much calls everything, I'm attempting:
from models.scan import Scan
from models.ping import Ping
s = Scan()
hosts = "192.168.0.0/24"
s.target_hosts = hosts
pinger = Ping(ping_string, s.target_hosts)
This line doesn't make sense to me ... if Ping inherits from Scan, why does this only work when I call s.target_hosts? Shouldn't I be able to call target_hosts from my Ping class, like Ping.target_hosts?
What might be making this hard to understand is that it's an odd example: the value for the hosts parameter needed to create a Ping instance has to come from a property that is only accessible on an instance of Ping (or of its parent, Scan).
Any method (or property) that has self as a parameter relies on a specific instance of that class, which needs to be created first. If there were a staticmethod or classmethod, it would be callable directly from the class.
You can only get and set target_hosts from a specific instance of the class (in this case either Scan or Ping). If you call Scan.target_hosts or Ping.target_hosts, it will return something like <property at 0x51cd188>: the property object itself rather than a usable value. It's saying, "The class dictionary contains instructions here on how to return some useful stuff from AN INSTANCE of <class>."
If you make an instance of Ping or Scan, you now have access to your target_hosts property.
>>> scan = Scan()
>>> scan.target_hosts = 'host1, host2, host3'
>>> scan.target_hosts
'host1 host2 host3'
>>> ping = Ping('stuff', 'nonsense')
>>> ping.hosts
'nonsense'
>>> ping.target_hosts = 'host4, host5, host6'
>>> ping.target_hosts
'host4 host5 host6'
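For comparison, accessing the property on the class itself just hands back the property object (the exact address will differ):
>>> Scan.target_hosts
<property object at 0x51cd188>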
You could run your script with a dummy Ping instance. This should work.
from models.scan import Scan
from models.ping import Ping
dummy = Ping('ignore', 'this')
hosts = "192.168.0.0/24"
dummy.target_hosts = hosts
pinger = Ping(ping_string, dummy.target_hosts)
Or, if Scan had a staticmethod, Ping could use it as well.
class Scan(object):
    """ Super class for scans """
    identifier = str(random.getrandbits(128))
    timestamp = int(time.time())
    results_dir = "/tmp/{}/".format(identifier)
    total_hosts = 0

    def __init__(self, target_hosts=None, target_ports=None):
        self.__target_hosts = target_hosts
        self.__target_ports = target_ports
        self.scan_string = "-sT -O --script auth,vuln"

    @staticmethod
    def prep_hosts(hosts):
        return hosts.replace(", ", " ")
...
and then
from models.scan import Scan
from models.ping import Ping
hosts = "192.168.0.0/24"
input_hosts = Ping.prep_hosts(hosts) # or Scan.prep_hosts(hosts)
pinger = Ping(ping_string, input_hosts)
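Similarly, if the helper were defined as a classmethod it would also be callable directly on either class; a minimal sketch of that variant:
class Scan(object):
    @classmethod
    def prep_hosts(cls, hosts):
        # Scan.prep_hosts("1, 2, 3") or Ping.prep_hosts("1, 2, 3") -> '1 2 3'
        return hosts.replace(", ", " ")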
Related
I am currently designing software which needs to manage a certain hardware setup.
The hardware setup is as follows:
System - The system contains two identical devices, and has certain functionality relative to the entire system.
Device - Each device contains two identical sub devices, and has certain functionality relative to both sub devices.
Sub device - Each sub device has 4 configurable entities (Controlled via the same hardware command - thus I don't count them as a sub-sub device).
What I want to achieve:
I want to control all configurable entities via the system manager (the entities are indexed serially), meaning I would be able to do the following:
system_instance = system_manager_class(some_params)
system_instance.some_func(0) # configure device_manager[0].sub_device_manager[0].entity[0]
system_instance.some_func(5) # configure device_manager[0].sub_device_manager[1].entity[1]
system_instance.some_func(8) # configure device_manager[1].sub_device_manager[1].entity[0]
What I have thought of doing:
I was thinking of creating an abstract class which contains all sub-device functions (each delegating through a conversion function) and having system_manager, device_manager and sub_device_manager inherit from it. That way all classes have the same function names and I can access them via the system manager.
Something around these lines :
class abs_sub_device():
    def convert_entity(self, entity_num):
        # Abstract placeholder, overridden by every concrete manager.
        sub_manager = None
        sub_entity_num = None
        return sub_manager, sub_entity_num

    def set_entity_to_2(self, entity_num):
        sub_manager, sub_manager_entity_num = self.convert_entity(entity_num)
        sub_manager.set_entity_to_2(sub_manager_entity_num)

class system_manager(abs_sub_device):
    def __init__(self):
        self.device_manager_list = []  # Initialize device list
        self.device_manager_list.append(device_manager())
        self.device_manager_list.append(device_manager())

    def convert_entity(self, entity_num):
        relevant_device_manager = self.device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_device_manager, relevant_entity

class device_manager(abs_sub_device):
    def __init__(self):
        self.sub_device_manager_list = []  # Initialize sub device list
        self.sub_device_manager_list.append(sub_device_manager())
        self.sub_device_manager_list.append(sub_device_manager())

    def convert_entity(self, entity_num):
        relevant_sub_device_manager = self.sub_device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_sub_device_manager, relevant_entity

class sub_device_manager(abs_sub_device):
    def __init__(self):
        self.entity_list = [0] * 4

    def set_entity_to_2(self, entity_num):
        self.entity_list[entity_num] = 2
The code is for generic understanding of my design, not for actual functionality.
The problem:
It seems to me that the system I am trying to design is really generic and that there must be a built-in Python way to do this, or that my entire object-oriented view of it is wrong.
I would really like to know if someone has a better way of doing this.
After much thinking, I think I found a pretty generic way to solve the issue, using a combination of decorators, inheritance and dynamic function creation.
The main idea is as follows:
1) Each layer dynamically creates all the relevant sub-layer functions for itself (inside the __init__ function, using a decorator on __init__).
2) Each dynamically created function converts the entity value according to a convert function (defined on abs_container_class and overridden by each layer), and calls the lower layer's function of the same name (see make_convert_function_method).
3) This basically makes all sub-layer functions available on the higher levels with zero code duplication.
def get_relevant_class_method_list(class_instance):
method_list = [func for func in dir(class_instance) if callable(getattr(class_instance, func)) and not func.startswith("__") and not func.startswith("_")]
return method_list
def make_convert_function_method(name):
def _method(self, entity_num, *args):
sub_manager, sub_manager_entity_num = self._convert_entity(entity_num)
function_to_call = getattr(sub_manager, name)
function_to_call(sub_manager_entity_num, *args)
return _method
def container_class_init_decorator(function_object):
def new_init_function(self, *args):
# Call the init function :
function_object(self, *args)
# Get all relevant methods (Of one sub class is enough)
method_list = get_relevant_class_method_list(self.container_list[0])
# Dynamically create all sub layer functions :
for method_name in method_list:
_method = make_convert_function_method(method_name)
setattr(type(self), method_name, _method)
return new_init_function
class abs_container_class():
    def _convert_entity(self, entity_num):
        # Abstract placeholder, overridden by every concrete container.
        sub_manager = None
        sub_entity_num = None
        return sub_manager, sub_entity_num
class system_manager(abs_container_class):
    @container_class_init_decorator
    def __init__(self):
        self.device_manager_list = []  # Initialize device list
        self.device_manager_list.append(device_manager())
        self.device_manager_list.append(device_manager())
        self.container_list = self.device_manager_list

    def _convert_entity(self, entity_num):
        relevant_device_manager = self.device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_device_manager, relevant_entity
class device_manager(abs_container_class):
    @container_class_init_decorator
    def __init__(self):
        self.sub_device_manager_list = []  # Initialize sub device list
        self.sub_device_manager_list.append(sub_device_manager())
        self.sub_device_manager_list.append(sub_device_manager())
        self.container_list = self.sub_device_manager_list

    def _convert_entity(self, entity_num):
        relevant_sub_device_manager = self.sub_device_manager_list[entity_num // 4]
        relevant_entity = entity_num % 4
        return relevant_sub_device_manager, relevant_entity
class sub_device_manager():
    def __init__(self):
        self.entity_list = [0] * 4

    def set_entity_to_value(self, entity_num, required_value):
        self.entity_list[entity_num] = required_value
        print("I set the entity to : {}".format(required_value))

# This is used for auto-completion purposes (using the PEP 484 type comment convention)
class auto_complete_class(system_manager, device_manager, sub_device_manager):
    pass

system_instance = system_manager()  # type: auto_complete_class
system_instance.set_entity_to_value(0, 3)
There is still a small issue with this solution: auto-completion does not work, since the highest-level class has almost no statically implemented functions.
To solve this I cheated a bit: I created an empty class which inherits from all layers and told the IDE, using the PEP 484 type comment convention, that it is the type of the instance being created (# type: auto_complete_class).
Does this solve your problem?
class EndDevice:
    def __init__(self, entities_num):
        self.entities = list(range(entities_num))

    @property
    def count_entities(self):
        return len(self.entities)

    def get_entity(self, i):
        return str(i)

class Device:
    def __init__(self, sub_devices):
        self.sub_devices = sub_devices

    @property
    def count_entities(self):
        return sum(sd.count_entities for sd in self.sub_devices)

    def get_entity(self, i):
        c = 0
        for index, sd in enumerate(self.sub_devices):
            if c <= i < sd.count_entities + c:
                return str(index) + " " + sd.get_entity(i - c)
            c += sd.count_entities
        raise IndexError(i)

SystemManager = Device  # They are exactly the same, which also means you can nest this indefinitely
sub_devices1 = [EndDevice(4) for _ in range(2)]
sub_devices2 = [EndDevice(4) for _ in range(2)]
system_manager = SystemManager([Device(sub_devices1), Device(sub_devices2)])
print(system_manager.get_entity(0))
print(system_manager.get_entity(5))
print(system_manager.get_entity(15))
I can't think of a better way to do this than OOP, but inheritance will only give you one set of low-level functions for the system manager, so it will be like having one device manager and one sub-device manager. A better approach would be, a bit like tkinter widgets, to have one system manager and initialise all the other managers like children in a tree, so:
system = SystemManager()
device1 = DeviceManager(system)
subDevice1 = SubDeviceManager(device1)
device2 = DeviceManager(system)
subDevice2 = SubDeviceManager(device2)
#to execute some_func on subDevice1
system.some_func(0, 0, *someParams)
We can do this by keeping a list of 'children' of the higher-level managers and having functions which reference the children.
class SystemManager:
def __init__(self):
self.children = []
def some_func(self, child, *params):
self.children[child].some_func(*params)
class DeviceManager:
def __init__(self, parent):
parent.children.append(self)
self.children = []
def some_func(self, child, *params):
self.children[child].some_func(*params)
class SubDeviceManager:
    def __init__(self, parent):
        parent.children.append(self)
        # This may or may not have sub-objects; if it does, it needs its own children list.

    def some_func(self, *params):
        # do some important stuff
        pass
Unfortunately, this does mean that if we want to call a function of a sub-device manager from the system manager without having lots of dots, we will have to define it again in the system manager. What you can do instead is use the built-in exec() function, which takes a string and runs it through the Python interpreter:
class SystemManager:
...
def execute(self, child, function, *args):
exec("self.children[child]."+function+"(*args)")
(and keep the device manager the same)
You would then write in the main program:
system.execute(0, "some_func", 0, *someArgs)
Which would call
device1.some_func(0, *someArgs)
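If you would rather not build source strings for exec(), the built-in getattr() does the same name-based dispatch; a minimal sketch of that variation:
class SystemManager:
    ...
    def execute(self, child, function, *args):
        # Look the method up by name on the child and call it directly.
        getattr(self.children[child], function)(*args)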
Here's what I'm thinking:
SystemManager().apply_to_entity(7, lambda e: setattr(e, 'value', 2))
class EntitySuperManagerMixin():
    """Mixin to handle logic for managing entity managers."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)  # Supports any kind of __init__ call.
        self._entity_manager_list = []

    def apply_to_entity(self, entity_num, action):
        relevant_entity_manager = self._entity_manager_list[entity_num // 4]
        relevant_entity_num = entity_num % 4
        return relevant_entity_manager.apply_to_entity(
            relevant_entity_num, action)
class SystemManager(EntitySuperManagerMixin):
def __init__(self):
super().__init__()
# An alias for _entity_manager_list to improve readability.
self.device_manager_list = self._entity_manager_list
self.device_manager_list.extend(DeviceManager() for _ in range(4))
class DeviceManager(EntitySuperManagerMixin):
def __init__(self):
super().__init__()
# An alias for _entity_manager_list to improve readability.
self.sub_device_manager_list = self._entity_manager_list
self.sub_device_manager_list.extend(SubDeviceManager() for _ in range(4))
class SubDeviceManager():
"""Manages entities, not entity managers, thus doesn't inherit the mixin."""
def __init__(self):
# Entities need to be classes for this idea to work.
self._entity_list = [Entity() for _ in range(4)]
def apply_to_entity(self, entity_num, action):
return action(self._entity_list[entity_num])
class Entity():
def __init__(self, initial_value=0):
self.value = initial_value
With this structure:
Entity-specific functions can stay bound to the Entity class (where they belong).
Manager-specific code needs to be updated in two places: EntitySuperManagerMixin and the lowest-level manager (which needs custom behaviour anyway, since it deals with the actual entities rather than other managers).
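A minimal usage sketch of the structure above (the lambda mirrors the call style suggested at the top of this answer):
system = SystemManager()
# Entity 7 is routed to device manager 1, sub-device manager 0, entity 3:
system.apply_to_entity(7, lambda e: setattr(e, 'value', 2))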
The way I see it, if you want to dynamically configure different parts of the system you need some sort of addressing, so if you pass in an ID or address along with a parameter, the system knows which subsystem you are talking about and configures that subsystem with the parameter.
OOP is quite OK for that, and you can then easily manipulate such data via bitwise operators.
Basic addressing is done in binary, so to do that in Python you first need to add an address static attribute to your class, with perhaps some further detailing if the system grows.
A basic implementation of the address system is as follows:
bin(171)
'0b10101011'
and if we divide it into nibbles
1010 - device manager 10
1011 - sub device manager 11
So in this example we have a system of 15 device managers and 15 sub-device managers, and every device and sub-device manager has its own integer address. Let's say you want to access device manager no. 10 with sub-device manager no. 11: you would need their combined address, which is 171 (0xAB, i.e. 1010 1011 in binary), and you would go with:
system.config(address, parameter )
Where the system.config function would look like this:
def config(self, address, parameter):
    device_manager = (address & 0xF0) >> 4  # 10
    sub_device_manager = address & 0x0F     # 11
    if device_manager not in range(len(self.devices)):
        raise LookupError("device manager not found")
    if sub_device_manager not in range(len(self.devices[device_manager].device)):
        raise LookupError("sub device manager not found")
    self.devices[device_manager].device[sub_device_manager].implement(parameter)
In layman's terms, you would tell the system that sub-device 11 of device 10 needs to be configured with this parameter.
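To make the nibble arithmetic concrete, here is a minimal sketch of packing and unpacking such an address (the helper names are illustrative, not from the answer's code):
def pack_address(device_manager, sub_device_manager):
    # Each index occupies one 4-bit nibble.
    return (device_manager << 4) | sub_device_manager

def unpack_address(address):
    return (address & 0xF0) >> 4, address & 0x0F

addr = pack_address(10, 11)   # 0xAB == 171
print(unpack_address(addr))   # (10, 11)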
So how would this setup look in Python, as a base systems class that can then be composed/inherited into different classes:
class systems(object):
    parent = None  # global parent element, defaults to None for simplicity

    def __init__(self):
        self.addrMASK = 0xf   # address mask for that nibble
        self.addr = 0x1       # default address of that element
        self.devices = []     # list of instances of device
        self.data = {         # some arbitrary data
            "param1": "param_val",
            "param2": "param_val",
            "param3": "param_val",
        }

    def addSubSystem(self, sub_system):  # connects elements to each other
        # check for validity
        if not isinstance(sub_system, systems):
            raise TypeError("defined input is not a system type")  # to prevent passing an integer or something
        # append a device to system data
        self.devices.append(sub_system)
        # propagate address changes from the sub device manager up to the system
        obj = self
        while 1:
            if obj.parent is not None:
                obj.parent.addrMASK <<= 4  # bitshift 4 bits
                obj.parent.addr <<= 4      # bitshift 4 bits
                obj = obj.parent
            else:
                break
        # self management; I'm a lazy guy so I added this part so I wouldn't have to reset addresses manually
        self.addrMASK <<= 4  # bitshift 4 bits
        self.addr <<= 4      # bitshift 4 bits
        # this is added so the object's address corresponds to its place in the list;
        # this could be done more eloquently but I didn't know what your limitations are
        if self.devices:
            self.devices[len(self.devices) - 1].addr += 1
            self.devices[len(self.devices) - 1].parent = self

    # helpful for checking data ... gives the address of the system
    def __repr__(self):
        return "system at {0:X}, {1:0X}".format(self.addr, self.addrMASK)

    # extra helpful, lists the data as well
    def __str__(self):
        data = ['{} : {}\n'.format(k, v) for k, v in self.data.items()]
        return " ".join([repr(self), '\n', *data])

    # checking for data, skips looping over sub systems
    def __contains__(self, system_index):
        return system_index - 1 in range(len(self.data))

    # applying a parameter change -- just an example
    def apply(self, par_dict):
        if not isinstance(par_dict, dict):
            raise TypeError("parameter must be a dict type")
        if any(key in self.data.keys() for key in par_dict.keys()):
            for k, v in par_dict.items():
                if k in self.data.keys():
                    self.data[k] = v
                else:
                    pass
        else:
            pass

    # implementing parameters through addresses
    def implement(self, address, parameter_dictionary):
        if address & self.addrMASK == self.addr:
            if address - self.addr != 0:
                item = (address - self.addr) >> 4
                self.devices[item - 1].implement(address - self.addr, parameter_dictionary)
            else:
                self.apply(parameter_dictionary)
a = systems()
b = systems()
a.addSubSystem(b)
c = systems()
b.addSubSystem(c)
print('a')
print(a)
print('')
print('b')
print(b)
print('')
print('c')
print(c)
print('')
a.implement(0x100,{"param1":"a"})
a.implement(0x110,{"param1":"b"})
a.implement(0x111,{"param1":"c"})
print('a')
print(a)
print('')
print('b')
print(b)
print('')
print('c')
print(c)
print('')
I'm coding a virtual assistant in Python, and I want to store each request, response and possible error in a database.
I'm using one class for request, another class for response and another class for error.
How can I create an ID variable that is shared across the respective classes' instances, for example:
First run of the program (the normal and correct running of the program):
request_id = 1
response_id = 1
Second run (an error occurred and stopped the program before it could proceed to the response class):
request_id = 2
error_id = 2
Third run (the program ran fine and the response class skipped id 2 - that is the behavior I want):
request_id = 3
response_id = 3
Note that in the third run, response_id received the id 3 and response_id = 2 will never exist, because in the second run the process started with the request and stopped at the error.
The ID variable must always be unique, even when my program crashes and I have to restart it. I know I could grab the last id from the database when my program starts, but is there a way to do it without involving the database?
Since you are using a database to store the requests and responses, why don't you let the database generate this id for you?
This can be done by creating the table with an auto-incrementing integer primary key. Every request/response is inserted into the database, and the database generates a unique id for each record inserted.
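For example, with sqlite3 from the standard library (the table and column names here are just illustrative):
import sqlite3

conn = sqlite3.connect("assistant.db")
conn.execute("""CREATE TABLE IF NOT EXISTS requests (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    text TEXT)""")

cur = conn.execute("INSERT INTO requests (text) VALUES (?)", ("hello",))
conn.commit()
request_id = cur.lastrowid  # unique id generated by the database
print(request_id)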
A possible solution would be to use Pyro4 instead of a DB, if you don't want one. You can use the following code:
Tracker.py
import Pyro4

@Pyro4.expose
@Pyro4.behavior(instance_mode="single")
class Tracker(object):
    def __init__(self):
        self._id = None

    def setId(self, value):
        print "set well :)", value
        self._id = value
        print self._id

    def getId(self):
        print "returning", self._id
        return self._id

daemon = Pyro4.Daemon()
uri = daemon.register(Tracker)
print("URI: ", uri)
daemon.requestLoop()
Status.py
import Pyro4
class Status(object):
def __init__(self, id):
self._id = id
self._pyro = None
def connect(self, target):
self._pyro = Pyro4.Proxy(target)
def updateId(self):
if ( not self._pyro is None ):
self._pyro.setId(self._id)
print "updated"
else:
print "please connect"
def getId(self):
if ( not self._pyro is None ):
return self._pyro.getId()
else:
print "please connect"
Success.py
from Status import *
class Success(Status):
def __init__(self):
super(Success,self).__init__(1)
Wait.py
from Status import *
class Wait(Status):
    def __init__(self):
        super(Wait,self).__init__(2)
Error.py
from Status import *
class Error(Status):
def __init__(self):
super(Error,self).__init__(3)
run.py
from Success import *
from Wait import *
from Error import *
#just an example
s = Success()
w = Wait()
e = Error()
s.connect("PYRO:obj_c98931f8b95d486a9b52cf0edc61b9d6#localhost:51464")
s.updateId()
print s.getId()
w.connect("PYRO:obj_c98931f8b95d486a9b52cf0edc61b9d6#localhost:51464")
w.updateId()
print s.getId()
e.connect("PYRO:obj_c98931f8b95d486a9b52cf0edc61b9d6#localhost:51464")
e.updateId()
print s.getId()
Of course you would need to use your own URI, but you should get the idea. Using Pyro you could also register the object under a fixed name if needed.
The output should be:
$ c:\Python27\python.exe run.py
updated
1
updated
2
updated
3
HTH
When I execute this program I get an empty list:
I am expecting it to create the list of objects and append the objects to the obj_list_addresses list.
Then when I call get_good_addresses() I expect it to go back through that list and execute code on each object in it, but the list comes back empty [], almost like it's getting overwritten.
I am fairly new to python and know that I am missing something important.
Main:
from address import Address
from address_processor import AddressProcessor
addresses=[]
addresses = open('test.txt').read().splitlines()
proccess_addresses = AddressProcessor(addresses)
proccess_addresses.create_addresses_obj()
proccess_addresses.get_good_addresses()
AddressProcessor Class:
import multiprocessing
from address import Address
class AddressProcessor(object):
"""AddressProcessor will process a txt file with addresses"""
def __init__(self, addresses):
self.addresses = addresses
self.return_addresses = []
self.obj_list_addresses = []
def create_addresses_obj(self):
jobs = []
for address in self.addresses:
process = multiprocessing.Process(target=self.worker, args=(address,))
jobs.append(process)
process.start()
for job in jobs:
job.join()
print('created objects for addresses in text file')
def worker(self, address):
obj = Address(address)
self.obj_list_addresses.append(obj)
def get_good_addresses(self):
print self.obj_list_addresses
Address Class:
import requests
from string import replace
from pprint import pprint
class Address(object):
"""
This is address class send it an address it will look up
the addy and return json string of the parcels that matched the address
then update status if it was the only one returned its good if not its bad
"""
def __init__(self, address):
self.address = address
self.status = ''
self.json_string = ''
self.set_json_string()
def get_address(self):
return self.address
def set_json_string(self):
r = requests.get('urlbasegoeshere'+replace(self.address," ","+")+'&pagesize=40&page=1')
self.json_string = r.json
self.set_status()
def set_status(self):
if len(self.json_string) == 1:
self.status = 1
elif len(self.json_string)!=1:
self.status = 0
def get_status(self):
return self.status
Why are you using 'multiprocessing' to create address objects? Different processes don't share memory, i.e. they don't share objects. This is not a Python thing; it's the same whatever language you use.
Replace these three lines
process = multiprocessing.Process(target=self.worker, args=(address,))
jobs.append(process)
process.start()
with
self.worker(address)
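With that change, create_addresses_obj is just an ordinary loop running in the parent process, so the list actually gets populated; a minimal sketch:
    def create_addresses_obj(self):
        for address in self.addresses:
            self.worker(address)  # runs in this process, so obj_list_addresses gets filled
        print('created objects for addresses in text file')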
When I create an object in some method, I can't use it in any other method, so its use is limited to that method. But I would like to create the object in such a way that I can use it in my whole module.
Here is the code of the module in which I want to create the object so I can use it in every method. (It's not so important what it does, but for those who care, it's a network configurator which uses a netlink socket to communicate with the kernel.)
In the method configureBridge() (the 4th method from the beginning) I tried to create an object and use it (ip = IPRoute() ... ip.release()) and it worked, but I couldn't use the object variable ip in any other function apart from configureBridge(). Could someone help me with that?
class PyrouteTwo(Configurator):
def __init__(self, inRollback=False):
super(PyrouteTwo, self).__init__(ConfigApplier(), inRollback)
self.runningConfig = RunningConfig()
logging.debug("testmark.PyR2.init")
def begin(self):
if self.configApplier is None:
self.configApplier = ConfigApplier()
if self.runningConfig is None:
self.runningConfig = RunningConfig()
logging.debug("testmark.PyR2.begin")
def commit(self):
self.configApplier = None
self.runningConfig.save()
self.runningConfig = None
logging.debug("testmark.PyR2.commit")
def configureBridge(self, bridge, **opts):
self.configApplier.addBridge(bridge)
if bridge.port:
bridge.port.configure(**opts)
self.configApplier.addBridgePort(bridge)
self.configApplier.setIfaceConfigAndUp(bridge)
logging.debug("testmark.PyR2.confBridge..")
# I am using the object here:
ip = IPRoute(fork=True)
dev = ip.link_lookup(ifname='em1')[0]
logging.debug("pyroute2 link_lookup output: %d", dev)
ip.release()
# there are some similar functions like configureVAN etc. in which I want
# to use the object
class ConfigApplier(object):
def _setIpConfig(self, iface):
ipConfig = iface.ipConfig
logging.debug("testmark.PyR2.ConfApplier.setIpConf.")
if ipConfig.ipaddr:
self.removeIpConfig(iface)
ipwrapper.addrAdd(iface.name, ipConfig.ipaddr,
ipConfig.netmask)
if ipConfig.gateway and ipConfig.defaultRoute:
ipwrapper.routeAdd(['default', 'via', ipConfig.gateway])
def removeIpConfig(self, iface):
ipwrapper.addrFlush(iface.name)
def setIfaceMtu(self, iface, mtu):
ipwrapper.linkSet(iface, ['mtu', str(mtu)])
def ifup(self, iface):
ipwrapper.linkSet(iface.name, ['up'])
if iface.ipConfig.bootproto == 'dhcp':
dhclient = DhcpClient(iface.name)
dhclient.start(iface.ipConfig.async)
def ifdown(self, iface):
ipwrapper.linkSet(iface.name, ['down'])
dhclient = DhcpClient(iface.name)
dhclient.shutdown()
def setIfaceConfigAndUp(self, iface):
if iface.ip:
self._setIpConfig(iface)
if iface.mtu:
self.setIfaceMtu(iface.name, iface.mtu)
self.ifup(iface)
def addBridge(self, bridge):
rc, _, err = execCmd([EXT_BRCTL, 'addbr', bridge.name])
if rc != 0:
raise ConfigNetworkError(ERR_FAILED_IFUP, err)
def addBridgePort(self, bridge):
rc, _, err = execCmd([EXT_BRCTL, 'addif', bridge.name,
bridge.port.name])
if rc != 0:
raise ConfigNetworkError(ERR_FAILED_IFUP, err)
def removeBridge(self, bridge):
rc, _, err = execCmd([EXT_BRCTL, 'delbr', bridge.name])
if rc != 0:
raise ConfigNetworkError(ERR_FAILED_IFDOWN, err)
logging.debug("testmarkPyR2.ConfApplier.remBridge")
# ...
def createLibvirtNetwork(self, network, bridged, iface, qosInbound=None,
qosOutbound=None):
netXml = libvirtCfg.createNetworkDef(network, bridged, iface,
qosInbound, qosOutbound)
libvirtCfg.createNetwork(netXml)
logging.debug("testmarkPyR2.ConfApplier.createLibvirtNetwork")
def removeLibvirtNetwork(self, network):
libvirtCfg.removeNetwork(network)
logging.debug("testmarkPyR2.ConfApplier.remLibvirtNetwork")
You can either declare it as an object-specific (instance) attribute by doing
self.ip = value # Now it's an instance variable
or make it a class variable before assigning it a value.
class PyrouteTwo(Configurator):
    ip = None # Now it's a class variable
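For the question's module, a minimal sketch of the instance-attribute approach would be to create the IPRoute object once in __init__ and reuse it from any method (this reuses the IPRoute calls already shown in configureBridge and assumes the surrounding Configurator/ConfigApplier/RunningConfig classes are available):
from pyroute2 import IPRoute

class PyrouteTwo(Configurator):
    def __init__(self, inRollback=False):
        super(PyrouteTwo, self).__init__(ConfigApplier(), inRollback)
        self.runningConfig = RunningConfig()
        self.ip = IPRoute(fork=True)   # created once, usable from every method

    def configureBridge(self, bridge, **opts):
        dev = self.ip.link_lookup(ifname='em1')[0]
        logging.debug("pyroute2 link_lookup output: %d", dev)

    def commit(self):
        self.runningConfig.save()
        self.ip.release()              # release the netlink socket when finished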
I am new to Python and finding it really difficult to understand how to send files using sockets over a TCP connection.
I found this code in another question that seems useful:
Client Side
def _sendFile(self, path):
sendfile = open(path, 'rb')
data = sendfile.read()
self._con.sendall(encode_length(len(data))) # Send the length as a fixed size message
self._con.sendall(data)
# Get Acknowledgement
self._con.recv(1) # Just 1 byte
Server Side
def _recieveFile(self, path):
    LENGTH_SIZE = 4 # length is a 4 byte int.
    # Receive the file from the client
    writefile = open(path, 'wb')
    length = decode_length(self.con.recv(LENGTH_SIZE)) # Read a fixed length integer, 2 or 4 bytes
    while (length):
        rec = self.con.recv(min(1024, length))
        writefile.write(rec)
        length -= len(rec)
    self.con.send(b'A') # single character A to prevent issues with buffering
Now I have a few problems with this code.
First
self._con.sendall(encode_length(len(data)))
In this line it gives me an error saying encode_length is not defined.
Secondly, these are functions that send and receive a file.
Where do I call them?
Do I first form a TCP connection and then call these functions?
And how exactly do I call them? If I call them directly, I get an error on the client side saying _sendFile(self, path) takes two arguments (since I am not passing self, just the path).
Thirdly, I am using a function from the os library to get the complete path, so I am calling the function like
_sendFile(os.path.abspath("file_1.txt"))
Is this the correct way to pass the argument?
Sorry, I know this question is pretty basic, but everywhere online I can basically find the function, not how to call it.
Right now this is how I am calling the function:
serverIP = '192.168.0.102'
serverPort = 21000
clientSocket = socket(AF_INET, SOCK_STREAM)
message = "Want to Backup a Directory"
clientSocket.connect((serverIP, serverPort))
_sendFile(os.path.abspath("file_1.txt"))
which is basically wrong.
I am using the same computer for both the client and the server, running Python on Ubuntu in a terminal.
First problem:
It's because you simply haven't defined the functions encode_length/decode_length.
Second problem:
Your function is: def _sendFile(self, path): ....
Do you know how to use self? It is used in classes. So either define the function without self, or use a class:
Example:
from socket import *
class Client(object):
    def __init__(self):
        self.clientSocket = socket(AF_INET, SOCK_STREAM)

    def connect(self, addr):
        self.clientSocket.connect(addr)

    def _sendFile(self, path):
        sendfile = open(path, 'rb')
        data = sendfile.read()
        self.clientSocket.sendall(encode_length(len(data))) # Send the length as a fixed size message
        self.clientSocket.sendall(data)
        # Get Acknowledgement
        self.clientSocket.recv(1) # Just 1 byte
>>> client = Client()
>>> client.connect(("192.168.0.102", 21000))
>>> client._sendFile(os.path.abspath("file_1.txt")) # If this file is in your current directory, you may just use "file_1.txt"
And the same (almost) goes for the server.
Where do you define these functions? In the code, of course! What should your functions do?
OK, an example:
def encode_length(l):
#Make it 4 bytes long
l = str(l)
while len(l) < 4:
l = "0"+l
return l
# Example of using
>>> encode_length(4)
'0004'
>>> encode_length(44)
'0044'
>>> encode_length(444)
'0444'
>>> encode_length(4444)
'4444'
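decode_length, which the receiving side calls, isn't defined anywhere either; under the same fixed-width assumption a matching sketch could be:
def decode_length(data):
    # Inverse of encode_length: turn the fixed-width string back into an int.
    return int(data)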
About self:
Just a little bit:
self refers to your current object, e.g.:
class someclass:
def __init__(self):
self.var = 10
def get(self):
return self.var
>>> c = someclass()
>>> c.get()
10
>>> c.var = 20
>>> c.get()
20
>>> someclass.get(c)
20
>>>
How does someclass.get(c) work?
While executing someclass.get(c), we are not creating a new instance of someclass.
When we call .get() on a someclass instance, it automatically sets self to that instance. So someclass.get(c) == c.get().
And if we try to do someclass.get(), self is not provided, so it will raise an error:
TypeError: unbound method get() must be called with someclass instance as first argument (got nothing instead)
You can use a decorator to call functions on the class itself (not an instance):
class someclass:
    def __init__(self):
        self.var = 10

    def get(self):
        return 10  # someclass.get() raises an error

    @classmethod
    def get2(cls):
        return 10  # someclass.get2() returns 10!
Sorry for my bad explanations, my English is not perfect
Here are some links:
server.py
client.py