I am trying to learn how to execute commands on a remote system using Python's Paramiko library. I am receiving partial output of 12519 characters. How can I get the complete result, which should be 12550 characters or more?
Here is the code, taken from another site on the internet:
def _run_poll(self, session, timeout, input_data):
    '''
    Poll until the command completes.

    @param session The session.
    @param timeout The timeout in seconds.
    @param input_data The input data.
    @returns the output
    '''
    interval = 0.1
    maxseconds = timeout
    maxcount = maxseconds / interval

    # Poll until completion or timeout.
    # Note that we cannot directly use the stdout file descriptor
    # because it stalls at 64K bytes (65536).
    input_idx = 0
    timeout_flag = False
    self.info('polling (%d, %d)' % (maxseconds, maxcount))
    start = datetime.datetime.now()
    start_secs = time.mktime(start.timetuple())
    output = ''
    session.setblocking(0)
    while True:
        if session.recv_ready():
            data = session.recv(self.bufsize)
            output += data
            self.info('read %d bytes, total %d' % (len(data), len(output)))
            print('[{}]'.format(output))
            # this prints partial output of 12519 characters
        if session.send_ready():
            # We received a potential prompt.
            # In the future this could be made to work more like
            # pexpect with pattern matching.
            if input_idx < len(input_data):
                data = input_data[input_idx] + '\n'
                input_idx += 1
                self.info('sending input data %d' % (len(data)))
                session.send(data)
        self.info('session.exit_status_ready() = %s' % (str(session.exit_status_ready())))
        if session.exit_status_ready():
            print('here')  # this line is also printed
            break

        # Timeout check
        now = datetime.datetime.now()
        now_secs = time.mktime(now.timetuple())
        et_secs = now_secs - start_secs
        self.info('timeout check %d %d' % (et_secs, maxseconds))
        if et_secs > maxseconds:
            self.info('polling finished - timeout')
            timeout_flag = True
            break
        time.sleep(0.200)
    self.info('polling loop ended')
    if session.recv_ready():
        data = session.recv(self.bufsize)
        output += data
        self.info('read %d bytes, total %d' % (len(data), len(output)))
    self.info('polling finished - %d output bytes' % (len(output)))
    if timeout_flag:
        self.info('appending timeout message')
        output += '\nERROR: timeout after %d seconds\n' % (timeout)
    session.close()
    return output
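For what it's worth, the few missing characters are probably still in transit when exit_status_ready() first returns True, and the single recv_ready() check after the loop can run before they arrive. A minimal sketch of a final drain that instead reads until EOF or until a short grace period expires (assuming session is a paramiko.Channel; the helper name and grace value are my own):

import socket

def drain_until_eof(session, bufsize=4096, grace=2.0):
    """Read whatever is still pending on the channel after the exit status arrives."""
    session.settimeout(grace)  # recv() now raises socket.timeout when idle
    output = ''
    while True:
        try:
            data = session.recv(bufsize)
        except socket.timeout:
            break  # nothing arrived within the grace period
        if not data:
            break  # recv() returns '' once the server closes the stream (EOF)
        output += data
    return output

Calling this in place of the single post-loop recv_ready() check should pick up the trailing bytes.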
I want to test a data center routing algorithm using Mininet. The traffic needs to conform to certain parameters:
1. It should consist of "files" of various sizes (note that these don't actually have to be files; traffic generated in, e.g., iperf is OK, as long as the size is controllable);
2. The file sizes should be drawn from a particular distribution;
3. The source/destination host pairs between which data is sent should be selected randomly for a given file;
4. The interval between when a file is sent and when its successor is sent should be random; and
5. If a huge file that takes a long time to transfer is sent between two hosts, it should still be possible to send data between other hosts in the network.
Points 1-4 are taken care of. I've been struggling with #5 for days and I can't get it working properly. My initial thought was to spawn subprocesses/threads to send iperf commands to the hosts:
while count < 10:
    if (count % 2) == 0:
        host_pair = net.get("h1", "h2")
    else:
        host_pair = net.get("h3", "h4")
    p = multiprocessing.Process(target=test_custom_iperf, args=(net, host_pair, nbytes))
    p.daemon = True
    p.start()
    time.sleep(random.uniform(0, 1))
The function test_custom_iperf is modeled on the Mininet Python API's iperf() test, extended to include the -n transfer-size parameter:
client, server = host_pair
print client, server
output( '*** Iperf: testing TCP bandwidth between',
        client, 'and', server, '\n' )
server.sendCmd( 'iperf -s' )
if not waitListening( client, server.IP(), 5001 ):
    raise Exception( 'Could not connect to iperf on port 5001' )
cliout = client.cmd( 'iperf -c ' + server.IP() + ' -n %d' % nbytes )
print cliout
server.sendInt()
servout = server.waitOutput()
debug( 'Server output: %s\n' % servout )
result = [ net._parseIperf( servout ), net._parseIperf( cliout ) ]
output( '*** Results: %s\n' % result )
Making this non-blocking has been extremely difficult. For some reason I need to send server.sendInt(), and to do that I need to wait for the client's command to finish.
I'd appreciate any advice on what I can try to make this work!
I took a hint from here and used Mininet's host.popen() method to send the data around. Hopefully this helps someone else:
def send_one_file(file_dir, host_pair, files):
    src, dst = host_pair  # a tuple of Mininet node objects
    # Choose a random file from files
    rand_fname = random.sample(files, 1)[0]
    rand_path = os.path.join(file_dir, rand_fname)
    port = random.randint(1024, 65535)
    # Start listening at the destination host
    dst_cmd = 'nc -l %d > /home/mininet/sent/%s.out' % (port, rand_fname)
    print os.path.getsize(rand_path)
    dst.popen( dst_cmd, shell=True )
    # Send file from the source host
    src_cmd = 'nc %s %s < %s' % (dst.IP(), port, rand_path)
    src.popen( src_cmd, shell=True )
Then the parent function calls send_one_file() at random intervals:
def test_netcat_subprocess_async(net, duration):
    file_dir = "/home/mininet/sf_mininet_vm/data/MVI_0406_split"
    files = os.listdir(file_dir)
    start_time = time.time()
    end_time = start_time + duration
    # Transfer for the desired duration
    while time.time() < end_time:
        # Choose a pair of hosts
        host_pair = random.sample(net.hosts, 2)
        send_one_file(file_dir, host_pair, files)
        interval = random.uniform(0.01, 0.1)
        print "Initialized transfer; waiting %f seconds..." % interval
        time.sleep(interval)
This works without any of the problems I experienced with multiprocessing or threading (breaking the network after the session is over, blocking when it shouldn't, etc.).
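One caveat worth noting (my addition, not part of the original answer): each host.popen() call leaves a process behind, so on long runs it may be worth keeping the handles and reaping them once the test window ends. A sketch under that assumption, mirroring send_one_file() above:

import os
import random

procs = []  # Popen handles for transfers still in flight

def send_one_file_tracked(file_dir, host_pair, files):
    # Same transfer logic as send_one_file(), but the handles are kept.
    src, dst = host_pair
    rand_fname = random.sample(files, 1)[0]
    rand_path = os.path.join(file_dir, rand_fname)
    port = random.randint(1024, 65535)
    procs.append(dst.popen('nc -l %d > /home/mininet/sent/%s.out' % (port, rand_fname), shell=True))
    procs.append(src.popen('nc %s %s < %s' % (dst.IP(), port, rand_path), shell=True))

# After the transfer window ends:
for p in procs:
    p.wait()  # reap each nc process; blocks only on unfinished transfers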
I have a Python program that needs to be executed like this:
$ sudo python my_awesome_program.py
Now I want to run thousands of instances of this program using Celery, of course with different parameters. The problem is that when Celery tries to execute my program, it fails because it doesn't have sudo access.
How can I give my Celery workers the power of sudo to run this program?
I have already tried giving sudo access to my user, changing the Celery service owner, etc.
Maybe it's a stupid question, but I am lost.
P.S.
task.py
from celery import Celery
import os
import socket
import struct
import select
import time
import logging

# Global variables
broker = "redis://%s:%s" % ("127.0.0.1", '6379')
app = Celery('process_ips', broker=broker)
logging.basicConfig(filename="/var/log/celery_ping.log", level=logging.INFO)

# From /usr/include/linux/icmp.h; your mileage may vary.
ICMP_ECHO_REQUEST = 8  # Seems to be the same on Solaris.


def checksum(source_string):
    """
    I'm not too confident that this is right but testing seems
    to suggest that it gives the same answers as in_cksum in ping.c
    """
    sum = 0
    countTo = (len(source_string) / 2) * 2
    count = 0
    while count < countTo:
        thisVal = ord(source_string[count + 1]) * \
            256 + ord(source_string[count])
        sum = sum + thisVal
        sum = sum & 0xffffffff  # Necessary?
        count = count + 2
    if countTo < len(source_string):
        sum = sum + ord(source_string[len(source_string) - 1])
        sum = sum & 0xffffffff  # Necessary?
    sum = (sum >> 16) + (sum & 0xffff)
    sum = sum + (sum >> 16)
    answer = ~sum
    answer = answer & 0xffff
    # Swap bytes. Bugger me if I know why.
    answer = answer >> 8 | (answer << 8 & 0xff00)
    return answer


def receive_one_ping(my_socket, ID, timeout):
    """
    Receive the ping from the socket.
    """
    timeLeft = timeout
    while True:
        startedSelect = time.time()
        whatReady = select.select([my_socket], [], [], timeLeft)
        howLongInSelect = (time.time() - startedSelect)
        if whatReady[0] == []:  # Timeout
            return
        timeReceived = time.time()
        recPacket, addr = my_socket.recvfrom(1024)
        icmpHeader = recPacket[20:28]
        type, code, checksum, packetID, sequence = struct.unpack(
            "bbHHh", icmpHeader
        )
        if packetID == ID:
            bytesInDouble = struct.calcsize("d")
            timeSent = struct.unpack("d", recPacket[28:28 + bytesInDouble])[0]
            return timeReceived - timeSent
        timeLeft = timeLeft - howLongInSelect
        if timeLeft <= 0:
            return


def send_one_ping(my_socket, dest_addr, ID):
    """
    Send one ping to the given >dest_addr<.
    """
    dest_addr = socket.gethostbyname(dest_addr)

    # Header is type (8), code (8), checksum (16), id (16), sequence (16)
    my_checksum = 0

    # Make a dummy header with a 0 checksum.
    header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, my_checksum, ID, 1)
    bytesInDouble = struct.calcsize("d")
    data = (192 - bytesInDouble) * "Q"
    data = struct.pack("d", time.time()) + data

    # Calculate the checksum on the data and the dummy header.
    my_checksum = checksum(header + data)

    # Now that we have the right checksum, we put that in. It's just easier
    # to make up a new header than to stuff it into the dummy.
    header = struct.pack(
        "bbHHh", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), ID, 1
    )
    packet = header + data
    my_socket.sendto(packet, (dest_addr, 1))  # Don't know about the 1


def do_one(dest_addr, timeout):
    """
    Returns either the delay (in seconds) or None on timeout.
    """
    logging.info('Called do_one Line 105')
    icmp = socket.getprotobyname("icmp")
    try:
        my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
    except socket.error as xxx_todo_changeme:
        (errno, msg) = xxx_todo_changeme.args
        if errno == 1:
            # Operation not permitted
            msg = msg + (
                " - Note that ICMP messages can only be sent from processes"
                " running as root."
            )
            raise socket.error(msg)
        raise  # raise the original error
    my_ID = os.getpid() & 0xFFFF
    send_one_ping(my_socket, dest_addr, my_ID)
    delay = receive_one_ping(my_socket, my_ID, timeout)
    my_socket.close()
    return delay


def verbose_ping(dest_addr, timeout=1, count=2):
    """
    Send >count< pings to >dest_addr< with the given >timeout< and display
    the result.
    """
    logging.info('Messing with : %s' % dest_addr)
    try:
        for i in xrange(count):
            logging.info('line 136')
            try:
                delay = do_one(dest_addr, timeout)
                logging.info('line 139' + str(delay))
            except socket.gaierror as e:
                break
            logging.info('line 142' + str(delay))
            if delay is None:
                pass
            else:
                delay = delay * 1000
                logging.info('This HO is UP : %s' % dest_addr)
                return dest_addr
    except:
        logging.info('Error in : %s' % dest_addr)


@app.task()
def process_ips(items):
    logging.info('This is Items:----: %s' % items)
    up_one = verbose_ping(items)
    if up_one is not None:
        logging.info('This one is UP: %s' % up_one)
my_awesome_program.py
from task import process_ips

if __name__ == '__main__':
    for i in range(0, 256):
        for j in range(1, 256):
            ip = "192.168.%s.%s" % (str(i), str(j))
            jobs = process_ips.delay(ip)
/etc/defaults/celeryd
# Names of nodes to start
# most will only start one node:
CELERYD_NODES="worker1"
# but you can also start multiple and configure settings
# for each in CELERYD_OPTS (see `celery multi --help` for examples).
#CELERYD_NODES="worker1 worker2 worker3"
# Absolute or relative path to the 'celery' command:
CELERY_BIN="/home/jarvis/Development/venv/bin/celery"
#CELERY_BIN="/virtualenvs/def/bin/celery"
# App instance to use
# comment out this line if you don't use an app
CELERY_APP="task"
# or fully qualified:
#CELERY_APP="proj.tasks:app"
# Where to chdir at start.
CELERYD_CHDIR="/home/jarvis/Development/pythonScrap/nmap_sub"
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8 --loglevel=DEBUG"
# %N will be replaced with the first part of the nodename.
CELERYD_LOG_FILE="/var/log/celery/%N.log"
CELERYD_PID_FILE="/var/run/celery/%N.pid"
# Workers should run as an unprivileged user.
# You need to create this user manually (or you can choose
# a user/group combination that already exists, e.g. nobody).
CELERYD_USER="jarvis"
CELERYD_GROUP="jarvis"
# If enabled pid and log directories will be created if missing,
# and owned by the userid/group configured.
CELERY_CREATE_DIRS=1
You need to set your CELERYD_USER param in settings.
Have a look here: http://celery.readthedocs.org/en/latest/tutorials/daemonizing.html
If you're using supervisor, then in your supervisor conf of celery, you'd need to do this:
user=<user-you-want>
Also, under the Example Configuration section, it explicitly says not to run your workers as privileged users.
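As an aside: the only reason this script needs root at all is the raw ICMP socket in do_one(). Rather than giving the workers sudo, on Linux you can grant the raw-socket capability to the interpreter the workers run, so the task works unprivileged. A sketch, assuming the virtualenv path from the config above, and noting that setcap must target the real binary (not a symlink) and affects everything run with that interpreter:

# Grant the raw-socket capability to the worker's Python binary (Linux only):
sudo setcap cap_net_raw+ep /home/jarvis/Development/venv/bin/python

After that, the socket.SOCK_RAW call in do_one() should succeed without root; getcap can verify the capability took effect.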
I am trying to get tweets related to the keywords in the code, but at the Python shell there is nothing, just the cursor; no traceback, nothing. The code is here:
import time
import pycurl
import urllib
import json
import oauth2 as oauth

API_ENDPOINT_URL = 'https://stream.twitter.com/1.1/statuses/filter.json'
USER_AGENT = 'TwitterStream 1.0'  # This can be anything really

# You need to replace these with your own values
OAUTH_KEYS = {'consumer_key': 'ABC',
              'consumer_secret': 'ABC',
              'access_token_key': 'ABC',
              'access_token_secret': 'ABC'}

# These values are posted when setting up the connection
POST_PARAMS = {'include_entities': 0,
               'stall_warning': 'true',
               'track': 'iphone,ipad,ipod'}


class TwitterStream:
    def __init__(self, timeout=False):
        self.oauth_token = oauth.Token(key=OAUTH_KEYS['access_token_key'], secret=OAUTH_KEYS['access_token_secret'])
        self.oauth_consumer = oauth.Consumer(key=OAUTH_KEYS['consumer_key'], secret=OAUTH_KEYS['consumer_secret'])
        self.conn = None
        self.buffer = ''
        self.timeout = timeout
        self.setup_connection()

    def setup_connection(self):
        """ Create persistent HTTP connection to Streaming API endpoint using cURL.
        """
        if self.conn:
            self.conn.close()
            self.buffer = ''
        self.conn = pycurl.Curl()
        # Restart connection if less than 1 byte/s is received during "timeout" seconds
        if isinstance(self.timeout, int):
            self.conn.setopt(pycurl.LOW_SPEED_LIMIT, 1)
            self.conn.setopt(pycurl.LOW_SPEED_TIME, self.timeout)
        self.conn.setopt(pycurl.URL, API_ENDPOINT_URL)
        self.conn.setopt(pycurl.USERAGENT, USER_AGENT)
        # Using gzip is optional but saves us bandwidth.
        self.conn.setopt(pycurl.ENCODING, 'deflate, gzip')
        self.conn.setopt(pycurl.POST, 1)
        self.conn.setopt(pycurl.POSTFIELDS, urllib.urlencode(POST_PARAMS))
        self.conn.setopt(pycurl.HTTPHEADER, ['Host: stream.twitter.com',
                                             'Authorization: %s' % self.get_oauth_header()])
        # self.handle_tweet is the method that is called when new tweets arrive
        self.conn.setopt(pycurl.WRITEFUNCTION, self.handle_tweet)

    def get_oauth_header(self):
        """ Create and return OAuth header.
        """
        params = {'oauth_version': '1.0',
                  'oauth_nonce': oauth.generate_nonce(),
                  'oauth_timestamp': int(time.time())}
        req = oauth.Request(method='POST', parameters=params,
                            url='%s?%s' % (API_ENDPOINT_URL, urllib.urlencode(POST_PARAMS)))
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.oauth_consumer, self.oauth_token)
        return req.to_header()['Authorization'].encode('utf-8')

    def start(self):
        """ Start listening to Streaming endpoint.
        Handle exceptions according to Twitter's recommendations.
        """
        backoff_network_error = 0.25
        backoff_http_error = 5
        backoff_rate_limit = 60
        while True:
            self.setup_connection()
            try:
                self.conn.perform()
            except:
                # Network error, use linear back off up to 16 seconds
                print 'Network error: %s' % self.conn.errstr()
                print 'Waiting %s seconds before trying again' % backoff_network_error
                time.sleep(backoff_network_error)
                backoff_network_error = min(backoff_network_error + 1, 16)
                continue
            # HTTP Error
            sc = self.conn.getinfo(pycurl.HTTP_CODE)
            if sc == 420:
                # Rate limit, use exponential back off starting with 1 minute and double each attempt
                print 'Rate limit, waiting %s seconds' % backoff_rate_limit
                time.sleep(backoff_rate_limit)
                backoff_rate_limit *= 2
            else:
                # HTTP error, use exponential back off up to 320 seconds
                print 'HTTP error %s, %s' % (sc, self.conn.errstr())
                print 'Waiting %s seconds' % backoff_http_error
                time.sleep(backoff_http_error)
                backoff_http_error = min(backoff_http_error * 2, 320)

    def handle_tweet(self, data):
        """ This method is called when data is received through the Streaming endpoint.
        """
        self.buffer += data
        if data.endswith('\r\n') and self.buffer.strip():
            # complete message received
            message = json.loads(self.buffer)
            self.buffer = ''
            if message.get('limit'):
                print 'Rate limiting caused us to miss %s tweets' % (message['limit'].get('track'))
            elif message.get('disconnect'):
                raise Exception('Got disconnect: %s' % message['disconnect'].get('reason'))
            elif message.get('warning'):
                print 'Got warning: %s' % message['warning'].get('message')
            else:
                print 'Got tweet with text: %s' % message.get('text')


if __name__ == '__main__':
    ts = TwitterStream()
    ts.setup_connection()
    ts.start()
Please help me resolve the issue with this code.
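Not a fix as such, but when pycurl sits silently like this it helps to watch the connection itself. One sketch (my addition, not part of the original code): enable libcurl's verbose logging in setup_connection(), which prints the request and response chatter to stderr, including any HTTP 401 from rejected OAuth credentials:

# Inside setup_connection(), right after self.conn = pycurl.Curl():
self.conn.setopt(pycurl.VERBOSE, 1)  # log handshake, headers, and status to stderr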
I am downloading files over HTTP and displaying the progress using urllib and the following code, which works fine:
import sys
from urllib import urlretrieve

def dlProgress(count, blockSize, totalSize):
    percent = int(count*blockSize*100/totalSize)
    sys.stdout.write("\r" + "progress" + "...%d%%" % percent)
    sys.stdout.flush()

urlretrieve('http://example.com/file.zip', '/tmp/localfile', reporthook=dlProgress)
Now I would also like to restart the download if it is going too slow (say less than 1MB in 15 seconds). How can I achieve this?
This should work.
It calculates the actual download rate and aborts if it is too low.
import sys
from urllib import urlretrieve
import time

url = "http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tgz"  # 14,135,620 bytes
startTime = time.time()


class TooSlowException(Exception):
    pass


def convertBToMb(bytes):
    """converts Bytes to Megabytes"""
    bytes = float(bytes)
    megabytes = bytes / 1048576
    return megabytes


def dlProgress(count, blockSize, totalSize):
    global startTime
    alreadyLoaded = count * blockSize
    timePassed = time.time() - startTime
    transferRate = convertBToMb(alreadyLoaded) / timePassed  # mbytes per second
    transferRate *= 60  # mbytes per minute
    percent = int(alreadyLoaded * 100 / totalSize)
    sys.stdout.write("\r" + "progress" + "...%d%%" % percent)
    sys.stdout.flush()
    if transferRate < 4 and timePassed > 2:  # download will be slow at the beginning, hence wait 2 seconds
        print "\ndownload too slow! retrying..."
        time.sleep(1)  # let's not hammer the server
        raise TooSlowException


def main():
    try:
        urlretrieve(url, '/tmp/localfile', reporthook=dlProgress)
    except TooSlowException:
        global startTime
        startTime = time.time()
        main()


if __name__ == "__main__":
    main()
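One thing to watch with this scheme (my observation, not the original author's): each retry calls main() recursively, so a long stretch of slow attempts deepens the call stack. An iterative variant under the same assumptions:

def main():
    global startTime
    while True:
        startTime = time.time()  # reset the clock for the fresh attempt
        try:
            urlretrieve(url, '/tmp/localfile', reporthook=dlProgress)
            return  # finished without tripping the speed check
        except TooSlowException:
            continue  # start the download over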
Something like this:
import signal
import time

class Timeout(Exception):
    pass

def try_one(func, t=3):
    def timeout_handler(signum, frame):
        raise Timeout()

    old_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(t)  # trigger alarm in t seconds
    try:
        t1 = time.clock()
        func()
        t2 = time.clock()
    except Timeout:
        print('{} timed out after {} seconds'.format(func.__name__, t))
        return None
    finally:
        signal.signal(signal.SIGALRM, old_handler)
        signal.alarm(0)
    return t2 - t1
Then call try_one with the func you want to time out and the timeout in seconds:
try_one(downloader,15)
OR, you can do this:
import socket
socket.setdefaulttimeout(15)
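Note that setdefaulttimeout() only makes a stalled socket raise an exception; you still have to catch it and restart the download yourself. A small sketch combining the two (the function name and retry count are my assumptions):

import socket
from urllib import urlretrieve

socket.setdefaulttimeout(15)  # any 15-second stall now raises an exception

def download_with_retries(url, dest, attempts=3):
    for attempt in range(1, attempts + 1):
        try:
            urlretrieve(url, dest)
            return True
        except (IOError, socket.timeout):  # urllib may wrap the timeout in IOError
            print "attempt %d stalled; retrying..." % attempt
    return False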
HolyMackerel! Use the tools!
import urllib2, sys, socket, time, os

def url_tester(url="http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tgz"):
    file_name = url.split('/')[-1]
    u = urllib2.urlopen(url, None, 1)  # Note the timeout to urllib2...
    file_size = int(u.info().getheaders("Content-Length")[0])
    print ("\nDownloading: {} Bytes: {:,}".format(file_name, file_size))

    with open(file_name, 'wb') as f:
        file_size_dl = 0
        block_sz = 1024 * 4
        time_outs = 0
        status = ''  # initialized so the final "Done!" write can pad over it
        while True:
            try:
                buffer = u.read(block_sz)
            except socket.timeout:
                if time_outs > 3:  # file has not had activity in max seconds...
                    print "\n\n\nsorry -- try back later"
                    os.unlink(file_name)
                    raise
                else:  # start counting time outs...
                    print "\nHmmm... little issue... I'll wait a couple of seconds"
                    time.sleep(3)
                    time_outs += 1
                    continue
            if not buffer:  # end of the download
                sys.stdout.write('\rDone!' + ' ' * len(status) + '\n\n')
                sys.stdout.flush()
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            status = '{:20,} Bytes [{:.2%}] received'.format(
                file_size_dl, file_size_dl * 1.0 / file_size)
            sys.stdout.write('\r' + status)
            sys.stdout.flush()
    return file_name
This prints a status as expected. If I unplug my ethernet cable, I get:
Downloading: Python-2.7.3.tgz Bytes: 14,135,620
827,392 Bytes [5.85%] received
sorry -- try back later
If I unplug the cable, then plug it back in in less than 12 seconds, I get:
Downloading: Python-2.7.3.tgz Bytes: 14,135,620
716,800 Bytes [5.07%] received
Hmmm... little issue... I'll wait a couple of seconds
Hmmm... little issue... I'll wait a couple of seconds
Done!
The file is successfully downloaded.
You can see that urllib2 supports timeouts, and the retry loop above supplies the reconnects. If you disconnect and stay disconnected for 3 * 4 seconds == 12 seconds, it will time out for good and raise a fatal exception. This could be dealt with as well.
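One way to deal with it, sketched under the assumption that the server honors HTTP Range requests (the helper name is mine, not part of the answer above): reopen the URL with a Range header and append to the partial file instead of starting over.

import urllib2

def resume_download(url, file_name, already_have):
    req = urllib2.Request(url)
    req.add_header('Range', 'bytes=%d-' % already_have)  # ask for the rest of the file
    u = urllib2.urlopen(req, None, 1)
    with open(file_name, 'ab') as f:  # append to the partial download
        while True:
            chunk = u.read(1024 * 4)
            if not chunk:
                break
            f.write(chunk)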
I'm making a Django app that needs to be able to create emails and then send them out at a given time. I was thinking I could use django-mailer to put things in a queue and then send them off. But even though their sample case list says this is a feature, I can't seem to find out how.
What I need is to be able to set a 'when_to_send' field in the message model of django-mailer, and when the cron job fires the send_mail function, it needs to filter out the ones whose 'when_to_send' date is greater than the current time...
def send_all():
    """
    Send all eligible messages in the queue.
    """
    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    dont_send = 0
    deferred = 0
    sent = 0

    try:
        for message in prioritize():
            if DontSendEntry.objects.has_address(message.to_address):
                logging.info("skipping email to %s as on don't send list " % message.to_address)
                MessageLog.objects.log(message, 2)  # @@@ avoid using literal result code
                message.delete()
                dont_send += 1
            else:
                try:
                    logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), message.to_address.encode("utf-8")))
                    core_send_mail(message.subject, message.message_body, message.from_address, [message.to_address])
                    MessageLog.objects.log(message, 1)  # @@@ avoid using literal result code
                    message.delete()
                    sent += 1
                except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
                    message.defer()
                    logging.info("message deferred due to failure: %s" % err)
                    MessageLog.objects.log(message, 3, log_message=str(err))  # @@@ avoid using literal result code
                    deferred += 1
    finally:
        logging.debug("releasing lock...")
        lock.release()

    logging.debug("released.")
    logging.info("")
    logging.info("%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
Does anyone see how to customize this function so it doesn't send emails whose message.when_to_send field is greater than the current time?
You need to implement the cron job for django-mailer:
* * * * * (cd $PINAX; /usr/local/bin/python2.5 manage.py send_mail >> $PINAX/cron_mail.log 2>&1)
And then in engine.py line 96:
# Get rid of "while True:"
while not Message.objects.all():
    # Get rid of logging.debug("sleeping for %s seconds before checking queue again" % EMPTY_QUEUE_SLEEP)
    # Get rid of sleep
    send_all()
You can just add another clause to the conditionals under your message processing loop (you will also need to import datetime at the top of your file):
for message in prioritize():
    if DontSendEntry.objects.has_address(message.to_address):
        logging.info("skipping email to %s as on don't send list " % message.to_address)
        MessageLog.objects.log(message, 2)  # @@@ avoid using literal result code
        message.delete()
        dont_send += 1
    elif message.when_to_send > datetime.datetime.now():
        continue
    else:
        try:
            ... the rest of your code ...
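An alternative sketch (my own, not part of django-mailer): rather than skipping inside the loop, filter the due messages up front with the ORM. Here process_message stands in for the existing send/log/delete handling inside send_all(), and when_to_send is the custom field described in the question:

import datetime

due_messages = Message.objects.filter(when_to_send__lte=datetime.datetime.now())
for message in due_messages:
    process_message(message)  # hypothetical: same handling as in send_all()

Note this drops whatever ordering prioritize() applies, so you may want to chain .order_by() onto the filter.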