Python: get line number in file

I built a Python (2.7) script that parses a txt file with this code:
cnt = 1
logFile = open(logFilePath, 'r')
for line in logFile:
    if errorCodeGetHostName in line:
        errorHostNameCnt = errorHostNameCnt + 1
        errorGenericCnt = errorGenericCnt + 1
        reportFile.write("--- Error: GET HOST BY NAME # line " + str(cnt) + "\n\r")
        reportFile.write(line)
    elif errorCodeSocke462 in line:
        errorSocket462Cnt = errorSocket462Cnt + 1
        errorGenericCnt = errorGenericCnt + 1
        reportFile.write("--- Error: SOCKET -462 # line " + str(cnt) + "\n\r")
        reportFile.write(line)
    elif errorCodeMemory in line:
        errorMemoryCnt = errorMemoryCnt + 1
        errorGenericCnt = errorGenericCnt + 1
        reportFile.write("--- Error: MEMORY NOT RELEASED # line " + str(cnt) + "\n\r")
        reportFile.write(line)
    cnt = cnt + 1
I want to add the line number of each error, and for this purpose I added a counter (cnt), but its value does not correspond to the real line number.
This is a piece of my log file:
=~=~=~=~=~=~=~=~=~=~=~= PuTTY log 2017.06.13 17:05:43 =~=~=~=~=~=~=~=~=~=~=~=
UTC Time fetched from server #1: '0.pool.ntp.org'
*** Test (cycle #1) starting...
--- Test01 completed successfully!
--- Test02 completed successfully!
--- Test03 completed successfully!
--- Test04 completed successfully!
--- Test01 completed successfully!
--- Test02 completed successfully!
INF:[CONFIGURATION] Completed
--- Test03 completed successfully!
Firmware Version: 0.0.0
*** Test (cycle #1) starting...
How can I get the real line number?
Thanks for the help.

Apart from the line-ending issue, there are some other issues with this code.
Filehandles
As remarked in one of the comments, it is best to open files with a with-statement.
Separation of functions
Right now you have one big loop in which you loop over the original file, parse it and immediately write to the report file. I think it would be best to separate those.
Make one function that loops over the log and yields the details you need, and a second function that loops over these details and writes them to a report. This is a lot more robust, and easier to debug and test when something goes wrong.
I would also keep the I/O as far outside as possible. If you later want to stream to a socket or something similar, that can then be done easily.
DRY
The branches of your if/elif chain contain a lot of lines that are almost the same, and if you want to report another error, you need to add another five near-identical lines of code. I would use a dict and a for-loop to cut down on the boilerplate.
Pythonic
A smaller remark is that you don't use the handy things Python offers, like yield, the with-statement, enumerate or collections.Counter. Variable naming also doesn't follow PEP-8, but that is mainly aesthetic.
My attempt
errors = {
    error_code_hostname: {'error_msg': '--- Error: GET HOST BY NAME # line %i'},
    error_code_socket_462: {'error_msg': '--- Error: SOCKET -462 # line %i'},
    error_code_memory: {'error_msg': '--- Error: MEMORY NOT RELEASED # line %i'},
}
Here you define which errors can occur and what each error message should look like; the keys are the error-code strings you search for in each line.
def get_events(log_filehandle):
    # enumerate(..., 1) makes line numbers start at 1, like a text editor
    for line_no, line in enumerate(log_filehandle, 1):
        for error_code, error in errors.items():
            if error_code in line:
                yield line_no, error_code, line
This takes a filehandle (it can be a Stream or Buffer too) and looks for error codes in each line; if it finds one, it yields the line number, the error code and the line itself.
import collections

def generate_report(report_filehandle, error_list):
    error_counter = collections.Counter()
    for line_no, error_code, error_line in error_list:
        error_counter['generic'] += 1
        error_counter[error_code] += 1
        error_msg = format_error_msg(line_no, error_code)
        report_filehandle.write(error_msg)
        report_filehandle.write(error_line)
    return error_counter
This loops over the found errors. It increments the counters, formats the message and writes it to the report file.
def format_error_msg(line_no, error_code):
    # newline added so the message and the log line don't run together
    return errors[error_code]['error_msg'] % line_no + '\n'
This uses string-formatting to generate a message from an error_code and line_no
with open(log_filename, 'r') as log_filehandle, \
        open(report_filename, 'w') as report_filehandle:
    error_list = get_events(log_filehandle)
    error_counter = generate_report(report_filehandle, error_list)
This ties it all together. You could use the error_counter to add a summary to the report, or write a summary to another file or database.
This approach has the advantage that if your error recognition changes, you can change it independently of the reporting, and vice versa.

Intro: the log that I want to parse comes from an embedded platform programmed in C.
I found that somewhere in the embedded code there is a printf with \n\r instead of \r\n. I replaced each \n\r with \r\n, which corresponds to the Windows CR LF.
With this change the Python script works, and I can identify each error by its line number.
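For reference, a minimal sketch of that normalization step (names follow the question's code; treat it as illustrative, not the exact script):
# A sketch of the fix described above: normalize the firmware's '\n\r'
# terminators to '\r\n' before parsing, so that iterating over the file
# yields the real lines and cnt matches the actual line numbers.
with open(logFilePath, 'rb') as f:
    data = f.read().replace('\n\r', '\r\n')
with open(logFilePath, 'wb') as f:
    f.write(data)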


Python file management does not always work?

I encounter a strange problem. I have a BBB with Debian 2015-11-03. My Python 2.7 application runs automatically at startup. I do some file management in my script. When the application starts automatically, that part of the script does not work: no error messages, just nothing. When I start my application manually in a LXTerminal (sudo python aceme.py, same command line as in auto mode, same dir), that part of the script works perfectly.
The part of the script under debate:
def correction():  # to implement the antenna correction values
    if abs(az_delta) < 5 and abs(el_delta) < 5:
        azimc = az_delta
        elevc = el_delta
        conf_data = ""  # if we accept the corrections we need to save them for future use
        input_file = open('acemedat.py', 'r')
        for line in input_file:
            if 'azcor =' in line:
                line = 'azcor = ' + str(azimc) + '\r\n'
            if 'elcor =' in line:
                line = 'elcor = ' + str(elevc) + '\r\n'
            conf_data += line
        input_file.close()
        output_file = open('acemedat.py', 'w')
        output_file.write(conf_data)
        output_file.close()
        az_cor.configure(text=str('%5.1f' % azimc))
        el_cor.configure(text=str('%5.1f' % elevc))
        message.configure(text="Correction values acknowledged and saved")
    else:
        message.configure(text="Correction denied: delta > 5 degrees")
    clear_message.configure(state='normal')
    return
In all cases the else clause works as expected.
What do I need to do to rectify this situation?
Thanks in advance,
Harke
Is the working directory at startup, where the script runs (and looks for acemedat.py), the same as when you execute it manually?
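If it isn't, one fix is to build absolute paths from the script's own location. A minimal sketch, assuming acemedat.py sits next to the script:
import os

# Build an absolute path to the data file so open() works no matter
# which working directory the startup mechanism launches the script in.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(SCRIPT_DIR, 'acemedat.py')

input_file = open(DATA_FILE, 'r')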

python: tail file in background [duplicate]

I'd like to make the output of tail -F or something similar available to me in Python without blocking or locking. I've found some really old code to do that here, but I'm thinking there must be a better way or a library to do the same thing by now. Anyone know of one?
Ideally, I'd have something like tail.getNewData() that I could call every time I wanted more data.
Non-blocking
If you are on Linux (Windows does not support calling select on files) you can use the subprocess module along with the select module.
import time
import subprocess
import select

f = subprocess.Popen(['tail', '-F', filename],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = select.poll()
p.register(f.stdout)

while True:
    if p.poll(1):
        print f.stdout.readline()
    time.sleep(1)
This polls the output pipe for new data and prints it when it is available. Normally the time.sleep(1) and print f.stdout.readline() would be replaced with useful code.
Blocking
You can use the subprocess module without the extra select module calls.
import subprocess

f = subprocess.Popen(['tail', '-F', filename],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while True:
    line = f.stdout.readline()
    print line
This will also print new lines as they are added, but it will block until the tail program is closed, probably with f.kill().
Using the sh module (pip install sh):
from sh import tail

# runs forever
for line in tail("-f", "/var/log/some_log_file.log", _iter=True):
    print(line)
[update]
Since sh.tail with _iter=True is a generator, you can:
import sh
tail = sh.tail("-f", "/var/log/some_log_file.log", _iter=True)
Then you can "getNewData" with:
new_data = tail.next()
Note that if the tail buffer is empty, it will block until there is more data (from your question it is not clear what you want to do in this case).
[update]
This works if you replace -f with -F, but in Python it would be locking. I'd be more interested in having a function I could call to get new data when I want it, if that's possible. – Eli
A container generator placing the tail call inside a while True loop and catching eventual I/O exceptions will have almost the same effect as -F.
def tail_F(some_file):
    while True:
        try:
            for line in sh.tail("-f", some_file, _iter=True):
                yield line
        except sh.ErrorReturnCode_1:
            yield None
If the file becomes inaccessible, the generator will yield None. However, it still blocks until there is new data if the file is accessible. It remains unclear to me what you want to do in this case.
Raymond Hettinger's approach seems pretty good:
import os

def tail_F(some_file):
    first_call = True
    while True:
        try:
            with open(some_file) as input:
                if first_call:
                    input.seek(0, 2)
                    first_call = False
                latest_data = input.read()
                while True:
                    if '\n' not in latest_data:
                        latest_data += input.read()
                        if '\n' not in latest_data:
                            yield ''
                            if not os.path.isfile(some_file):
                                break
                            continue
                    latest_lines = latest_data.split('\n')
                    if latest_data[-1] != '\n':
                        latest_data = latest_lines[-1]
                    else:
                        latest_data = input.read()
                    for line in latest_lines[:-1]:
                        yield line + '\n'
        except IOError:
            yield ''
This generator will return '' if the file becomes inaccessible or if there is no new data.
[update]
The second to last answer circles around to the top of the file it seems whenever it runs out of data. – Eli
I think the second will output the last ten lines whenever the tail process ends, which with -f is whenever there is an I/O error. The tail --follow --retry behavior is not far from this for most cases I can think of in unix-like environments.
Perhaps if you update your question to explain what your real goal is (the reason why you want to mimic tail --retry), you will get a better answer.
The last answer does not actually follow the tail and merely reads what's available at run time. – Eli
Of course, tail will display the last 10 lines by default... You can position the file pointer at the end of the file using file.seek; I will leave a proper implementation as an exercise for the reader.
IMHO the file.read() approach is far more elegant than a subprocess based solution.
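For the record, a minimal sketch of that seek-to-end idea (the file name is made up; this is not the promised "proper implementation"):
import time

f = open('somefile.log')  # hypothetical file name
f.seek(0, 2)              # 2 = os.SEEK_END: skip the existing contents
while True:
    new_data = f.read()   # returns '' until more data is appended
    if new_data:
        print new_data,
    else:
        time.sleep(0.5)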
Purely pythonic solution using non-blocking readline()
Adapting Ijaz Ahmad Khan's answer to yield lines only when they are completely written (i.e. they end with a newline char) gives a pythonic solution with no external dependencies:
import time
from typing import Iterator

def follow(file, sleep_sec=0.1) -> Iterator[str]:
    """ Yield each line from a file as they are written.
    `sleep_sec` is the time to sleep after empty reads. """
    line = ''
    while True:
        tmp = file.readline()
        if tmp:  # readline() returns '' (never None) when no new data is available
            line += tmp
            if line.endswith("\n"):
                yield line
                line = ''
        elif sleep_sec:
            time.sleep(sleep_sec)

if __name__ == '__main__':
    with open("test.txt", 'r') as file:
        for line in follow(file):
            print(line, end='')
The only portable way to tail -f a file appears to be, in fact, to read from it and retry (after a sleep) if the read returns 0. The tail utilities on various platforms use platform-specific tricks (e.g. kqueue on BSD) to efficiently tail a file forever without needing sleep.
Therefore, implementing a good tail -f purely in Python is probably not a good idea, since you would have to use the least-common-denominator implementation (without resorting to platform-specific hacks). Using a simple subprocess to open tail -f and iterating through the lines in a separate thread, you can easily implement a non-blocking tail operation in Python.
Example implementation:
import threading, Queue, subprocess

tailq = Queue.Queue(maxsize=10)  # buffer at most 10 lines

def tail_forever(fn):
    p = subprocess.Popen(["tail", "-f", fn], stdout=subprocess.PIPE)
    while 1:
        line = p.stdout.readline()
        tailq.put(line)
        if not line:
            break

threading.Thread(target=tail_forever, args=(fn,)).start()

print tailq.get()         # blocks
print tailq.get_nowait()  # throws Queue.Empty if there are no lines to read
All the answers that use tail -f are not pythonic.
Here is the pythonic way (using no external tool or library):
import time

def follow(thefile):
    while True:
        line = thefile.readline()
        if not line or not line.endswith('\n'):
            time.sleep(0.1)
            continue
        yield line

if __name__ == '__main__':
    logfile = open("run/foo/access-log", "r")
    loglines = follow(logfile)
    for line in loglines:
        print(line, end='')
So, this is coming quite late, but I ran into the same problem again, and there's a much better solution now. Just use pygtail:
Pygtail reads log file lines that have not been read. It will even
handle log files that have been rotated. Based on logcheck's logtail2
(http://logcheck.org)
Ideally, I'd have something like tail.getNewData() that I could call every time I wanted more data
We've already got one, and it's very nice. Just call f.read() whenever you want more data. It will start reading where the previous read left off, and it will read through to the end of the data stream:
f = open('somefile.log')
p = 0
while True:
    f.seek(p)
    latest_data = f.read()
    p = f.tell()
    if latest_data:
        print latest_data
        print str(p).center(10).center(80, '=')
For reading line-by-line, use f.readline(). Sometimes, the file being read will end with a partially read line. Handle that case with f.tell() finding the current file position and using f.seek() for moving the file pointer back to the beginning of the incomplete line. See this ActiveState recipe for working code.
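A minimal sketch of that tell/seek technique (hypothetical file name; see the recipe for a polished version):
import time

f = open('somefile.log')  # hypothetical file name
while True:
    pos = f.tell()                 # remember where this line starts
    line = f.readline()
    if line.endswith('\n'):
        print line,
    else:                          # partial (or no) line: rewind and retry
        f.seek(pos)
        time.sleep(0.1)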
You could use the 'tailer' library: https://pypi.python.org/pypi/tailer/
It has an option to get the last few lines:
import tailer

# Get the last 3 lines of the file
tailer.tail(open('test.txt'), 3)
# ['Line 9', 'Line 10', 'Line 11']
And it can also follow a file:
# Follow the file as it grows
for line in tailer.follow(open('test.txt')):
    print line
If one wants tail-like behaviour, that one seems to be a good option.
Another option is the tailhead library, which provides Python versions of both the tail and head utilities and an API that can be used in your own module.
Originally based on the tailer module, its main advantage is the ability to follow files by path, i.e. it can handle the situation when a file is recreated. Besides, it has some bug fixes for various edge cases.
Python is "batteries included" - it has a nice solution for it: https://pypi.python.org/pypi/pygtail
Reads log file lines that have not been read. Remembers where it finished last time, and continues from there.
import sys
from pygtail import Pygtail

for line in Pygtail("some.log"):
    sys.stdout.write(line)
You can also use the awk command.
See more at: http://www.unix.com/shell-programming-scripting/41734-how-print-specific-lines-awk.html
awk can be used to tail the last line, the last few lines or any line in a file, and it can be called from Python.
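For example, a minimal sketch that uses awk's END block to print a file's last line from Python (the file name is made up):
import subprocess

# awk reads the whole file; the END block runs after the last record,
# so 'print' there emits the final line.
last_line = subprocess.check_output(['awk', 'END { print }', 'access.log'])
print last_line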
If you are on Linux, you can implement a non-blocking solution in Python in the following way.
import subprocess

subprocess.call('xterm -title log -hold -e "tail -f filename" &',
                shell=True, executable='/bin/csh')
print "Done"
# -*- coding:utf-8 -*-
import sys
import time


class Tail():
    def __init__(self, file_name, callback=sys.stdout.write):
        self.file_name = file_name
        self.callback = callback

    def follow(self, n=10):
        try:
            # open the file (note: the end-relative seek in showLastLine
            # requires binary mode in Python 3; use the 'rb' variant below
            # if seek() complains)
            with open(self.file_name, 'r', encoding='UTF-8') as f:
            # with open(self.file_name, 'rb') as f:
                self._file = f
                self._file.seek(0, 2)
                # store the length of the file
                self.file_length = self._file.tell()
                # print the last n lines
                self.showLastLine(n)
                # keep reading the file and print whatever gets appended
                while True:
                    line = self._file.readline()
                    if line:
                        self.callback(line)
                    time.sleep(1)
        except Exception as e:
            print('Failed to open the file; check that it exists and that you have read permission')
            print(e)

    def showLastLine(self, n):
        # assume a line is roughly 100 characters (1 or 1000 would work too)
        len_line = 100
        # n defaults to 10 and can be overridden via follow()
        read_len = len_line * n
        # last_lines will hold the lines to print
        while True:
            # if the chunk to read is longer than the whole file,
            # read the whole file and break
            if read_len > self.file_length:
                self._file.seek(0)
                last_lines = self._file.read().split('\n')[-n:]
                break
            # otherwise read read_len characters and count the newlines
            self._file.seek(-read_len, 2)
            last_words = self._file.read(read_len)
            # count is the number of newline characters in the chunk
            count = last_words.count('\n')
            if count >= n:
                # at least n newlines: just take the last n lines
                last_lines = last_words.split('\n')[-n:]
                break
            # fewer than n newlines
            else:
                # if there is no newline at all, treat the chunk as one line
                if count == 0:
                    len_perline = read_len
                # otherwise estimate the average line length from the chunk
                else:
                    len_perline = read_len // count  # integer division so seek() gets an int
                # grow the read length accordingly and try again
                read_len = len_perline * n
        for line in last_lines:
            self.callback(line + '\n')


if __name__ == '__main__':
    py_tail = Tail('test.txt')
    py_tail.follow(1)
A simple tail function from the PyPI app tailread.
You can also get it via pip install tailread.
Recommended for tail access of large files.
from io import BufferedReader


def readlines(bytesio, batch_size=1024, keepends=True, **encoding_kwargs):
    '''bytesio: file path or BufferedReader
    batch_size: size to be processed
    '''
    path = None
    if isinstance(bytesio, str):
        path = bytesio
        bytesio = open(path, 'rb')
    elif not isinstance(bytesio, BufferedReader):
        raise TypeError('The first argument to readlines must be a file path or a BufferedReader')

    bytesio.seek(0, 2)
    end = bytesio.tell()

    buf = b""
    for p in reversed(range(0, end, batch_size)):
        bytesio.seek(p)
        lines = []
        remain = min(end - p, batch_size)
        while remain > 0:
            line = bytesio.readline()[:remain]
            lines.append(line)
            remain -= len(line)
        cut, *parsed = lines
        for line in reversed(parsed):
            if buf:
                line += buf
                buf = b""
            if encoding_kwargs:
                line = line.decode(**encoding_kwargs)
            yield from reversed(line.splitlines(keepends))
        buf = cut + buf

    if path:
        bytesio.close()

    if encoding_kwargs:
        buf = buf.decode(**encoding_kwargs)
    yield from reversed(buf.splitlines(keepends))


for line in readlines('access.log', encoding='utf-8', errors='replace'):
    print(line)
    if 'line 8' in line:
        break

# line 11
# line 10
# line 9
# line 8

ERROR: "filetest.submit" doesn't contain any "queue" commands -- no jobs queued

I am writing a Python script that creates a Condor submit file, writes information to it, and then submits it to be run on Condor.
for f in my_range(0, 10, 2):
    condor_submit.write('Arguments = povray +Irubiks.pov +0frame' + str(f) + '.png +K.' + str(f) + '\n')  # '+ stat +'
    condor_submit.write('Output = ' + str(f) + '.out\n')
    condor_submit.write('queue\n\n')

subprocess.call('condor_submit %s' % (fname,), shell=True)
What I don't understand is why I get the error saying there is no 'queue' command.
I opened up the created submit file and it shows up as:
universe=vanilla
.... (the rest of the header)
should_transfer_files = yes
when_to_transfer_files = on_exit
Arguments = test frame0.pov
Output = 0.out
queue
Arguments = test frame2.pov
and so on. Each section, composed of arguments, output, and queue, does end with a queue statement and is formatted like that.
What is causing it not to notice the queue lines?
Thank you!
The data is likely buffered and not actually in the submit file yet. After you are done writing to the submit file either close the file or flush it before you invoke condor_submit.
The reason the data is there when you inspect the file after the program errors out is that the file is likely closed either (a) later in your program or (b) automatically at program exit.
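A minimal sketch of that fix, reusing the names from the question:
condor_submit.flush()  # push Python's write buffer out to the file...
condor_submit.close()  # ...or just close it; close() flushes as well

subprocess.call('condor_submit %s' % (fname,), shell=True)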

What is an output file in python

I'm starting to work on problems for Google's Code Jam. However, there seems to be a problem with my submission. Whenever I submit, I am told "Your output should start with 'Case #1: '". My output comes from a print statement, "Case #%s: %s" % (y + 1, p), which prints Case #1: etc. when I run my code.
I looked into it and it said "Your output should start with 'Case #1: ': If you get this message, make sure you did not upload the source file in place of the output file, and that you're outputting case numbers properly. The first line of the output file should always start with "Case #1:", followed by a space or the end of the line."
So what is an output file and how would I incorporate it into my code?
Extra info: this is my code. I'm saving it as GoogleCode1.py and submitting that file. I wrote it in IDLE.
import string

firstimput = raw_input("cases ")
for y in range(int(firstimput)):  # was int(first), a NameError; use the variable defined above
    nextimput = raw_input("imput ")
    firstlist = string.split(nextimput)
    firstlist.reverse()
    p = ""
    for x in range(len(firstlist)):
        p = p + firstlist[x] + " "
    p = p[:-1]
    print "Case #%s: %s" % (y + 1, p)
Run the script in a shell, and redirect the output.
python GoogleCode1.py > GoogleCode1.out
I/O redirection aside, the other way to do this would be to read from and write to files. Look up file handling in Python:
input_file = open('/path/to/input_file')
output_file = open('/path/to/output_file', 'w')

for line in input_file:
    answer = myFunction(line)
    output_file.write("Case #x: " + str(answer))

input_file.close()
output_file.close()
Cheers
Make sure you're submitting a file containing what your code outputs -- don't submit the code itself during a practice round.

tail multiple logfiles in python

This is probably a bit of a silly exercise for me, but it raises a bunch of interesting questions. I have a directory of logfiles from my chat client, and I want to be notified using notify-osd every time one of them changes.
The script that I wrote basically uses os.popen to run the Linux tail command on every one of the files to get the last line, and then checks each line against a dictionary of what the lines were the last time it ran. If a line changed, it uses pynotify to send me a notification.
This script actually worked perfectly, except for the fact that it used a huge amount of CPU (probably because it was running tail about 16 times every time the loop ran, on files that were mounted over sshfs).
It seems like something like this would be a great solution, but I don't see how to implement it for more than one file.
Here is the script that I wrote. Pardon my lack of comments and poor style.
Edit: To clarify, this is all Linux on a desktop.
Not even looking at your source code, there are two ways you could easily do this more efficiently and handle multiple files.
Don't bother running tail unless you have to. Simply os.stat all of the files and record the last-modified time. If the last-modified time is different, then raise a notification (see the first sketch below).
Use pyinotify to call out to Linux's inotify facility; this will have the kernel do option 1 for you and call back to you when any file in your directory changes. Then translate the callback into your OSD notification.
Now, there might be some trickiness depending on how many notifications you want when there are multiple messages and whether you care about missing a notification for a message.
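A minimal sketch of the first option (paths and poll interval are made up for illustration):
import os
import time

logfiles = ['/path/to/log1', '/path/to/log2']  # hypothetical paths
last_mtime = dict((fn, os.stat(fn).st_mtime) for fn in logfiles)

while True:
    for fn in logfiles:
        mtime = os.stat(fn).st_mtime
        if mtime != last_mtime[fn]:
            last_mtime[fn] = mtime
            print '%s changed' % fn  # raise the notification here
    time.sleep(1)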
An approach that preserves the use of tail would be to instead use tail -f. Open all of the files with tail -f and then use the select module to have the OS tell you when there's additional input on one of the file descriptors open for tail -f. Your main loop would call select and then iterate over each of the readable descriptors to generate notifications. (You could probably do this without using tail, by just calling readline() when a descriptor is readable.)
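A minimal sketch of that select-based variant (again with made-up paths):
import select
import subprocess

# one tail -f per file; select() tells us which pipe has new output
procs = [subprocess.Popen(['tail', '-f', fn], stdout=subprocess.PIPE)
         for fn in ['/path/to/log1', '/path/to/log2']]
fd_map = dict((p.stdout.fileno(), p.stdout) for p in procs)

while True:
    readable, _, _ = select.select(fd_map.keys(), [], [])
    for fd in readable:
        line = fd_map[fd].readline()
        if line:
            print line,  # generate the notification here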
Other areas of improvement in your script:
Use os.listdir and native Python filtering (say, using list comprehensions) instead of a popen with a bunch of grep filters.
Update the list of buffers to scan periodically instead of only doing it at program boot.
Use subprocess.Popen instead of os.popen.
If you're already using the pyinotify module, it's easy to do this in pure Python (i.e. no need to spawn a separate process to tail each file).
Here is an example that is event-driven by inotify, and should use very little cpu. When IN_MODIFY occurs for a given path we read all available data from the file handle and output any complete lines found, buffering the incomplete line until more data is available:
import os
import sys

import pynotify
import pyinotify


class Watcher(pyinotify.ProcessEvent):
    def __init__(self, paths):
        self._manager = pyinotify.WatchManager()
        self._notify = pyinotify.Notifier(self._manager, self)
        self._paths = {}
        for path in paths:
            self._manager.add_watch(path, pyinotify.IN_MODIFY)
            fh = open(path, 'rb')
            fh.seek(0, os.SEEK_END)
            self._paths[os.path.realpath(path)] = [fh, '']

    def run(self):
        while True:
            self._notify.process_events()
            if self._notify.check_events():
                self._notify.read_events()

    def process_default(self, evt):
        path = evt.pathname
        fh, buf = self._paths[path]
        data = fh.read()
        lines = data.split('\n')
        # output previous incomplete line.
        if buf:
            lines[0] = buf + lines[0]
        # hold back the last element: it is an incomplete line, or '' if
        # the data ended with a newline; either way it must not be shown yet.
        buf = lines.pop()
        # display a notification
        notice = pynotify.Notification('%s changed' % path, '\n'.join(lines))
        notice.show()
        # and output to stdout
        for line in lines:
            sys.stdout.write(path + ': ' + line + '\n')
            sys.stdout.flush()
        self._paths[path][1] = buf


pynotify.init('watcher')
paths = sys.argv[1:]
Watcher(paths).run()
Usage:
% python watcher.py [path1 path2 ... pathN]
Simple pure-Python solution (not the best, but it doesn't fork, prints 4 empty lines after an idle period, and marks the source of each chunk when it changes):
#!/usr/bin/env python
from __future__ import with_statement
'''
Implement multi-file tail
'''
import os
import sys
import time


def print_file_from(filename, pos):
    with open(filename, 'rb') as fh:
        fh.seek(pos)
        while True:
            chunk = fh.read(8192)
            if not chunk:
                break
            sys.stdout.write(chunk)


def _fstat(filename):
    st_results = os.stat(filename)
    return (st_results[6], st_results[8])


def _print_if_needed(filename, last_stats, no_fn, last_fn):
    changed = False
    # Find the size of the file and move to the end
    tup = _fstat(filename)
    # print tup
    if last_stats[filename] != tup:
        changed = True
        if not no_fn and last_fn != filename:
            print '\n<%s>' % filename
        print_file_from(filename, last_stats[filename][0])
        last_stats[filename] = tup
    return changed


def multi_tail(filenames, stdout=sys.stdout, interval=1, idle=10, no_fn=False):
    S = lambda (st_size, st_mtime): (max(0, st_size - 124), st_mtime)
    last_stats = dict((fn, S(_fstat(fn))) for fn in filenames)
    last_fn = None
    last_print = 0
    while 1:
        # print last_stats
        changed = False
        for filename in filenames:
            if _print_if_needed(filename, last_stats, no_fn, last_fn):
                changed = True
                last_fn = filename
        if changed:
            if idle > 0:
                last_print = time.time()
        else:
            if idle > 0 and last_print is not None:
                if time.time() - last_print >= idle:
                    last_print = None
                    print '\n' * 4
        time.sleep(interval)


if '__main__' == __name__:
    from optparse import OptionParser
    op = OptionParser()
    op.add_option('-F', '--no-fn', help="don't print filename when changes",
                  default=False, action='store_true')
    op.add_option('-i', '--idle', help='idle time, in seconds (0 turns off)',
                  type='int', default=10)
    op.add_option('--interval', help='check interval, in seconds', type='int',
                  default=1)
    opts, args = op.parse_args()
    try:
        multi_tail(args, interval=opts.interval, idle=opts.idle,
                   no_fn=opts.no_fn)
    except KeyboardInterrupt:
        pass
