I want to tail multiple files concurrently and push the logs to scribe.
I am reading the file names from a config file, then I want to tail each file and send its logs to scribe.
What I have tried sends logs for only the first file and not for the others.
I want the tailing to run concurrently for each file and send the logs for all of them at the same time.
for l in Config.items('files'):
    print l[0]
    print l[1]
    filename = l[1]
    file = open(filename, 'r')
    st_results = os.stat(l[1])
    st_size = st_results[6]
    file.seek(st_size)
    while 1:
        where = file.tell()
        line = file.readline()
        if not line:
            time.sleep(1)
            file.seek(where)
        else:
            print line,  # already has newline
            category = l[0]
            message = line
            log_entry = scribe.LogEntry(category, message)
            socket = TSocket.TSocket(host='localhost', port=1463)
            transport = TTransport.TFramedTransport(socket)
            protocol = TBinaryProtocol.TBinaryProtocol(trans=transport, strictRead=False, strictWrite=False)
            client = scribe.Client(iprot=protocol, oprot=protocol)
            transport.open()
            result = client.Log(messages=[log_entry])
            transport.close()
Your while 1 loop never returns, so the for loop never gets past the first file; each file needs its own thread. Try something like this (inspired by this):
import threading

def monitor_file(l):
    print l[0]
    print l[1]
    filename = l[1]
    file = open(filename, 'r')
    st_results = os.stat(l[1])
    st_size = st_results[6]
    file.seek(st_size)
    while 1:
        where = file.tell()
        line = file.readline()
        if not line:
            time.sleep(1)
            file.seek(where)
        else:
            print line,  # already has newline
            category = l[0]
            message = line
            log_entry = scribe.LogEntry(category, message)
            socket = TSocket.TSocket(host='localhost', port=1463)
            transport = TTransport.TFramedTransport(socket)
            protocol = TBinaryProtocol.TBinaryProtocol(trans=transport, strictRead=False, strictWrite=False)
            client = scribe.Client(iprot=protocol, oprot=protocol)
            transport.open()
            result = client.Log(messages=[log_entry])
            transport.close()
for l in Config.items('files'):
    # args must be a tuple, and the thread has to be started
    thread = threading.Thread(target=monitor_file, args=(l,))
    thread.start()
A different implementation of @Pengman's idea:
#!/usr/bin/env python
import os
import time
from threading import Thread

def follow(filename):
    with open(filename) as file:
        file.seek(0, os.SEEK_END)  # goto EOF
        while True:
            for line in iter(file.readline, ''):
                yield line
            time.sleep(1)

def logtail(category, filename):
    print category
    print filename
    for line in follow(filename):
        print line,
        log_entry(category, line)

for args in Config.items('files'):
    Thread(target=logtail, args=args).start()
Where log_entry() is a copy of the code from the question:
def log_entry(category, message):
    entry = scribe.LogEntry(category, message)
    socket = TSocket.TSocket(host='localhost', port=1463)
    transport = TTransport.TFramedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(trans=transport, strictRead=False,
                                               strictWrite=False)
    client = scribe.Client(iprot=protocol, oprot=protocol)
    transport.open()
    result = client.Log(messages=[entry])
    transport.close()
follow() could be implemented using FS monitoring tools, see tail -f in python with no time.sleep.
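For example, here is a minimal sketch of an event-driven follow(), assuming the third-party watchdog package is installed (it is not used in the original answer):

import os
import threading
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

def follow(filename):
    path = os.path.abspath(filename)
    changed = threading.Event()

    class Handler(FileSystemEventHandler):
        def on_modified(self, event):
            # Wake the reader only when our file was touched
            if event.src_path == path:
                changed.set()

    observer = Observer()
    observer.schedule(Handler(), os.path.dirname(path))
    observer.start()
    with open(filename) as file:
        file.seek(0, os.SEEK_END)  # goto EOF
        while True:
            for line in iter(file.readline, ''):
                yield line
            changed.wait()   # block until the file is modified again
            changed.clear()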
I'm currently creating an encrypted chat program. Text chat works well; however, file transfer doesn't. The idea is that when one client transfers a file, the server receives it and forwards it to the other client. When I type '/filetransfer' to transfer a file, I get:
Dick: hi
/filetransfer
Sending...
Exception in thread Thread-2:
Traceback (most recent call last):
  File "C:\Python\lib\threading.py", line 932, in _bootstrap_inner
    self.run()
  File "C:\Python\lib\threading.py", line 870, in run
    self._target(*self._args, **self._kwargs)
  File "c:\Users\USER\Desktop\filetest\client.py", line 198, in sndChat
    self.sndFile()
  File "c:\Users\USER\Desktop\filetest\client.py", line 233, in sndFile
    clientSocket.send(l)
This error occurred; I think the client cannot send the file data.
I would also like to apply the Diffie-Hellman and AES encryption used for text chat to the file transfer. I have spent a lot of time on this, but it doesn't work. I desperately need help...
Client.py
def rcvChat(self):
    print("\nWelcome to Chatee!")
    while True:
        try:
            message = clientSocket.recv(4096).decode(encodeType)
            if self.thred_done:
                message = self.aes.decrypt(message)
                print(message)
                if message == 'filetransfer start':
                    filereceive_thread = threading.Thread(target=self.rcvChat)
                    filereceive_thread.join()
                    #write_thread = threading.Thread(target=self.sndChat)
                    #write_thread.join()
                    #self.rcvFile()
                    break
def sndChat(self):
    while True:
        message = input('')
        if message == '/filetransfer':
            message = self.aes.encrypt(message)
            clientSocket.send(message)
            writefile_thread = threading.Thread(target=self.sndChat)
            writefile_thread.start()
            self.sndFile()
            break
        message = self.aes.encrypt(message)
        clientSocket.send(message)
def sndFile(self):
    print("---File Transfer---")
    print("Type a file name...")
    filename = 'C:\\Users\\USER\\Desktop\\filetest\\test.txt'
    #clientSocket.send(filename.encode(encodeType))
    #data_transferred = 0
    if not exists(filename):
        print("The file doesn't exist.")
    f = open(filename, 'rb')
    print('Sending...')
    l = f.read(8096)
    while l:
        print('Sending...')
        #data_transferred += clientSocket.send(l)
        clientSocket.send(l)
        l = f.read(8096)
    f.close()
    print('Done Sending')
    #clientSocket.shutdown(socket.SHUT_WR)
    print(clientSocket.recv(8096))
    #clientSocket.close
def rcvFile(self):
    #filename = clientSocket.recv(1024).decode(encodeType)
    #filename = self.aes.decrypt(filename)
    filename = 'received.txt'
    f = open(filename, 'wb')
    while True:
        print('Receiving...')
        l = clientSocket.recv(8096)
        if not l:
            print("Fail file transfer")
            #sys.exit()
        while l:
            print('Receiving...')
            f.write(l)
            l = clientSocket.recv(8096)
    f.close()
    print('Done Receiving')
Server.py
def handle_client(self, client, client_addr):
    client_pvt_key = self.client_keys[client]
    client_name = self.clients[client]
    print(f"[{client_addr[0]}]-{client_addr[1]} - [{client_name}] - Connected")
    print(f"Active Connections - {threading.active_count()-1}")
    self.broadcast(f'{client_name} has joined the chat!\n\n')
    aes = AESCipher(client_pvt_key)
    while True:
        try:
            msg = aes.decrypt(client.recv(self.header))  # could probably broadcast directly without decrypting
            if msg == '/filetransfer':
                # send to everyone except the sender
                self.broadcast('filetransfer start')
                thread = threading.Thread(target=self.sndFile, args=(client, ))
                thread.start()
                thread.join()
            elif msg == self.quit_msg:
                break
            print(f"[{client_addr[0]}]-{client_addr[1]} - [{client_name}]")
            msg = f'{client_name}: {msg}'
            self.broadcast(msg)
        except:
            break
    client.close()
    print(f"[{client_addr[0]}]-{client_addr[1]} - [{client_name}] - quit_msged")
    del self.clients[client]
    del self.client_keys[client]
    self.broadcast(f'{client_name} has left the chat\n')
    print(f"Active Connections - {threading.active_count()-2}")

def broadcast(self, msg):
    for client in self.clients:
        aes = AESCipher(self.client_keys[client])
        crypted_msg = aes.encrypt(msg)
        client.send(crypted_msg)

def sndFile(self, client):
    print("---File Transfer---")
    #print("Type a file name...")
    client_pvt_key = self.client_keys[client]
    aes = AESCipher(client_pvt_key)
    #filename = client.recv(1024).decode(self.encodetype)
    #self.broadcast('fuck',filename)
    while True:
        try:
            l = client.recv(8096)
            print('Receiving...')
            #del self.clients[client]
            for client in self.clients:
                client.send(l)
            #client.send(filename.encode(self.encodetype))
            #l = client.recv(8096)
            if not l:
                print("Fail file transfer")
        except:
            print('file fail')
            break
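A common cause of this kind of failure is that chat messages and raw file bytes share one socket with no framing, so the receiver cannot tell where a file chunk ends and a chat message begins. Below is a minimal sketch (not the poster's code) of length-prefixed framing; it assumes an aes object like the AESCipher above that can encrypt raw bytes:

import struct

def send_frame(sock, payload):
    # Prefix each message with its 4-byte big-endian length so the receiver
    # knows exactly where one message ends and the next begins.
    sock.sendall(struct.pack('>I', len(payload)) + payload)

def recv_exact(sock, n):
    # Loop until exactly n bytes arrive; recv() may return partial data.
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-frame')
        buf += chunk
    return buf

def recv_frame(sock):
    (length,) = struct.unpack('>I', recv_exact(sock, 4))
    return recv_exact(sock, length)

def send_file(sock, aes, filename):
    # Each chunk is encrypted and then framed; an empty frame marks EOF.
    with open(filename, 'rb') as f:
        while True:
            chunk = f.read(8096)
            if not chunk:
                break
            send_frame(sock, aes.encrypt(chunk))
    send_frame(sock, b'')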
I am new to Python. I am writing a program that appends an entry to a JSON file whenever a website is unreachable. The websites are stored in the hosts variable, and the check is scheduled every 5 seconds. I used a Pool from multiprocessing to check the websites in parallel without delay, and then write the data to the JSON file. But it writes only one website's data to the file. How can I make it write the data for both at the same time?
Here's the sample code:
import os
from multiprocessing import Pool
from datetime import datetime
import time
import json

hosts = ["www.google.com", "www.smackcoders.com"]
n = len(hosts)

def write(hosts):
    u = "down"
    name = "stack.json"
    if not os.path.exists(name):
        with open(name, 'w') as f:
            f.write('{}')
    result = [(timestamp, {'monitor.status': u,
                           "monitor.id": "tcp-tcp#" + hosts
                           })]
    with open(name, 'rb+') as f:
        f.seek(-1, os.SEEK_END)
        f.truncate()
        for entry in result:
            _entry = '"{}":{},\n'.format(entry[0], json.dumps(entry[1]))
            _entry = _entry.encode()
            f.write(_entry)
        f.write('}'.encode('ascii'))

def main(hosts):
    p = Pool(processes=n)
    result = p.map(write, hosts)

while True:
    timestamp = datetime.now().strftime("%B %d %Y, %H:%M:%S")
    main(hosts)
    time.sleep(5)
My output:
""March 13 2019, 10:49:03":{"monitor.id": "tcp-tcp#www.smackcoders.com", "monitor.status": "down"},
}
Required Output:
{"March 13 2019, 10:49:03":{"monitor.id": "tcp-tcp#www.smackcoders.com", "monitor.status": "down"},"March 13 2019, 10:49:03":{"monitor.id": "tcp-tcp#www.google.com", "monitor.status": "down"},
}
I've made some minor changes to your code and implemented a Lock.
import os
from multiprocessing import Pool, RLock
from datetime import datetime
import time
import json

file_lock = RLock()
hosts = ["www.google.com", "www.smackcoders.com"]
n = len(hosts)

def write(hosts):
    u = "down"
    name = "stack.json"
    if not os.path.exists(name):
        with open(name, 'w') as f:
            f.write('{}')
    result = [(timestamp, {'monitor.status': u,
                           "monitor.id": "tcp-tcp#" + hosts
                           })]
    with file_lock:
        with open(name, 'rb+') as f:
            f.seek(-1, os.SEEK_END)
            f.truncate()
            for entry in result:
                _entry = '"{}":{},\n'.format(entry[0], json.dumps(entry[1]))
                _entry = _entry.encode()
                f.write(_entry)
            f.write('}'.encode('ascii'))

def main(hosts):
    p = Pool(processes=n)
    result = p.map(write, hosts)

while True:
    timestamp = datetime.now().strftime("%B %d %Y, %H:%M:%S")
    main(hosts)
    time.sleep(5)
However, for a long-running process that constantly reads and rewrites a file for logging, this is a poor design: the code has to read an ever-growing file and completely rewrite it on every check. Consider writing the log to a database instead.
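As an illustration of that suggestion, here is a minimal sketch using the standard-library sqlite3 module (the database file and table name are made up for the example):

import sqlite3
from datetime import datetime

conn = sqlite3.connect('monitor.db')
conn.execute('CREATE TABLE IF NOT EXISTS status '
             '(ts TEXT, monitor_id TEXT, monitor_status TEXT)')

def log_status(host, status):
    ts = datetime.now().strftime("%B %d %Y, %H:%M:%S")
    # "with conn" commits automatically; SQLite serializes concurrent writers,
    # so each check only appends a row instead of rewriting the whole log.
    with conn:
        conn.execute('INSERT INTO status VALUES (?, ?, ?)',
                     (ts, 'tcp-tcp#' + host, status))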
Here's a different option that uses Thread instead of Pool.
First, a class that overrides Thread so that join() returns the target's result:
# Class that overrides Thread to get the target's return value from join()
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None, args=None, kwargs=None, Verbose=None):
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        super().__init__(group, target, name, args, kwargs)
        self._return = None

    def run(self):
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self, *args):
        Thread.join(self, *args)
        return self._return
I changed the code to get the status of each host first and then write the results to your file. I also fixed the way the JSON file is written.
import os
from datetime import datetime
import time
import json
from threading import Thread

hosts = ["www.google.com", "www.smackcoders.com"]
filepath = os.path.join(os.getcwd(), "stack.json")
n = len(hosts)

def perform_ping(host_ip):
    """
    You had hardcoded "down"; this method pings the host to check
    whether we get an ICMP response.
    """
    response = os.system("ping -c 1 " + host_ip)
    if response == 0:
        return 'UP'
    else:
        return 'DOWN'

def write_result(timestamp, results):
    if not os.path.exists(filepath):
        current_file = {}
    else:
        # If the file exists, read the current output
        with open(filepath, 'r') as f_read:
            current_file = json.loads(f_read.read())
    inner_result = []
    for result in results:
        host, status = result
        inner_result.append({'monitor.status': status,
                             "monitor.id": "tcp-tcp#" + host
                             })
    current_file[timestamp] = inner_result
    # Write the file with the new input
    with open(filepath, 'w') as f_write:
        f_write.write(json.dumps(current_file))

def main():
    while True:
        thread_list = []
        for host_ip in hosts:
            thread_list.append(ThreadWithReturnValue(target=perform_ping, name=host_ip, args=(host_ip, )))
        results = []
        timestamp = datetime.now().strftime("%B %d %Y, %H:%M:%S")
        for thread in thread_list:
            thread.start()
        for thread in thread_list:
            results.append((thread.name, thread.join()))
        # The pings run in parallel; the result is written once at the end to
        # avoid thread collisions and to avoid reading/writing the file too
        # many times if you increase the number of hosts
        write_result(timestamp, results)
        time.sleep(5)

if __name__ == '__main__':
    main()
I'm trying to use Python to start a process inside an existing container and communicate with it.
What I have:
import docker
import os
client = docker.APIClient()
buf = b"ls"
exec_setup = client.exec_create(container="some-tag", cmd="/bin/bash", stdin=True, tty=True)
socket = client.exec_start(exec_id = exec_setup["Id"], socket=True)
written = os.write(socket.fileno(), buf)
nxt = os.read(socket.fileno(), 1024)
print(nxt)
But when I run it, I get BlockingIOError: [Errno 11] Resource temporarily unavailable.
I'd appreciate any help.
Try attach_socket and set it to non-blocking mode.
Here, params is a dict of the parameters you want to receive.
sock = client.attach_socket(cont, params=params)
sock._sock.setblocking(False)
After that, use select in an infinite loop to write to and read from the socket.
buff = b''
while True:
    read, write, _ = select.select([sock], [sock], [], 1)
    if read:
        try:
            data = socket_read(sock)
        except Exception:
            break
        if data is None:
            break
        buff += data
    if write and stdin:
        try:
            written = socket_write(sock, stdin)
        except Exception:
            break
        stdin = stdin[written:]
        if not stdin:
            sock._sock.shutdown(socket.SHUT_WR)
socket_read/socket_write here are helper functions wrapping os.read/os.write.
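For example, they could look like this minimal sketch (these helpers are not part of docker-py; they assume the socket object exposes a real file descriptor):

import os

def socket_read(sock, n=4096):
    # Return None on EOF so the caller's "if data is None" check fires.
    data = os.read(sock.fileno(), n)
    return data if data else None

def socket_write(sock, data):
    # Return the number of bytes actually written so the caller can
    # advance its stdin buffer.
    return os.write(sock.fileno(), data)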
I am running Python 2.7 on a Unix environment (tested on Ubuntu and OSX)
I have the following programs:
With os.open():
[SCRIPT 1]
import os

pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"

def set_connection():
    pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
    for pipe_name in pipe_names:
        if os.path.exists(pipe_name):
            os.remove(pipe_name)
            os.mkfifo(pipe_name)
        else:
            os.mkfifo(pipe_name)
    pipe_1 = os.open(pipe_1_name, os.O_WRONLY)
    os.write(pipe_1, "server_message_0\n")
    pipe_2 = open(pipe_2_name, 'r')
    received = pipe_2.readline()[:-1]
    print "[0] Now processing if received is correct: " + received
    pipe_3 = open(pipe_3_name, 'r')
    received = pipe_3.readline()[:-1]
    print "[1] Now processing if received is correct: " + received
    print "Connection established."
    return pipe_1, pipe_2, pipe_3

def main():
    pipe_1, pipe_2, pipe_3 = set_connection()
    print str(pipe_1)
    print str(pipe_2)
    print str(pipe_3)

if __name__ == "__main__":
    main()
[SCRIPT 2]
import os

pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"

def get_connection():
    pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
    for pipe_name in pipe_names:
        if not os.path.exists(pipe_name):
            raise Exception("Pipe " + pipe_name + " does not exist!")
    pipe_1 = open(pipe_1_name, 'r')
    received = pipe_1.readline()[:-1]
    print "[0] Now processing if received is correct: " + received
    pipe_2 = os.open(pipe_2_name, os.O_WRONLY)
    os.write(pipe_2, "client_message_0\n")
    pipe_3 = os.open(pipe_3_name, os.O_WRONLY)
    os.write(pipe_3, "client_message_1\n")
    print "Connection established."
    return pipe_1, pipe_2, pipe_3

def main():
    pipe_1, pipe_2, pipe_3 = get_connection()
    print str(pipe_1)
    print str(pipe_2)
    print str(pipe_3)

if __name__ == "__main__":
    main()
The logic is simple:
[Pipe 1]
1. Script 1 opens a write pipe to Script 2.
2. Script 2 reads from the pipe.
[Pipe 2]
3. Script 2 opens a write pipe to Script 1.
4. Script 1 reads from the pipe.
[Pipe 3]
5. Script 2 opens a write pipe to Script 1.
6. Script 1 reads from the pipe.
Works exactly as expected.
Here is the problem: I don't want to use os.open(). I would like to receive a file object and use it to interface with the pipe. Clearly it is not impossible, since I can read from a pipe with a file object. However, the following scripts do not work.
Without os.open()
[Script 1]
import os

pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"

def set_connection():
    pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
    for pipe_name in pipe_names:
        if os.path.exists(pipe_name):
            os.remove(pipe_name)
            os.mkfifo(pipe_name)
        else:
            os.mkfifo(pipe_name)
    pipe_1 = open(pipe_1_name, 'w')
    pipe_1.write("server_message_0\n")
    pipe_2 = open(pipe_2_name, 'r')
    received = pipe_2.readline()[:-1]
    print "[0] Now processing if received is correct: " + received
    pipe_3 = open(pipe_3_name, 'r')
    received = pipe_3.readline()[:-1]
    print "[1] Now processing if received is correct: " + received
    print "Connection established."
    return pipe_1, pipe_2, pipe_3

def main():
    pipe_1, pipe_2, pipe_3 = set_connection()
    print str(pipe_1)
    print str(pipe_2)
    print str(pipe_3)

if __name__ == "__main__":
    main()
[Script 2]
import os

pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"

def get_connection():
    pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
    for pipe_name in pipe_names:
        if not os.path.exists(pipe_name):
            raise Exception("Pipe " + pipe_name + " does not exist!")
    pipe_1 = open(pipe_1_name, 'r')
    received = pipe_1.readline()[:-1]
    print "[0] Now processing if received is correct: " + received
    pipe_2 = open(pipe_2_name, 'w')
    pipe_2.write("client_message_0\n")
    pipe_3 = open(pipe_3_name, 'w')
    pipe_3.write("client_message_1\n")
    print "Connection established."
    return pipe_1, pipe_2, pipe_3

def main():
    pipe_1, pipe_2, pipe_3 = get_connection()
    print str(pipe_1)
    print str(pipe_2)
    print str(pipe_3)

if __name__ == "__main__":
    main()
They look the same, don't they? The only difference is how I open the FIFO: instead of os.open(pipe_name, os.O_WRONLY) I use pipe = open(pipe_name, 'w').
In the second set of scripts, the ones that don't use os.open(), Script 1 blocks at pipe_2 = open(pipe_2_name, 'r') while Script 2 blocks at pipe_2 = open(pipe_2_name, 'w').
Why is this happening?
Sorry for the wall of text. I am really confused about this issue.
What happens in the second set of scripts, the ones that don't use
os.open(), Script 1 blocks at pipe_2 = open(pipe_2_name, 'r') while Script 2 blocks at pipe_2 = open(pipe_2_name, 'w').
No, Script 2 blocks at received = pipe_1.readline()[:-1].
Why is this happening?
It's because Script 1's open(pipe_1_name, 'w') causes the written message to be buffered in fixed-size chunks (typically 4096 or 8192 bytes), so the pipe_1.write("server_message_0\n") does not yet write anything to the pipe, but only to the buffer, and Script 2 doesn't get anything to read. See open() and also How often does python flush to a file?
To cure this, since your messages are complete lines, it suffices to use line buffering, e.g.
pipe_1 = open(pipe_1_name, 'w', 1)
(and likewise for the other write pipes).
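Alternatively (not mentioned in the original answer), an explicit flush after each complete message achieves the same thing without changing the buffering mode:

pipe_1 = open(pipe_1_name, 'w')
pipe_1.write("server_message_0\n")
pipe_1.flush()  # push the buffered line into the FIFO immediately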
I have a problem constructing a gevent tail function. In general, the code works when I comment out gevent.sleep in the loop, but then CPU utilization is 100%. When I leave gevent.sleep in, the program runs but nothing happens.
Gevent version is 1.0b1.
import os
import gevent

def follow(filename):
    fd = os.open(filename, os.O_RDONLY | os.O_NONBLOCK)
    os.lseek(fd, 0, os.SEEK_END)
    hub = gevent.get_hub()
    watcher = hub.loop.io(fd, 1)
    while True:
        hub.wait(watcher)
        lines = os.read(fd, 4096).splitlines()
        if not lines:
            #gevent.sleep(.1)
            continue
        else:
            for line in lines:
                print "%s:%s" % (filename, line)
    os.close(fd)

if __name__ == '__main__':
    job1 = gevent.spawn(follow, '/var/log/syslog')
    job2 = gevent.spawn(follow, '/var/log/messages')
    gevent.joinall([job1, job2])
Starting with gevent 1.0b2 you can use stat watchers to get notifications on file changes.
See the code and the libev documentation for stat watchers.
Admittedly, this code doesn't 'tail' the file, it just prints the whole file, but it shows how loop.stat works. It waits for the file to change, or even just be touched, and then prints its content. While it waits, it takes almost no resources!
import gevent, os

def follow(filename):
    hub = gevent.get_hub()
    watcher = hub.loop.stat(filename)
    while True:
        hub.wait(watcher)
        with open(filename) as f:
            print f.read()

if __name__ == '__main__':
    jobs = [gevent.spawn(follow, '/var/log/syslog')]
    jobs += [gevent.spawn(follow, '/var/log/messages')]
    gevent.joinall(jobs)
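Building on that, a sketch of an actual tail using the same stat watcher might look like this (assuming gevent >= 1.0b2; the offset bookkeeping is my addition):

import os
import gevent

def follow(filename):
    hub = gevent.get_hub()
    watcher = hub.loop.stat(filename)
    with open(filename) as f:
        f.seek(0, os.SEEK_END)                 # start at EOF, like tail -f
        while True:
            hub.wait(watcher)                  # sleeps until the file changes
            for line in iter(f.readline, ''):  # read only what was appended
                print "%s:%s" % (filename, line),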
My first approach was obviously wrong. This works perfectly:
import os
import gevent

def follow(filename):
    fd = os.open(filename, os.O_RDONLY | os.O_NONBLOCK)
    os.lseek(fd, 0, os.SEEK_END)
    while True:
        lines = os.read(fd, 4096).splitlines()
        if not lines:
            gevent.sleep(.5)
            continue
        else:
            for line in lines:
                print "%s:%s" % (filename, line)
    os.close(fd)

if __name__ == '__main__':
    job1 = gevent.spawn(follow, '/var/log/syslog')
    job2 = gevent.spawn(follow, '/var/log/messages')
    gevent.joinall([job1, job2])