I can't find anywhere how to open, read, and rewrite a TensorBoard event file (without depending on TensorFlow).
Using the following code yields a mysterious checksum error.
Do you know what is causing this and how I can rewrite the file?
(If you print the event it looks normal.)
Here is the code:
import sys
import struct
import mmap

import tensorboard.compat.proto.event_pb2 as event_pb2

log_file = sys.argv[1]

def read(data, offset):
    header = struct.unpack_from('Q', data, offset)
    event_str = data[offset + 12:offset + 12 + int(header[0])]
    return 12 + int(header[0]) + 4 + offset, event_str

with open(log_file, 'rb') as f:
    data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    offset = 0
    while offset < len(data):
        offset, event_str = read(data, offset)
        event = event_pb2.Event()
        event.ParseFromString(event_str)
        with open('test.tfevents', 'ab') as w:
            w.write(event.SerializeToString())
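For reference, this is the record framing that read() assumes each event is wrapped in: an 8-byte length, a masked CRC32C of the length, the serialized event, and a masked CRC32C of the payload. The sketch below is my own illustration of that framing, not part of the original code; it assumes the third-party crc32c package and the usual masking constant, so treat it as unverified:

import struct

import crc32c  # third-party package, assumed to be installed

def masked_crc(data):
    # TFRecord-style masked CRC32C, used for both the length field and the payload
    crc = crc32c.crc32c(data) & 0xFFFFFFFF
    rotated = ((crc >> 15) | (crc << 17)) & 0xFFFFFFFF
    return (rotated + 0xa282ead8) & 0xFFFFFFFF

def write_record(f, payload):
    # length (uint64 LE), masked CRC of the length, payload bytes, masked CRC of the payload
    header = struct.pack('<Q', len(payload))
    f.write(header)
    f.write(struct.pack('<I', masked_crc(header)))
    f.write(payload)
    f.write(struct.pack('<I', masked_crc(payload)))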
I am trying to convert from stp to stl. The code is:
from OCC.Core.STEPControl import STEPControl_Reader
from OCC.Core.StlAPI import StlAPI_Writer
import os
os.chdir(os.path.dirname(os.path.abspath(__file__)))
input_file = 'stl_test.stp' # input STEP (AP203/AP214 file)
output_file = 'stl_test.stl' # output STL file
step_reader = STEPControl_Reader()
step_reader.ReadFile(input_file)
step_reader.TransferRoot()
myshape = step_reader.Shape()
print("File read")
# Export to STL
stl_writer = StlAPI_Writer()
stl_writer.SetASCIIMode(True)
stl_writer.Write(myshape, output_file)
print(stl_writer.Write(myshape, output_file))
print("Written")
This is not saving anything in the working directory, and print(stl_writer.Write(myshape, output_file)) prints False.
Can anyone help?
I had the same problem, even though your approach is just what the docs describe...
The following solution worked for me:
# Imports
from OCC.Core.STEPControl import STEPControl_Reader
from OCC.Core.StlAPI import StlAPI_Writer
from OCC.Core.BRepMesh import BRepMesh_IncrementalMesh
input_file = 'stl_test.stp' # input STEP (AP203/AP214 file)
output_file = 'stl_test.stl' # output STL file
### load stp file
step_reader = STEPControl_Reader()
stat = step_reader.ReadFile(input_file)
if stat == 1:
    step_reader.TransferRoot()
    body = step_reader.Shape()
else:
    print('...........Load failed')
    raise SystemExit('Error')
print("File read")
### Convert and export body to STL File
mesh = BRepMesh_IncrementalMesh(body, 0.1)
writer = StlAPI_Writer()
writer.Write(mesh.Shape(), output_file)
print("Written")
Maybe it has something to do with the "AP203/AP214 file"?! I don't really know.
I have the following script:
import sys, os, re

pid = sys.argv[1]
maps_file = open("/proc/%s/maps" % pid, 'r')
mem_file = open("/proc/%s/mem" % pid, 'r')

for line in maps_file.readlines():  # for each mapped region
    m = re.match(r'([0-9A-Fa-f]+)-([0-9A-Fa-f]+) ([-r])', line)
    if m.group(3) == 'r':  # if this is a readable region
        start = int(m.group(1), 16)
        end = int(m.group(2), 16)
        mem_file.seek(start)  # seek to region start
        chunk = mem_file.read(end - start)  # read region contents
        #print chunk,  # dump contents to standard output
        mem_dump = open(pid + ".bin", "wb")
        mem_dump.write(str(chunk,))
        mem_dump.close()

maps_file.close()
mem_file.close()
All works well (dumping the process' memory) so far, but I can't save the data to a file. What am I doing wrong?
Could it be that the files are getting written to somewhere you don't expect (looks like they will be written to the current directory)?
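If it is indeed a path issue, a minimal sketch along these lines (Python 3; the absolute path, the single open of the dump file, and the try/except around unreadable regions are my additions rather than anything from the question) makes the destination explicit:

import os
import re
import sys

pid = sys.argv[1]
# Use an absolute path so it is obvious where the dump ends up,
# and open it once so all regions go into the same file.
dump_path = os.path.abspath(pid + ".bin")

with open("/proc/%s/maps" % pid) as maps_file, \
     open("/proc/%s/mem" % pid, "rb") as mem_file, \
     open(dump_path, "wb") as mem_dump:
    for line in maps_file:
        m = re.match(r"([0-9A-Fa-f]+)-([0-9A-Fa-f]+) ([-r])", line)
        if m and m.group(3) == "r":
            start, end = int(m.group(1), 16), int(m.group(2), 16)
            try:
                mem_file.seek(start)
                mem_dump.write(mem_file.read(end - start))
            except (OSError, OverflowError):
                pass  # some regions (e.g. vsyscall) cannot be read

print("dump written to", dump_path)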
I need to set up some test conditions to simulate a filled-up disk. I created the following to simply write garbage to the disk:
#!/usr/bin/python
import os
import sys
import mmap
def freespace(p):
    """
    Returns the number of free bytes on the drive that ``p`` is on.
    """
    s = os.statvfs(p)
    return s.f_bsize * s.f_bavail

if __name__ == '__main__':
    drive_path = sys.argv[1]
    output_path = sys.argv[2]
    output_file = open(output_path, 'w')
    while freespace(drive_path) > 0:
        output_file.write("!")
        print(freespace(drive_path))
        output_file.flush()
    output_file.close()
As far as I can tell by looking at the return value of freespace, the write method does not actually write anything to disk until the file is closed, which makes the while condition ineffective.
Is there a way I can write the data directly to the file? Or is there another solution?
This is untested, but I imagine something along these lines will be the quickest way to fill the disk easily:
import sys
import errno
write_str = "!"*1024*1024*5 # 5MB
output_path = sys.argv[1]
with open(output_path, "w") as f:
    while True:
        try:
            f.write(write_str)
            f.flush()
        except IOError as err:
            if err.errno == errno.ENOSPC:
                # Out of space: retry with progressively smaller writes
                # until even a single byte no longer fits.
                write_str_len = len(write_str)
                if write_str_len > 1:
                    write_str = write_str[:write_str_len // 2]
                else:
                    break
            else:
                raise
You could try/catch a disk full exception on write.
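A minimal sketch of that idea (the file name is just an example; on Python 3 the disk-full error surfaces as an OSError with errno.ENOSPC):

import errno

try:
    with open("filler.bin", "wb") as f:
        while True:
            f.write(b"\0" * 1024 * 1024)  # keep writing 1 MiB blocks
            f.flush()
except OSError as err:
    if err.errno != errno.ENOSPC:
        raise
    print("disk is full")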
Here is the situation:
I get gzipped xml documents from Amazon S3
import boto
from boto.s3.connection import S3Connection
from boto.s3.key import Key
conn = S3Connection('access Id', 'secret access key')
b = conn.get_bucket('mydev.myorg')
k = Key(b)
k.key = 'documents/document.xml.gz'
I read them into a file like this:
import gzip
f = open('/tmp/p', 'w')
k.get_file(f)
f.close()
r = gzip.open('/tmp/p', 'rb')
file_content = r.read()
r.close()
Question
How can I ungzip the streams directly and read the contents?
I do not want to create temp files, they don't look good.
Yes, you can use the zlib module to decompress byte streams:
import zlib
def stream_gzip_decompress(stream):
    dec = zlib.decompressobj(32 + zlib.MAX_WBITS)  # offset 32 to skip the header
    for chunk in stream:
        rv = dec.decompress(chunk)
        if rv:
            yield rv
The offset of 32 signals to zlib that a gzip header is expected, and that it should be skipped automatically.
The S3 key object is an iterator, so you can do:
for data in stream_gzip_decompress(k):
    # do something with the decompressed data
I had to do the same thing and this is how I did it:
import gzip
import StringIO
f = StringIO.StringIO()
k.get_file(f)
f.seek(0) #This is crucial
gzf = gzip.GzipFile(fileobj=f)
file_content = gzf.read()
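On Python 3 the same trick should work with io.BytesIO in place of StringIO (a sketch, assuming the same boto Key object k as above):

import gzip
import io

f = io.BytesIO()
k.get_file(f)
f.seek(0)  # rewind before handing the buffer to GzipFile
file_content = gzip.GzipFile(fileobj=f).read()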
For Python 3.x and boto3:
I used BytesIO to read the compressed file into a buffer object, then used zipfile to open the stream as uncompressed data, and was able to read it line by line.
import io
import zipfile
import boto3
import sys
s3 = boto3.resource('s3', 'us-east-1')
def stream_zip_file():
    count = 0
    obj = s3.Object(
        bucket_name='MonkeyBusiness',
        key='/Daily/Business/Banana/{current-date}/banana.zip'
    )
    buffer = io.BytesIO(obj.get()["Body"].read())
    print(buffer)
    z = zipfile.ZipFile(buffer)
    foo2 = z.open(z.infolist()[0])
    print(sys.getsizeof(foo2))
    line_counter = 0
    for _ in foo2:
        line_counter += 1
    print(line_counter)
    z.close()

if __name__ == '__main__':
    stream_zip_file()
You can try a PIPE and read the contents without downloading the file:
import subprocess
c = subprocess.Popen('zcat -c <gzip file name>', shell=True,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for row in c.stdout:
    print(row)
In addition, "/dev/fd/" + str(c.stdout.fileno()) gives you a FIFO file name (a named pipe) that can be passed to another program.
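A sketch of that idea (Python 3 only; wc -l is just an arbitrary example consumer, and pass_fds is needed so the child process inherits the pipe's file descriptor):

import subprocess

c = subprocess.Popen('zcat -c <gzip file name>', shell=True,
                     stdout=subprocess.PIPE)
fifo_path = "/dev/fd/" + str(c.stdout.fileno())

# Let another program read the decompressed stream through the named pipe.
consumer = subprocess.Popen(['wc', '-l', fifo_path],
                            pass_fds=(c.stdout.fileno(),))
consumer.wait()
c.wait()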
I am trying to unzip a gzipped file in Python using the gzip module. The precondition is that I get 160 bytes of data at a time, and I need to unzip each piece before I request the next 160 bytes. Partial unzipping is OK before requesting the next 160 bytes. The code I have is:
import gzip
import time
import StringIO
file = open('input_cp.gz', 'rb')
buf = file.read(160)
sio = StringIO.StringIO(buf)
f = gzip.GzipFile(fileobj=sio)
data = f.read()
print data
The error I am getting is IOError: CRC check failed. I am assuming this is because it expects the entire gzipped content to be present in buf, whereas I am reading only 160 bytes at a time. Is there a workaround for this?
Thanks
Create your own class with a read() method (and whatever else GzipFile needs from fileobj, like close and seek) and pass it to GzipFile. Something like:
class MyBuffer(object):
    def __init__(self, input_file):
        self.input_file = input_file

    def read(self, size=-1):
        if size < 0:
            size = 160
        return self.input_file.read(min(160, size))
Then use it like:
file = open('input_cp.gz', 'rb')
mybuf = MyBuffer(file)
f = gzip.GzipFile(fileobj=mybuf)
data = f.read()
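An alternative sketch for the 160-bytes-at-a-time case uses zlib.decompressobj directly, which yields whatever it can decompress from each chunk and only verifies the CRC once the whole stream has been fed in (16 + zlib.MAX_WBITS tells zlib to expect a gzip header):

import zlib

dec = zlib.decompressobj(16 + zlib.MAX_WBITS)  # expect a gzip header
with open('input_cp.gz', 'rb') as f:
    while True:
        buf = f.read(160)
        if not buf:
            break
        data = dec.decompress(buf)  # partial output for this chunk
        if data:
            print(data)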