I have a text file with lines of hexadecimals. I want each line to be transformed into a jpeg file because they are photos. I can do that individually using binascii.a2b_hex like this (I've shortened the hex):
data = binascii.a2b_hex("FFD8FFE")
with open('image.jpg', 'wb') as image_file:
#image_file.write(data)
Now I want to do this in bulk. So I have a text file with lines of hexadecimals and I want each hexadecimal to be written to its own jpeg file. I think I'm almost there but my code gives me this error:
ValueError: too many values to unpack
Here is the code:
import binascii
text_file = open("photos-clean.txt", "w")
#for each hexadecimal, put it in between single quotes so it becomes a string. Also remove the first two chars from a line.
with open('photos.txt', 'r') as f:
for i in f:
photo = i[2:]
quotes = "'" + photo.rstrip() + "'"
print quotes
text_file.write(quotes)
text_file.write("\n")
text_file.close()
#for each hexadecimal, transform it to a jpeg with binascii and write it to its own jpeg file
with open("photos-clean.txt", "r") as f2:
for i, data in (f2):
transform = binascii.a2b_hex(i)
with open('photo{}.jpg'.format(transform), 'wb') as output:
output.write(data)
Edit: I have the answer and this is what I should've done:
import binascii
text_file = open("photos-clean.txt", "w")
with open('photos.txt', 'r') as f:
for i in f:
photo = i[2:]
text_file.write(photo)
text_file.write("\n")
text_file.close()
with open("photos-clean.txt", "r") as f2:
count=0
for i in f2:
count = count + 1
cleaned = i.strip("\r\n")
transform = binascii.a2b_hex(cleaned)
with open("{}.jpg".format(count), 'wb') as output:
output.write(transform)
I guess you are getting the error at the line for i, data in (f2):.
You try to unpack 2 values, i and data, from f2; I guess you need enumerate as below.
Also, I assume you want to use i as the index in the filename and write transform instead of data into output
with open("photos-clean.txt", "r") as f2:
    # enumerate() yields (index, line) pairs -- this is what the original
    # `for i, data in (f2):` was missing, causing the unpack ValueError
    for i, data in enumerate(f2):
        # strip the trailing newline: a2b_hex() raises binascii.Error
        # on any non-hexadecimal character, including '\n'
        transform = binascii.a2b_hex(data.strip())
        with open('photo{}.jpg'.format(i), 'wb') as output:
            output.write(transform)
Related
So as the title suggests I'm trying to write an array to a file, but then I need to recall that array and append more to it and then write it back to the same file, and then this same process over and over again.
The code I have so far is:
c = open(r"board.txt", "r")
current_position = []
if filesize > 4:
current_position = [c.read()]
print(current_position)
stockfish.set_position(current_position)
else:
stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
#There is a lot more code here that appends stuff to the array but I don't want to add anything that will be irrelevant to the problem
with open('board.txt', 'w') as filehandle:
for listitem in current_position:
filehandle.write('"%s", ' % listitem)
z = open(r"board.txt", "r")
print(z.read())
My array end up looking like this when I read the file
""d2d4", "d7d5", ", "a2a4", "e2e4",
All my code is on this replit if anyone needs more info
A few ways to do this:
First, use newline as a delimiter (simple, not the most space efficient):
# write
my_array = ['d2d4', 'd7d5']
with open('board.txt', 'w+') as f:
f.writelines([i + '\n' for i in my_array])
# read
with open('board.txt') as f:
my_array = f.read().splitlines()
If your character strings all have the same length, you don't need a delimiter:
# write
my_array = ['d2d4', 'd7d5'] # must all be length 4 strs
with open('board.txt', 'w+') as f:
f.writelines(my_array)
# read file, splitting string into groups of 4 characters
with open('board.txt') as f:
in_str = f.read()
my_array = [in_str[i:i+4] for i in range(0, len(in_str), 4)]
Finally, consider pickle, which allows writing/reading Python objects to/from binary files:
import pickle
# write
my_array = ['d2d4', 'd7d5']
with open('board.board', 'wb+') as f: # custom file extension, can be anything
pickle.dump(my_array, f)
# read
with open('board.board', 'rb') as f:
my_array = pickle.load(f)
as you're reusing the file to append data to it, you should replace:
open('board.txt', 'w')
with
open('board.txt', 'a')
a denotes append mode, which will not overwrite what you have in your file; it will append to it.
How can I make this work to delete values from my txt file? I have an 11 line file of 4 digit numbers, I can add but am stuck on deleting.
def delete_file(string_in):
print("deleted_from_file")
with open('test.txt', 'a+') as f:
d = f.readlines()
f.seek(11)
for i in d:
if i != " item = {} ":
f.write(i)
f.close()
a+ mode means to write at the end of the file. So the seek has no effect, it always writes the lines after all the existing lines.
It would be easier to just open the file separately for reading and writing. Otherwise you also need to truncate the file after all the writes.
BTW, you don't need to use f.close() when you use with -- it automatically closes (that's the whole point).
The lines returned by readlines() end with newlines, you need to strip those off before comparing with the string.
def delete_file(string_in):
    print("deleted_from_file")
    # Read the whole file into memory first, then reopen it for writing
    # and copy back every line except the one we want to remove.
    with open('test.txt', 'r') as f:
        all_lines = f.readlines()
    with open('test.txt', 'w') as f:
        for line in all_lines:
            # readlines() keeps the trailing '\n', so strip it before comparing
            if line.rstrip('\n') != " item = {} ":
                f.write(line)
You can store all the needed lines into a list using a list comprehension, and then write the lines into the file again after the file empties out:
def delete_file(string_in):
    print("deleted_from_file")
    # Filter out the unwanted line while reading, then dump the surviving
    # lines back into the file in one go.
    with open('test.txt', 'r') as f:
        kept = [line for line in f if line.strip('\n') != " item = {} "]
    with open('test.txt', 'w') as f:
        f.writelines(kept)
I have working code that takes a directory of csv files and hashes one column of each line, then aggregates all files together. The issue is the output only displays the first hash value, not re-running the hash for each line. Here is the code:
import glob
import hashlib
files = glob.glob( '*.csv' )
output="combined.csv"
with open(output, 'w' ) as result:
for thefile in files:
f = open(thefile)
m = f.readlines()
for line in m[1:]:
fields = line.split()
hash_object = hashlib.md5(b'(fields[2])')
newline = fields[0],fields[1],hash_object.hexdigest(),fields[3]
joined_line = ','.join(newline)
result.write(joined_line+ '\n')
f.close()
You are creating a hash of a fixed bytestring b'(fields[2])'. That value has no relationship to your CSV data, even though it uses the same characters as are used in your row variable name.
You need to pass in bytes from your actual row:
hash_object = hashlib.md5(fields[2].encode('utf8'))
I am assuming your fields[2] column is a string, so you'd need to encode it first to get bytes. The UTF-8 encoding can handle all codepoints that could possibly be contained in a string.
You also appear to be re-inventing the CSV reading and writing wheel; you probably should use the csv module instead:
import csv
# ...
with open(output, 'w', newline='') as result:
writer = csv.writer(result)
for thefile in files:
with open(thefile, newline='') as f:
reader = csv.reader(f)
next(reader, None) # skip first row
for fields in reader:
hash_object = hashlib.md5(fields[2].encode('utf8'))
newrow = fields[:2] + [hash_object.hexdigest()] + fields[3:]
writer.writerow(newrow)
I'm trying to write a very simple program using tuples. Which works for the most part but I can't really get it to work by accessing individual elements in the tuples.
I'm taking input from a file containing some info convert it to tuple and the store the data in some other file.
It works if I write all the data or just the first tuple but not in any other case. Following is the code
filename = "in.txt"
stock_market = []
for line in open(filename):
fields = line.split(",")
name = fields[0]
shares = int(fields[1])
stock = (name,shares)
portfolio.append(stock)
f = open("output.txt","w")
print >>f, portfolio[1]
f.close()
You can't append to portfolio without defining it first. Try something like this:
inFilename = "in.txt"
outFilename = "output.txt"
with open(inFilename, 'r') as inf:
with open(outFilename, 'w') as outf:
for line in inf:
fields = line.split(',')
print >>outf, (fields[0], fields[1])
I have these different lines with values in a text file
sample1:1
sample2:1
sample3:0
sample4:15
sample5:500
and I want the number after the ":" to be updated sometimes
I know I can split the name by ":" and get a list with 2 values.
f = open("test.txt","r")
lines = f.readlines()
lineSplit = lines[0].split(":",1)
lineSplit[1] #this is the value I want to change
I'm not quite sure how to update the lineSplit[1] value with the write functions
You can use the fileinput module, if you're trying to modify the same file:
>>> strs = "sample4:15"
Take the advantage of sequence unpacking to store the results in variables after splitting.
>>> sample, value = strs.split(':')
>>> sample
'sample4'
>>> value
'15'
Code:
import fileinput
for line in fileinput.input(filename, inplace = True):
sample, value = line.split(':')
value = int(value) #convert value to int for calculation purpose
if some_condition:
# do some calculations on sample and value
# modify sample, value if required
#now the write the data(either modified or still the old one) to back to file
print "{}:{}".format(sample, value)
Strings are immutable, meaning, you can't assign new values inside them by index.
But you can split up the whole file into a list of lines, and change individual lines (strings) entirely. This is what you're doing in lineSplit[1] = A_NEW_INTEGER
# read all lines, update the matching ones in memory, then rewrite the file
with open(filename, 'r') as f:
    lines = f.read().splitlines()

for i, line in enumerate(lines):
    if condition:  # whatever test selects the line(s) you want to update
        lineSplit = line.split(':')
        lineSplit[1] = new_integer
        lines[i] = ':'.join(lineSplit)

with open(filename, 'w') as f:
    f.write('\n'.join(lines))
Maybe something as such (assuming that each first element before : is indeed a key):
from collections import OrderedDict

with open('fin') as fin:
    # strip the newline so the parsed values don't carry a trailing '\n'
    # (otherwise every rejoined line would get a doubled newline below)
    samples = OrderedDict(line.rstrip('\n').split(':', 1) for line in fin)

samples['sample3'] = 'something else'

# the output file must be opened in write mode ('w'); a file opened
# for reading rejects writelines()
with open('output', 'w') as fout:
    lines = (':'.join(el) + '\n' for el in samples.items())
    fout.writelines(lines)
Another option is to use csv module (: is a column delimiter in your case).
Assuming there is a test.txt file with the following content:
sample1:1
sample2:1
sample3:0
sample4:15
sample5:500
And you need to increment each value. Here's how you can do it:
import csv

# read the whole file into memory first so we can rewrite it in place
with open('test.txt', 'r') as f:
    reader = csv.reader(f, delimiter=":")
    lines = [line for line in reader]

# write the file
with open('test.txt', 'w') as f:
    writer = csv.writer(f, delimiter=":")
    for line in lines:
        # edit the data here
        # e.g. increment each value
        line[1] = int(line[1]) + 1
    # write all rows once, after the loop -- calling writerows(lines)
    # inside the loop would duplicate the whole file on every iteration
    writer.writerows(lines)
The contents of test.txt now is:
sample1:2
sample2:2
sample3:1
sample4:16
sample5:501
But, anyway, fileinput sounds more logical to use in your case (editing the same file).
Hope that helps.