Python doesn't create a new file

I'm using Python 3.4. I have a program in which I open two files:
problems = open("out/problems.tex", 'w')
answers = open("out/answers.tex", 'w')
If I run this I get an error:
Traceback (most recent call last):
File "<encoding error>", line 10, in <module>
File "C:\Python33\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode characters in position 247-251: character maps to <undefined>
If I run this:
problems = open("out/problems.tex", 'w', encoding='utf8')
answers = open("out/answers.tex", 'w', encoding='utf8')
I get no errors, but I also get no files 'problems.tex' and 'answers.tex'. Does anybody know what I am doing wrong?
Here is my full program:
from polygen import *
head_file = open("tex/head.tex", 'r', encoding="utf8")
ground_file = open("tex/ground.tex", 'r', encoding="utf8")
problems = open("out/problems.tex", 'w', encoding="utf8")
answers = open("out/answers.tex", 'w', encoding="utf8")
head = head_file.read()
problems.write(head)
answers.write(head)
t = polymult(7, 1, 2, 2, 3, 4)
problems.write(t[0])
answers.write(t[1])
ground = ground_file.read()
problems.write(ground)
answers.write(ground)
problems.close()
answers.close()

So weird. Two suggestions:
Use an absolute path and make sure the file actually gets generated (see the sketch below).
Use "rb"/"wb" mode instead of "r"/"w" if you have non-ASCII characters in the source file.

Related

UnicodeDecodeError: 'utf-8' codec can't decode byte 0xbf in position 355: invalid start byte

I've been trying to iterate through a csv file with the following code:
import csv
import os, sys

directory = "/Users/aliharam/Desktop/Lamis File"
files = []
for filename in os.listdir(directory):
    f = os.path.join(directory, filename)
    # checking if it is a file
    if os.path.isfile(f):
        files.append(f)
files.pop()

for i in files:
    with open(i, 'r') as csvfile:
        datareader = csv.reader(csvfile)
        for row in datareader:
            print(row)
This is the error I am getting:
Traceback (most recent call last):
File "/Users/aliharam/PycharmProjects/LamisTasks/Normalization.py", line 16, in <module>
for row in datareader:
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xbf in position 355: invalid start byte
['\tAli Haram \tAli Haram ']
Process finished with exit code 1
How do I fix this?
I tried using
dataset = pd.read_csv(i, header=0, encoding='unicode_escape')
and
with io.open(filename, 'r', encoding='utf-8') as fn:
    lines = fn.readlines()
but neither worked.
The file your program reads contains a byte (at position 355) that is not valid UTF-8.
If the file is supposed to be UTF-8 encoded, then there is an error in your data file, or the file is actually stored in a different encoding. First you need to find out which encoding the file really uses.
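One way to investigate (a sketch, not part of the original answer; chardet is a third-party package you would have to install, and the path below is a hypothetical example): read the file in binary mode, look at the bytes around position 355, and let chardet guess the encoding. If the guess is something like Windows-1252, pass that encoding to open() instead of utf-8.
import csv
import chardet  # third-party: pip install chardet

path = "/Users/aliharam/Desktop/Lamis File/somefile.csv"  # hypothetical example path

with open(path, 'rb') as raw:
    data = raw.read()

print(data[345:365])          # inspect the bytes surrounding the offending position
guess = chardet.detect(data)  # e.g. {'encoding': 'Windows-1252', 'confidence': 0.73, ...}
print(guess)

# If the guess looks plausible, decode with it instead of utf-8:
with open(path, 'r', encoding=guess['encoding']) as csvfile:
    for row in csv.reader(csvfile):
        print(row)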

reading .dat file in python (Agilent 4294A Precision Impedance Analyzer)

I've been trying to read a .dat file from an Agilent impedance analyzer. I keep getting the same error regardless of the method I try. Any ideas how to get around this issue?
Thanks in advance.
# import csv
# Method 1
# with open("RP.dat") as infile, open("outfile.csv", "w") as outfile:
#     csv_writer = csv.writer(outfile)
#     prev = ''
#     csv_writer.writerow(['ID', 'PARENT_ID'])
#     for line in infile.read().splitlines():
#         csv_writer.writerow([line, prev])
#         prev = line

# Method 2
# import numpy as np
# filename = 'RP.dat'
# indata = np.loadtxt(filename)
# print(indata)

# Method 3
with open("RP.dat") as infile:
    file_contents = infile.readlines()
print(file_contents)
C:\Users\benjy\Workspace\urop>python read_dat.py
Traceback (most recent call last):
File "C:\Users\benjy\Workspace\urop\read_dat.py", line 17, in <module>
file_contents = infile.readlines()
File "C:\Users\benjy\AppData\Local\Programs\Python\Python39\lib\encodings\cp1252.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x81 in position 672: character maps to <undefined>
You can use the codecs library:
import codecs

with codecs.open('RP.dat', errors='ignore', encoding='utf-8') as f:
    dat = f.read()
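Note that errors='ignore' silently throws away every byte it cannot decode. A couple of lossless alternatives (a sketch, assuming the .dat file is single-byte text rather than a binary dump, which the question does not confirm); in Python 3 the built-in open() accepts the same arguments as codecs.open():
# Decode every byte as one character (latin-1 never fails):
with open("RP.dat", encoding="latin-1") as f:
    dat = f.read()

# Or keep utf-8 but mark undecodable bytes with U+FFFD instead of dropping them:
with open("RP.dat", encoding="utf-8", errors="replace") as f:
    dat = f.read()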

Python UnicodeEncodeError involving 'charmap' codec

This code was working fine before, but now when I try to write a list to a CSV file I get this error:
File "C:/Users/wf5931/OneDrive - ENGIE/Documents/Python Scripts/Scrape Vehicle Reg Info/vehicleRegChecker 6.1.py", line 109, in openFile
writer.writerow(x)
File "C:\Users\wf5931\AppData\Local\Continuum\anaconda3\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\u2082' in position 78: character maps to <undefined>
from this:
with open(vehicleRegInformation, 'w', newline='') as f:
    writer = csv.writer(f)
    for x in vehicleRegInfo:
        writer.writerow(x)
Try adding encoding="utf-8" :
with open(vehicleRegInformation, 'w', newline='',encoding="utf-8") as f:
writer = csv.writer(f)
for x in vehicleRegInfo:
writer.writerow(x)
Add the encoding when opening the file:
with open(vehicleRegInformation, 'w', newline='', encoding='utf8') as f:
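If the CSV is meant to be opened in Excel on Windows afterwards, Excel often fails to detect plain UTF-8; writing with the utf-8-sig codec adds a byte-order mark that Excel recognizes (a general tip, not part of the original answers):
with open(vehicleRegInformation, 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    for x in vehicleRegInfo:
        writer.writerow(x)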

return codecs.ascii_decode(input, self.errors)[0]

I am reading a songs file in csv format and I do not know what I am doing wrong.
import csv
import os
import random

file = open("songs.csv", "rU")
reader = csv.reader(file)
for song in reader:
    print(song[0], song[1], song[2])
file.close()
This is the error:
Traceback (most recent call last):
File "/Users/kuku/Desktop/hey/mine/test.py", line 10, in <module>
for song in reader:
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 414: ordinal not in range(128)
Try decoding each row explicitly (note that the unicode built-in only exists in Python 2; on Python 3, pass encoding='utf-8' to open() instead):
for song in [unicode(song, 'utf-8') for song in reader]:
    print(...)
With this bit of your code:
for song in reader:
    print(song[0], song[1], song[2])
you are printing elements 0, 1 and 2 of each line in reader on every iteration of the loop. This will cause a (different) error if a line has fewer than 3 elements.
If you don't know that there will be at least 3 elements in each line, you can wrap the print in a try/except block:
with open("songs.csv", "r") as f:
    song_reader = csv.reader(f)
    for song_line in song_reader:
        lyric = song_line
        try:
            print(lyric[0], lyric[1], lyric[2])
        except IndexError:
            pass  # ...or preferably do something better
It's worth noting that in most cases it is preferable to open a file within a with block, as shown above. This negates the need for file.close().
You can open the file in utf-8 encoding.
file = open("songs.csv", "rU", encoding="utf-8")

How to read a text file with special characters in python

I am trying to read a txt file with special characters like:
الْحَمْدُ لِلَّهِ رَبِّ الْعَالَمِينَ
I'm using:
import fileinput

fileToSearch = "test_encoding.txt"
with open(fileToSearch, 'r', encoding='utf-8') as file:
    counter = 0
    for line in file:
        print(line)
But Python crashes with this message:
Traceback (most recent call last):
File "test.py", line 9, in <module>
print(line)
File "C:\Users\atheelm\AppData\Local\Programs\Python\Python35-
32\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode characters in position 0-1:
character maps to <undefined>
I have Python 3.5.1 and I'm using Windows.
I'm running this command:
py test.py > out.txt
Use two different files and use io:
import io

lines = ["Init"]
with io.open(fileToSearch, 'r', encoding='utf-8') as file:
    counter = 1
    for line in file:
        lines.insert(counter, str(line))
        counter = counter + 1
with io.open(out_file, 'w', encoding='utf-8') as file:
    for item in lines:
        file.write("%s\n" % item)
