cp949 codec can't encode character error in python - python

I am using the code below to parse the XML format wikipedia training data into a pure text file:
from __future__ import print_function
import logging
import os.path
import six
import sys
from gensim.corpora import WikiCorpus
if __name__ == '__main__':
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# check and process input arguments
if len(sys.argv) != 3:
print("Using: python process_wiki.py enwiki.xxx.xml.bz2 wiki.en.text")
sys.exit(1)
inp, outp = sys.argv[1:3]
space = " "
i = 0
output = open(outp, 'w')
wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
for text in wiki.get_texts():
if six.PY3:
output.write(bytes(' '.join(text), 'utf-8').decode('utf-8') + '\n')
# ###another method###
# output.write(
# space.join(map(lambda x:x.decode("utf-8"), text)) + '\n')
else:
output.write(space.join(text) + "\n")
i = i + 1
if (i % 10000 == 0):
logger.info("Saved " + str(i) + " articles")
output.close()
logger.info("Finished Saved " + str(i) + " articles")
when I run this code, it gives me a following error message:
File "wiki_parser.py", line 42, in <module>
output.write(bytes(' '.join(text), 'utf-8').decode('utf-8') + '\n')
UnicodeEncodeError: 'cp949' codec can't encode character '\u1f00' in position 1537: illegal multibyte sequence
When I searched this error online, most answers told me to add 'utf-8' as the encoding which is already there. What could be the possible issue with the code?

Minimal example
The problem is that your file is opened with an implicit encoding (inferred from your system). I can recreate your issue as follows:
# Minimal reproduction: U+1F00 is outside the cp949 repertoire, so this
# write raises UnicodeEncodeError exactly like the question's traceback.
a = '\u1f00'
with open('f.txt', 'w', encoding='cp949') as f:
    f.write(a)
Error message: UnicodeEncodeError: 'cp949' codec can't encode character '\u1f00' in position 0: illegal multibyte sequence
You have two options. Either open the file using an encoding which can encode the character you are using:
# Option 1: utf-8 can encode every Unicode character, so the write succeeds.
with open('f.txt', 'w', encoding='utf-8') as f:
    f.write(a)
Or open the file as binary and write encoded bytes:
# Option 2: open in binary mode and encode explicitly before writing.
with open('f.txt', 'wb') as f:
    f.write(a.encode('utf-8'))
Applied to your code:
I would replace this part:
output = open(outp, 'w')  # NOTE: no encoding given -- falls back to the platform default (cp949 here)
wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
for text in wiki.get_texts():
    if six.PY3:
        output.write(bytes(' '.join(text), 'utf-8').decode('utf-8') + '\n')
        # ###another method###
        # output.write(
        #     space.join(map(lambda x: x.decode("utf-8"), text)) + '\n')
    else:
        output.write(space.join(text) + "\n")
with this:
from io import open

wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
# FIX: the codec name must be 'utf-8', not 'utf=8' -- the misspelled name
# would raise "LookupError: unknown encoding: utf=8" at open() time.
# io.open accepts the encoding argument on both Python 2 and Python 3.
with open(outp, 'w', encoding='utf-8') as output:
    for text in wiki.get_texts():
        output.write(u' '.join(text) + u'\n')
which should work in both Python 2 and Python 3.

Related

Encoding Error - charmap' codec can't encode character '\u015f'

It seems that I cannot encode the character '\u015f' (letter s with cedilla). Please could someone help?
from selenium import webdriver
import time

# Create the CSV with an explicit utf-8 encoding. The original passed
# 'utf-8'.replace(u"\u015f", "ş") as the encoding -- the .replace() call on
# the codec *name* does nothing; the real failure came from re-opening the
# file below without any encoding (platform default cp1252 cannot encode
# U+015F).
with open('Violators_UNGC1.csv', 'w', encoding='utf-8') as file:
    file.write("Participants; Sectors; Countries; Expelled \n")

# Raw string so backslashes in the Windows path are not treated as escapes.
driver = webdriver.Chrome(executable_path=r'C:\webdrivers\chromedriver.exe')
driver.get('https://www.unglobalcompact.org/participation/report/cop/create-and-submit/expelled?page=1&per_page=250')
driver.maximize_window()
time.sleep(2)
for k in range(150):
    # XPath attribute tests use '@', not '#' ('//td[#class=...]' is invalid
    # XPath and would make find_elements raise).
    Participants = driver.find_elements("xpath", '//td[@class="participant"]/a')
    Sectors = driver.find_elements("xpath", '//td[@class="sector"]')
    Countries = driver.find_elements("xpath", '//td[@class="country"]')
    Expelled = driver.find_elements("xpath", '//td[@class="year"]')
    time.sleep(1)
    # FIX: append mode must also specify utf-8; this open was the line that
    # raised UnicodeEncodeError on 'ş'.
    with open('Violators_UNGC1.csv', 'a', encoding='utf-8') as file:
        for i in range(len(Participants)):
            file.write(Participants[i].text + ";" + Sectors[i].text + ";" + Countries[i].text + ";" + Expelled[i].text + "\n")
driver.close()
and I get an error message as per the below:
UnicodeEncodeError
Traceback (most recent call last) Cell In [15], line 28
26 with open('Violators_UNGC1.csv', 'a') as file:
27 for i in range(len(Participants)):
---> 28 file.write(Participants[i].text + ";" + Sectors[i].text + ";" + Countries[i].text + ";" + Expelled[i].text + "\n")
30 driver.close() File ~\AppData\Local\Programs\Python\Python311\Lib\encodings\cp1252.py:19, in IncrementalEncoder.encode(self, input, final)
18 def encode(self, input, final=False):
---> 19 return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\u015f' in position 32: character maps to <undefined>
Thank you all !
As mentioned in comments, the default encoding of open is not fixed and should be declared explicitly. UTF-8 works for all Unicode characters. I also suggest opening the file once instead of re-opening it for each row write, and to use the csv module to write CSV files:
import csv

# Open the file once, with an explicit utf-8 encoding. newline='' is what
# the csv module documentation requires so the writer controls row endings
# itself (otherwise Windows produces a blank line after every row).
with open('Violators_UNGC1.csv', 'w', encoding='utf-8', newline='') as file:
    w = csv.writer(file, delimiter=';')
    w.writerow(['Participants', 'Sectors', 'Countries', 'Expelled'])
    # Fake data for demonstration
    Participants = 'oneş', 'twoş', 'threeş'
    Sectors = 'sec1', 'sec2', 'sec3'
    Countries = 'USA', 'Germany', 'France'
    Expelled = 'A', 'B', 'C'
    # zip yields one tuple per output row: (participant, sector, country, expelled)
    for row in zip(Participants, Sectors, Countries, Expelled):
        w.writerow(row)
Output file:
Participants;Sectors;Countries;Expelled
oneş;sec1;USA;A
twoş;sec2;Germany;B
threeş;sec3;France;C

How to translate encoding by ansi into unicode

When I use the CountVectorizer in sklearn, it needs the file content as Unicode, but my data file is encoded in ANSI.
I tried to change the encoding to unicode using notepad++, then I use readlines, it cannot read all the lines, instead it can only read the last line. After that, I tried to read the line into data file, and write them into the new file by using unicode, but I failed.
def merge_file():
root_dir="d:\\workspace\\minibatchk-means\\data\\20_newsgroups\\"
resname='resule_final.txt'
if os.path.exists(resname):
os.remove(resname)
result = codecs.open(resname,'w','utf-8')
num = 1
for back_name in os.listdir(r'd:\\workspace\\minibatchk-means\\data\\20_newsgroups'):
current_dir = root_dir + str(back_name)
for filename in os.listdir(current_dir):
print num ,":" ,str(filename)
num = num+1
path=current_dir + "\\" +str(filename)
source=open(path,'r')
line = source.readline()
line = line.strip('\n')
line = line.strip('\r')
while line !="":
line = unicode(line,"gbk")
line = line.replace('\n',' ')
line = line.replace('\r',' ')
result.write(line + ' ')
line = source.readline()
else:
print 'End file :'+ str(filename)
result.write('\n')
source.close()
print 'End All.'
result.close()
The error message is :UnicodeDecodeError: 'gbk' codec can't decode bytes in position 0-1: illegal multibyte sequence
Oh, I found the way.
First, use chardet to detect string encoding.
Second,use codecs to input or output to the file in the specific encoding.
Here is the code.
import chardet
import codecs
import os
root_dir="d:\\workspace\\minibatchk-means\\data\\20_newsgroups\\"
num = 1
failed = []
for back_name in os.listdir("d:\\workspace\\minibatchk-means\\data\\20_newsgroups"):
current_dir = root_dir + str(back_name)
for filename in os.listdir(current_dir):
print num,":",str(filename)
num=num+1
path=current_dir+"\\"+str(filename)
content = open(path,'r').read()
source_encoding=chardet.detect(content)['encoding']
if source_encoding == None:
print '??' , filename
failed.append(filename)
elif source_encoding != 'utf-8':
content=content.decode(source_encoding,'ignore')
codecs.open(path,'w',encoding='utf-8').write(content)
print failed
Thanks for all your help.

Printing unicode string not correct

I use PyPDF2 to read a pdf file but get a unicode string.
I don't know what's the encoding, then try to dump first 8 chars to hex:
0000 005b 00d7 00c1 00e8 00d4 00c5 00d5 [......
What do these bytes mean? Is it UTF-16BE or UTF-16LE?
I try below code but output is wrong:
print outStr.encode('utf-16be').decode('utf-16')
嬀휀섀퐀씀픀
If printed directly, Python reports an error:
UnicodeEncodeError: 'ascii' codec can't encode characters in position 1-7: ordinal not in range(128)
I am following the instruction from How To Extract Text From Pdf In Python
Code section as below:
import PyPDF2
import textract
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# Printable-ASCII lookup table: characters whose repr() is three chars long
# (i.e. plainly printable) map to themselves, everything else to '.'.
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])

def dumpUnicodeString(src, length=8):
    # Hex-dump a unicode string (Python 2): one row per `length` characters,
    # each code point shown as a 4-digit hex value plus an ASCII preview.
    result = []
    for i in xrange(0, len(src), length):
        unichars = src[i:i + length]
        hex = ' '.join(["%04x" % ord(x) for x in unichars])
        printable = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or '.') for x in unichars])
        result.append("%04x %-*s %s\n" % (i * 2, length * 5, hex, printable))
    return ''.join(result)
def extractPdfText(filePath=''):
fileObject = open(filePath, 'rb')
pdfFileReader = PyPDF2.PdfFileReader(fileObject)
totalPageNumber = pdfFileReader.numPages
currentPageNumber = 0
text = ''
while(currentPageNumber < totalPageNumber ):
pdfPage = pdfFileReader.getPage(currentPageNumber)
text = text + pdfPage.extractText()
currentPageNumber += 1
if(text == ''):
text = textract.process(filePath, method='tesseract', encoding='utf-8')
return text
if __name__ == '__main__':
pdfFilePath = 'a.pdf'
pdfText = extractPdfText(pdfFilePath)
#pdfText = pdfText[:7]
print dumpUnicodeString(pdfText)
print pdfText

Python 3.6 utf-8 UnicodeEncodeError

#!/usr/bin/env python3
import glob
import xml.etree.ElementTree as ET

filenames = glob.glob("C:\\Users\\####\\Desktop\\BNC2\\[A00-ZZZ]*.xml")
out_lines = []
for filename in filenames:
    with open(filename, 'r', encoding="utf-8") as content:
        tree = ET.parse(content)
        root = tree.getroot()
        for w in root.iter('w'):
            lemma = w.get('hw')
            pos = w.get('pos')
            tag = w.get('c5')
            out_lines.append(w.text + "," + lemma + "," + pos + "," + tag)

# FIX: the OUTPUT file must also be opened with an explicit encoding; the
# bare open() defaulted to the Windows codepage (cp1252), which cannot
# encode U+2192 and raised UnicodeEncodeError. The original
# bytes(line, 'utf-8').decode('utf-8', 'ignore') round-trip was a no-op and
# is removed.
with open("C:\\Users\\####\\Desktop\\bnc.txt", "w", encoding="utf-8") as out_file:
    for line in out_lines:
        out_file.write("{}\n".format(line))
Gives the error:
UnicodeEncodeError: 'charmap' codec can't encode character '\u2192' in position 0: character maps to undefined
I thought this line would have solved that:
line = bytes(line, 'utf-8').decode('utf-8', 'ignore')
You need to specify the encoding when opening the output file, same as you did with the input file:
# Give the output file the same explicit utf-8 encoding as the input files.
with open("C:\\Users\\####\\Desktop\\bnc.txt", "w", encoding="utf-8") as out_file:
    for line in out_lines:
        out_file.write("{}\n".format(line))
If your script has multiple reads and writes and you want a particular encoding (let's say utf-8) for all of them, we can change the default encoding too:
# WARNING: Python 2 only. reload() is a builtin there, and
# sys.setdefaultencoding exists only before site.py deletes it -- reload(sys)
# resurrects it. Changing the process-wide default encoding is widely
# discouraged because it masks encoding bugs and can break libraries that
# rely on the documented ascii default. Prefer passing encoding= to each
# open() call. None of this works on Python 3.
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
We should use it only when we have multiple reads/writes though and should be done at the beginning of the script
Changing default encoding of Python?

decoding a .txt - 'utf-8' codec can't decode byte 0xf3

I am taking data, domains, from an excel file to a text file and then check the availability of the domains. The problem pops up when I try to use that text file after taking the data from the excel file.
This is the data in the excel file
arete.cl
cbsanbernardo.cl
ludala.cl
puntotactico.cl
sunriseskateboard.cl
ellegrand.cl
turismosantodomingo.cl
delotroladof.cl
produccionesmandala.cl
So, basically if I type manually the domains in the text file the script works fine. But if I take the domains from an excel file to a text file and then run the script this errors pops up:
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xf3 in position 194: invalid continuation byte
The same happens if I try to check the domains directly from the excel file.
So should I decode the .txt or the .xlsx? How can I do it?
#!/usr/bin/python
import pythonwhois
import openpyxl
from openpyxl import load_workbook
import os
import io    # FIX: io.open was used below but io was never imported (NameError)
import gzip  # FIX: gzip.open was used below but gzip was never imported (NameError)

pathx = 'path'
filex = 'file.xlsx'
print('**Availability of domains**')
os.chdir(pathx)
workbook = openpyxl.load_workbook(filex, data_only=True)
sheet = workbook.get_sheet_by_name('Dic')

# Write the domain list with an explicit encoding so the bytes on disk are
# known utf-8; relying on the platform default is what caused the
# "can't decode byte 0xf3" error when the file was read back as utf-8.
domainsz = io.open(pathx + '\\domains.txt', 'a', encoding='utf-8')
for i in range(1, 10):
    # Every second row of column A holds a domain; skip empty cells.
    domainx = sheet["A" + str(i * 2)].value
    if domainx is not None:
        domainsz.write(domainx + '\n')
        print(domainx)
domainsz.close()

# FIX: 'plaintext' was never defined in the original (NameError). Read back
# the file we just wrote and compress that content.
with io.open('domains.txt', 'r', encoding='utf-8') as f:
    plaintext = f.read()
with gzip.open('domains.txt' + ".gz", "wb") as outfile:
    outfile.write(bytes(plaintext, 'UTF-8'))

domains = []
available = []
unavailable = []

def getDomains():
    # Load the domain list; latin-1 maps every byte to a code point, so this
    # read can never raise UnicodeDecodeError even for non-utf-8 bytes.
    with io.open('domains.txt', 'r', encoding='latin-1') as f:
        for domainName in f.read().splitlines():
            domains.append(domainName)

def run():
    # WHOIS-query each domain; a registrant contact means it is taken.
    for dom in domains:
        if dom is not None and dom != '':
            details = pythonwhois.get_whois(dom)
            if details['contacts']['registrant'] is not None:
                unavailable.append(dom)
            else:
                available.append(dom)

def printAvailability():
    # Report the two result buckets collected by run().
    print("-----------------------------")
    print("Unavailable Domains: ")
    print("-----------------------------")
    for un in unavailable:
        print(un)
    print("\n")
    print("-----------------------------")
    print("Available Domains: ")
    print("-----------------------------")
    for av in available:
        print(av)

if __name__ == "__main__":
    getDomains()
    run()
    printAvailability()

Categories

Resources