I'm getting this error while I'm trying to read a csv in python with pandas
df02 = pd.read_csv('PMDM Full\filename.csv', sep = '|')
Traceback (most recent call last): File "<stdin>", line 1, in <module> File
"C:\Users\dm\Google Drive\CS\GV\Tickets\Status
Check\venv\lib\site-packages\pandas\util\_decorators.py", line 311, in
wrapper File "C:\Users\dm\Google Drive\CS\GV\Tickets\Status
Check\venv\lib\site-packages\pandas\io\parsers\readers.py", line 1250,
in read index, columns, col_dict = self._engine.read(nrows) File
"C:\Users\dm\Google Drive\CS\GV\Tickets\Status
Check\venv\lib\site-packages\pandas\io\parsers\c_parser_wrapper.py",
line 225, in read chunks = self._reader.read_low_memory(nrows)
File "pandas\_libs\parsers.pyx", line 805, in
pandas._libs.parsers.TextReader.read_low_memory File
"pandas\_libs\parsers.pyx", line 861, in
pandas._libs.parsers.TextReader._read_rows File
"pandas\_libs\parsers.pyx", line 847, in
pandas._libs.parsers.TextReader._tokenize_rows File
"pandas\_libs\parsers.pyx", line 1960, in
pandas._libs.parsers.raise_parser_error pandas.errors.ParserError:
Error tokenizing data. C error: Expected 109 fields in line 1021, saw
113
Code used:
df02 = pd.read_csv('filepath', sep = '|')
sample file
This error is occurring because line 5 of the csv contains a different number of columns than the other lines.
To read the file while skipping the malformed lines you can use the following code:
df = pd.read_csv('sample.csv', sep='|', on_bad_lines='skip')
(In pandas versions older than 1.3, use error_bad_lines=False instead; that parameter was deprecated in 1.3 and later removed.)
Related
I am trying to import a tsv file of amazon customer reviews (size 1.2 gb) into pandas.
This is what I have tried:
import pandas as pd
tsv_books = pd.read_csv('/Users/hs/Downloads/amazon_reviews_us_Books_v1_02.tsv', sep='\t')
print(tsv_books)
I am getting these error messages:
Traceback (most recent call last):
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py", line 760, in _next_iter_line
line = next(self.data)
_csv.Error: ' ' expected after '"'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/hs/PycharmProjects/Thesis Analysis/main.py", line 4, in <module>
tsv_books = pd.read_csv('/Users/hs/Downloads/amazon_reviews_us_Books_v1_02.tsv', delimiter='\t', engine="python")
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/util/_decorators.py", line 311, in wrapper
return func(*args, **kwargs)
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/readers.py", line 680, in read_csv
return _read(filepath_or_buffer, kwds)
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/readers.py", line 581, in _read
return parser.read(nrows)
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/readers.py", line 1250, in read
index, columns, col_dict = self._engine.read(nrows)
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py", line 238, in read
content = self._get_lines(rows)
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py", line 1091, in _get_lines
new_row = self._next_iter_line(row_num=self.pos + rows + 1)
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py", line 789, in _next_iter_line
self._alert_malformed(msg, row_num)
File "/Users/hs/opt/anaconda3/envs/python39/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py", line 739, in _alert_malformed
raise ParserError(msg)
pandas.errors.ParserError: ' ' expected after '"'
This seems to be a rookie mistake, sorry guys. Nonetheless I would appreciate any help!
I can't read the whole file into a dataframe, so I tried breaking it into very small chunks
for chunk in pd.read_csv(r'HugeFile.txt',chunksize=1000, sep=r"\s+", header=None):
df = pd.concat(chunk)
print(df)
But I am still getting this error
Traceback (most recent call last):
File "C:\Data Science\Python\main.py", line 13, in <module>
pd.read_csv(r'HugeFile.txt',chunksize=1000, sep=r"\s+", header=None):
File "C:\Data Science\Python\venv\lib\site-packages\pandas\io\parsers.py", line 1034, in __next__
return self.get_chunk()
File "C:\Data Science\Python\venv\lib\site-packages\pandas\io\parsers.py", line 1084, in get_chunk
return self.read(nrows=size)
File "C:\Data Science\Python\venv\lib\site-packages\pandas\io\parsers.py", line 1057, in read
index, columns, col_dict = self._engine.read(nrows)
File "C:\Data Science\Python\venv\lib\site-packages\pandas\io\parsers.py", line 2061, in read
data = self._reader.read(nrows)
File "pandas\_libs\parsers.pyx", line 756, in pandas._libs.parsers.TextReader.read
File "pandas\_libs\parsers.pyx", line 783, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas\_libs\parsers.pyx", line 827, in pandas._libs.parsers.TextReader._read_rows
File "pandas\_libs\parsers.pyx", line 814, in pandas._libs.parsers.TextReader._tokenize_rows
File "pandas\_libs\parsers.pyx", line 1951, in pandas._libs.parsers.raise_parser_error
pandas.errors.ParserError: Error tokenizing data. C error: Expected 3 fields in line 8, saw 4
Thanks
I have three csv dataframes of tweets, each ~5M tweets. The following code for concatenating them exists with low memory error. My machine has 32GB memory. How can I assign more memory for this task in pandas?
df1 = pd.read_csv('tweets.csv')
df2 = pd.read_csv('tweets2.csv')
df3 = pd.read_csv('tweets3.csv')
frames = [df1, df2, df3]
result = pd.concat(frames)
result.to_csv('tweets_combined.csv')
The error is:
$ python concantenate_dataframes.py
sys:1: DtypeWarning: Columns (0,1,2,3,4,5,6,8,9,10,11,12,13,14,19,22,23,24) have mixed types.Specify dtype option on import or set low_memory=False.
Traceback (most recent call last):
File "concantenate_dataframes.py", line 19, in <module>
df2 = pd.read_csv('tweets2.csv')
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 676, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 454, in _read
data = parser.read(nrows)
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 1133, in read
ret = self._engine.read(nrows)
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 2037, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 859, in pandas._libs.parsers.TextReader.read
UPDATE: I tried the suggestions in the answer and still get an error
$ python concantenate_dataframes.py
Traceback (most recent call last):
File "concantenate_dataframes.py", line 18, in <module>
df1 = pd.read_csv('tweets.csv', low_memory=False, error_bad_lines=False)
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 676, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 454, in _read
data = parser.read(nrows)
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 1133, in read
ret = self._engine.read(nrows)
File "/home/mona/anaconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 2037, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 862, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 943, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 2070, in pandas._libs.parsers.raise_parser_error
pandas.errors.ParserError: Error tokenizing data. C error: Buffer overflow caught - possible malformed input file.
File "pandas/_libs/parsers.pyx", line 874, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas/_libs/parsers.pyx", line 928, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 915, in pandas._libs.parsers.TextReader._tokenize_rows
File "pandas/_libs/parsers.pyx", line 2070, in pandas._libs.parsers.raise_parser_error
pandas.errors.ParserError: Error tokenizing data. C error: Buffer overflow caught - possible malformed input file.
I am running the code on Ubuntu 20.04 OS
I think this is a problem with malformed data (some rows are not structured properly in tweets2.csv). For that you can use error_bad_lines=False, and try to change the engine from c to python, like engine='python'
ex : df2 = pd.read_csv('tweets2.csv', error_bad_lines=False)
or
ex : df2 = pd.read_csv('tweets2.csv', engine='python')
or maybe
ex : df2 = pd.read_csv('tweets2.csv', engine='python', error_bad_lines=False)
but I recommend identifying those records and repairing them.
And also if you want a hacky way to do this, then use
1) https://askubuntu.com/questions/941480/how-to-merge-multiple-files-of-the-same-format-into-a-single-file
2) https://askubuntu.com/questions/656039/concatenate-multiple-files-without-header
Specify dtype option on import or set low_memory=False
I have a problem with reading a CSV file with pandas (I know there are other topics but I could not solve the problem). My code is:
import pandas as pd
f = pd.read_csv('1803Ltem.csv',sep='\t', dtype=object,)
The error I get is:
Traceback (most recent call last):
File "/username/username/Documents/first.py", line 362, in <module>
f = pd.read_csv('1803Ltem.csv',sep='\t', dtype=object,)
File "/Users/username/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 562, in parser_f
return _read(filepath_or_buffer, kwds)
File "/Users/username/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 325, in _read
return parser.read()
File "/Users/username/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 815, in read
ret = self._engine.read(nrows)
File "/Users/username/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 1314, in read
data = self._reader.read(nrows)
File "pandas/parser.pyx", line 805, in pandas.parser.TextReader.read (pandas/parser.c:8748)
File "pandas/parser.pyx", line 827, in pandas.parser.TextReader._read_low_memory (pandas/parser.c:9003)
File "pandas/parser.pyx", line 881, in pandas.parser.TextReader._read_rows (pandas/parser.c:9731)
File "pandas/parser.pyx", line 868, in pandas.parser.TextReader._tokenize_rows (pandas/parser.c:9602)
File "pandas/parser.pyx", line 1865, in pandas.parser.raise_parser_error (pandas/parser.c:23325)
pandas.io.common.CParserError: Error tokenizing data. C error: Expected 4 fields in line 4587, saw 5
What am I doing wrong?
Try adding the argument error_bad_lines=False to read_csv.
The following worked for me after adding it:
import pandas as pd
f = pd.read_csv('1803Ltem.csv',sep='\t', dtype=object,error_bad_lines=False)
I have a file with 3'502'379 rows and 3 columns. The following script is supposed to be executed but raises and error in the date handling line:
import matplotlib.pyplot as plt
import numpy as np
import csv
import pandas
path = 'data_prices.csv'
data = pandas.read_csv(path, sep=';')
data['DATE'] = pandas.to_datetime(data['DATE'], format='%Y%m%d')
This is the error that occurs:
Traceback (most recent call last):
File "C:\Program Files\Python35\lib\site-packages\pandas\indexes\base.py", line 1945, in get_loc
return self._engine.get_loc(key)
File "pandas\index.pyx", line 137, in pandas.index.IndexEngine.get_loc (pandas\index.c:4066)
File "pandas\index.pyx", line 159, in pandas.index.IndexEngine.get_loc (pandas\index.c:3930)
File "pandas\hashtable.pyx", line 675, in pandas.hashtable.PyObjectHashTable.get_item (pandas\hashtable.c:12408)
File "pandas\hashtable.pyx", line 683, in pandas.hashtable.PyObjectHashTable.get_item (pandas\hashtable.c:12359)
KeyError: 'DATE'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\data\script.py", line 15, in <module>
data['DATE'] = pandas.to_datetime(data['DATE'], format='%Y%m%d')
File "C:\Program Files\Python35\lib\site-packages\pandas\core\frame.py", line 1997, in __getitem__
return self._getitem_column(key)
File "C:\Program Files\Python35\lib\site-packages\pandas\core\frame.py", line 2004, in _getitem_column
return self._get_item_cache(key)
File "C:\Program Files\Python35\lib\site-packages\pandas\core\generic.py", line 1350, in _get_item_cache
values = self._data.get(item)
File "C:\Program Files\Python35\lib\site-packages\pandas\core\internals.py", line 3290, in get
loc = self.items.get_loc(item)
File "C:\Program Files\Python35\lib\site-packages\pandas\indexes\base.py", line 1947, in get_loc
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas\index.pyx", line 137, in pandas.index.IndexEngine.get_loc (pandas\index.c:4066)
File "pandas\index.pyx", line 159, in pandas.index.IndexEngine.get_loc (pandas\index.c:3930)
File "pandas\hashtable.pyx", line 675, in pandas.hashtable.PyObjectHashTable.get_item (pandas\hashtable.c:12408)
File "pandas\hashtable.pyx", line 683, in pandas.hashtable.PyObjectHashTable.get_item (pandas\hashtable.c:12359)
KeyError: 'DATE'
the '\ufeff' prefix in the first column name ('\ufeffDATE') shows that your CSV file starts with a Byte Order Mark (BOM) — typically a UTF-8-with-BOM file, possibly UTF-16 — so it must be read with an encoding that strips the BOM.
so try this when reading your CSV:
df = pd.read_csv(path, sep=';', encoding='utf-8-sig')
or as #EdChum suggested:
df = pd.read_csv(path, sep=';', encoding='utf-16')
both variants should work properly
PS this answer shows how to deal with BOMs