concat Error using join, MultiIndex and tuples - python

How can I write this command differently?
s1 = df.set_index('COTA').groupby(['TradingDesk', 'DATA4'])['DATA5'].idxmax()
s2 = s1.reset_index(level=0).groupby(['TradingDesk'])['DATA5'].shift(freq='BM')
df = df.join(pd.concat([s1.rename('COTA_LastDay'), s2.rename('COTA_LastDayPrevMonth')], axis=1), on=['TradingDesk', 'DATA4'])
Written this way, it gives the following error:
File "<stdin>", line 1, in <module>
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\util\_decorators.py", line 331, in wrapper
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\reshape\concat.py", line 368, in concat
op = _Concatenator(
^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\reshape\concat.py", line 563, in __init__
self.new_axes = self._get_new_axes()
^^^^^^^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\reshape\concat.py", line 633, in _get_new_axes
return [
^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\reshape\concat.py", line 634, in <listcomp>
self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i)
^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\reshape\concat.py", line 640, in _get_comb_axis
return get_objs_combined_axis(
^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\indexes\api.py", line 105, in get_objs_combined_axis
return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\indexes\api.py", line 158, in _get_combined_index
index = union_indexes(indexes, sort=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\indexes\api.py", line 310, in union_indexes
result = result.union(other, sort=None if sort else False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\eric.santos.INFINITY\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandas\core\indexes\base.py", line 3336, in union
raise NotImplementedError(
NotImplementedError: Can only union MultiIndex with MultiIndex or Index of tuples, try mi.to_flat_index().union(other) instead.
How do I solve this?
When I run this code in Jupyter Lab it works OK, but when I run it from cmd it gives this error.

Related

Collect for a timestamp column

When I execute the following statement:
spark.sql("SELECT CAST('0001-01-01' AS TIMESTAMP)").show()
I get:
CAST(0001-01-01 AS TIMESTAMP)
0001-01-01 00:00:00
but when I use spark.sql("SELECT CAST('0001-01-01' AS TIMESTAMP)").collect() I get the following error:
Fail to execute line 1: spark.sql("SELECT CAST('0001-01-01' AS TIMESTAMP)").collect()
Traceback (most recent call last):
File "/tmp/zeppelin_pyspark-6127737743421449115.py", line 380, in <module>
exec(code, _zcUserQueryNameSpace)
File "<stdin>", line 1, in <module>
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/sql/dataframe.py", line 535, in collect
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/serializers.py", line 147, in load_stream
yield self._read_with_length(stream)
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/serializers.py", line 172, in _read_with_length
return self.loads(obj)
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/serializers.py", line 580, in loads
return pickle.loads(obj, encoding=encoding)
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/sql/types.py", line 1396, in <lambda>
return lambda *a: dataType.fromInternal(a)
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/sql/types.py", line 633, in fromInternal
for f, v, c in zip(self.fields, obj, self._needConversion)]
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/sql/types.py", line 633, in <listcomp>
for f, v, c in zip(self.fields, obj, self._needConversion)]
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/sql/types.py", line 445, in fromInternal
return self.dataType.fromInternal(obj)
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/sql/types.py", line 199, in fromInternal
return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
ValueError: year 0 is out of range
In your timezone, 0001-01-01 would be treated as 0000-12-31 and the year value with 0 is invalid for the internal function ymd_to_ord() in the datetime package. See the reference
This is not happening in the scala version of spark.

pandas.errors.ParserError: Error tokenizing data, on data that caused no error before

I am trying to solve a pandas.errors.ParserError: Error tokenizing data problem.
I have two types of data.
I use the same code for both, but it fails with one type of data, as attached below. (It works well with the other.)
(msnoise) [sujan#node01 MSNoise_test2]$ msnoise plot dvv
Traceback (most recent call last):
File "/home/sujan/anaconda3/envs/msnoise/bin/msnoise", line 8, in <module>
sys.exit(run())
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/msnoise/scripts/msnoise.py", line 1202, in run
cli(obj={})
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/msnoise/scripts/msnoise.py", line 943, in dvv
main(mov_stack, dttname, comp, filterid, pair, all, show, outfile)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/msnoise/plots/dvv.py", line 89, in main
df = pd.read_csv(day,sep=",", header=0, index_col=0, parse_dates=True)
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/io/parsers.py", line 709, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/io/parsers.py", line 455, in _read
data = parser.read(nrows)
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/io/parsers.py", line 1069, in read
ret = self._engine.read(nrows)
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/io/parsers.py", line 1839, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 902, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 924, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas/_libs/parsers.pyx", line 978, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 965, in pandas._libs.parsers.TextReader._tokenize_rows
File "pandas/_libs/parsers.pyx", line 2208, in pandas._libs.parsers.raise_parser_error
pandas.errors.ParserError: Error tokenizing data. C error: Expected 8 fields in line 114, saw 15
I added error_bad_lines=False, but it does not help and shows the error below.
(msnoise) [sujan#node01 MSNoise_test2]$ msnoise plot dvv
Skipping line 114: expected 8 fields, saw 15
(1, A EA EM EM0 M \
Date
2013-09-29 00:00:00 -0.076348 inf inf 0.000501 -0.002737
2013-09-29 00:00:00 0.014844 0.021573 0.001400 0.001239 0.000257
2013-09-29 00:00:00 -0.071597 0.002802 0.000144 0.001724 -0.000043
2013-09-29 00:00:00 -0.047929 inf inf 0.002285 0.001605
2013-09-29 00:00:00 -0.135391 inf inf 0.002244 0.011393
M0 Pairs
Date
2013-09-29 00:00:00 0.000836 05_TP01_05_TP10
2013-09-29 00:00:00 0.000558 05_TP02_05_TP10
2013-09-29 00:00:00 0.002713 05_TP09_05_TP10
2013-09-29 00:00:00 0.008074 05_TP01_05_TP09
2013-09-29 00:00:00 0.000346 05_TP02_05_TP09 )
Traceback (most recent call last):
File "/home/sujan/anaconda3/envs/msnoise/bin/msnoise", line 8, in <module>
sys.exit(run())
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/msnoise/scripts/msnoise.py", line 1202, in run
cli(obj={})
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/msnoise/scripts/msnoise.py", line 943, in dvv
main(mov_stack, dttname, comp, filterid, pair, all, show, outfile)
File "/home/sujan/anaconda3/envs/msnoise/lib/python2.7/site-packages/msnoise/plots/dvv.py", line 140, in main
tmp2 = allbut[dttname].resample('D').mean()
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/core/generic.py", line 5522, in resample
base=base, key=on, level=level)
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/core/resample.py", line 999, in resample
return tg._get_resampler(obj, kind=kind)
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/core/resample.py", line 1096, in _get_resampler
self._set_grouper(obj)
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/core/groupby.py", line 439, in _set_grouper
indexer = self.indexer = ax.argsort(kind='mergesort')
File "/home/sujan/.local/lib/python2.7/site-packages/pandas/core/indexes/base.py", line 2151, in argsort
return result.argsort(*args, **kwargs)
File "pandas/_libs/tslib.pyx", line 1165, in pandas._libs.tslib._Timestamp.__richcmp__
TypeError: Cannot compare type 'Timestamp' with type 'str'
However, the problematic data worked fine until two weeks ago, when it suddenly started raising the ParserError.
I did not even touch any data or results.
Additionally, I think the code that causes the problem is the following.
for i, mov_stack in enumerate(mov_stacks):
current = start
first = True
alldf = []
while current <= end:
for comp in components:
day = os.path.join('DTT', "%02i" % filterid, "%03i_DAYS" % mov_stack, comp, '%s.txt' % current)
if os.path.isfile(day):
df = pd.read_csv(day, header=0, index_col=0, parse_dates=True)
alldf.append(df)
current += datetime.timedelta(days=1)
if len(alldf) == 0:
print("No Data for %s m%i f%i" % (components, mov_stack, filterid))
continue
the code day = os.path.join('DTT', "%02i" % filterid, "%03i_DAYS" % mov_stack, comp, '%s.txt' % current) reads txt file like below.
Date,A,EA,EM,EM0,M,M0,Pairs
2014-05-10,0.419549372718,inf,inf,0.000458496085412,-0.0160997929491,0.000732900920237,05_SS08_05_TP01
2014-05-10,-0.0429633365955,inf,inf,0.000525405329004,0.000306985380522,0.00237631297525,05_TP01_05_TP07
2014-05-10,0.067236405269,inf,inf,0.00256763292024,-0.000489522024887,0.000310750516333,05_SS08_05_TP10
2014-05-10,-0.0286482054004,inf,inf,0.00101017717763,-0.00188012718704,-0.00148293566406,05_SS02_05_SS05
But the data without the problem has the same txt file format and raises no error, which is strange.
This has brought my work to a halt, so if you know what I have to do, or need other information to solve this, please let me know.
I found the solution. The cause was an environment variable: I had added a Python path there to fix a "no module" error that occurred before the ParserError. That was not actually the fix for the "no module" error — editing .bashrc was. In any case, once I removed the Python path from the environment variable and redid all the steps (cc, mwcs, etc.), msnoise plot dvv finally worked correctly.

Issue TypeError: argument must be a string or number

There is only one categorical column and I want to encode it. It works fine in a notebook, but when it is uploaded to the AIcrowd platform it causes this error.
There are three categorical features in total: one is the target feature, one is the column of row IDs, and after excluding those for training I am left with one feature.
df[['intersection_pos_rel_centre']]
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
df[['intersection_pos_rel_centre']]=le.fit_transform(df[['intersection_pos_rel_centre']])
df[['intersection_pos_rel_centre']]
My error is
Selecting runtime language: python
[NbConvertApp] Converting notebook predict.ipynb to notebook
[NbConvertApp] Executing notebook with kernel: python
Traceback (most recent call last):
File "/opt/conda/bin/jupyter-nbconvert", line 11, in <module>
sys.exit(main())
File "/opt/conda/lib/python3.8/site-packages/jupyter_core/application.py", line 254, in launch_instance
return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/traitlets/config/application.py", line 845, in launch_instance
app.start()
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 350, in start
self.convert_notebooks()
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 524, in convert_notebooks
self.convert_single_notebook(notebook_filename)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 489, in convert_single_notebook
output, resources = self.export_single_notebook(notebook_filename, resources, input_buffer=input_buffer)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 418, in export_single_notebook
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 181, in from_filename
return self.from_file(f, resources=resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 199, in from_file
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/notebook.py", line 32, in from_notebook_node
nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 143, in from_notebook_node
nb_copy, resources = self._preprocess(nb_copy, resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 318, in _preprocess
nbc, resc = preprocessor(nbc, resc)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/base.py", line 47, in __call__
return self.preprocess(nb, resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 79, in preprocess
self.execute()
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/opt/conda/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 553, in async_execute
await self.async_execute_cell(
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 123, in async_execute_cell
cell, resources = self.preprocess_cell(cell, self.resources, cell_index)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 146, in preprocess_cell
cell = run_sync(NotebookClient.async_execute_cell)(self, cell, index, store_history=self.store_history)
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/opt/conda/lib/python3.8/site-packages/nest_asyncio.py", line 98, in run_until_complete
return f.result()
File "/opt/conda/lib/python3.8/asyncio/futures.py", line 178, in result
raise self._exception
File "/opt/conda/lib/python3.8/asyncio/tasks.py", line 280, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 852, in async_execute_cell
self._check_raise_for_error(cell, exec_reply)
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 760, in _check_raise_for_error
raise CellExecutionError.from_cell_and_msg(cell, exec_reply_content)
nbclient.exceptions.CellExecutionError: An error occurred while executing the following cell:
------------------
df[['intersection_pos_rel_centre']]
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
df[['intersection_pos_rel_centre']]=le.fit_transform(df[['intersection_pos_rel_centre']])
df[['intersection_pos_rel_centre']]
------------------
TypeError: argument must be a string or number

Grouping in Pandas

I want to group data in a dataframe I have on the column "Count", by another column "State". I would like to output a list of lists, where each sublist contains just the counts for one state.
example output: [[120,200], [40, 20, 40], ...]
120 and 200 would be the counts for, let's say, the state California
I tried the following:
df_new = df[['State']].groupby(['Count']).to_list()
I get a keyerror: 'count'
Traceback:
Traceback (most recent call last):
File "C:\Users\Michael\workspace\UCIIntrotoPythonDA\src\Michael_Madani_week3.py", line 84, in <module>
getStateCountsDF(filepath)
File "C:\Users\Michael\workspace\UCIIntrotoPythonDA\src\Michael_Madani_week3.py", line 81, in getStateCountsDF
df_new = df[['State']].groupby(['Count']).to_list()
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\generic.py", line 3159, in groupby
sort=sort, group_keys=group_keys, squeeze=squeeze)
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\groupby.py", line 1199, in groupby
return klass(obj, by, **kwds)
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\groupby.py", line 388, in __init__
level=level, sort=sort)
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\groupby.py", line 2148, in _get_grouper
in_axis, name, gpr = True, gpr, obj[gpr]
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\frame.py", line 1797, in __getitem__
return self._getitem_column(key)
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\frame.py", line 1804, in _getitem_column
return self._get_item_cache(key)
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\generic.py", line 1084, in _get_item_cache
values = self._data.get(item)
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\internals.py", line 2851, in get
loc = self.items.get_loc(item)
File "C:\Users\Michael\Anaconda\lib\site-packages\pandas\core\index.py", line 1572, in get_loc
return self._engine.get_loc(_values_from_object(key))
File "pandas\index.pyx", line 134, in pandas.index.IndexEngine.get_loc (pandas\index.c:3824)
File "pandas\index.pyx", line 154, in pandas.index.IndexEngine.get_loc (pandas\index.c:3704)
File "pandas\hashtable.pyx", line 686, in pandas.hashtable.PyObjectHashTable.get_item (pandas\hashtable.c:12280)
File "pandas\hashtable.pyx", line 694, in pandas.hashtable.PyObjectHashTable.get_item (pandas\hashtable.c:12231)
KeyError: 'Count'
I feel like this should be a simple line of code, what am I doing wrong here?
It is possible as a one-liner:
import pandas as pd
df = pd.DataFrame.from_dict({"State": ["ny", "or", "ny", "nm"],
"Counts": [100,300,200,400]})
list_new = df.groupby("State")["Counts"].apply(list).tolist()
print(list_new)
[[400], [100, 200], [300]]
You should read the doc of groupby to see what the expected outcome of the grouping is and how to change that (http://pandas.pydata.org/pandas-docs/stable/groupby.html).

Python-Reportlab error: ValueError: format not resolved

When I was using python-reportlab to create a PDF document, it sometimes threw an exception: ValueError: format not resolved talk.google.com. I wonder why this occurred and how to solve it; the full error stack is below:
File "/usr/lib64/python2.7/threading.py", line 552, in __bootstrap_inner
self.run()
File "/usr/lib64/python2.7/threading.py", line 505, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/lib/python2.7/site-packages/tweets2pdf/tweets2pdf.py", line 42,
in generate_thread
tpdoc.dump()
File "/usr/lib/python2.7/site-packages/tweets2pdf/pdfgen.py", line 609, in
dump
self.pdfdoc.build(self.elements, onFirstPage = self.on_first_page,
onLaterPages = self.on_later_pages)
File "/usr/lib64/python2.7/site-
packages/reportlab/platypus/doctemplate.py", line 1117, in build
BaseDocTemplate.build(self,flowables, canvasmaker=canvasmaker)
File "/usr/lib64/python2.7/site-
packages/reportlab/platypus/doctemplate.py", line 906, in build
self._endBuild()
File "/usr/lib64/python2.7/site-
packages/reportlab/platypus/doctemplate.py", line 848, in _endBuild
if getattr(self,'_doSave',1): self.canv.save()
File "/usr/lib64/python2.7/site-packages/reportlab/pdfgen/canvas.py", line
1123, in save
self._doc.SaveToFile(self._filename, self)
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 235, in SaveToFile
f.write(self.GetPDFData(canvas))
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 257, in GetPDFData
return self.format()
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 417, in format
IOf = IO.format(self)
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 869, in format
fcontent = format(self.content, document, toplevel=1) # yes this is at
top level
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 102, in format
f = element.format(document)
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 1635, in format
return D.format(document)
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 667, in format
L = [(format(PDFName(k),document)+" "+format(dict[k],document)) for k in
keys]
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 102, in format
f = element.format(document)
File "/usr/lib64/python2.7/site-packages/reportlab/pdfbase/pdfdoc.py",
line 1764, in format
if f is None: raise ValueError, "format not resolved %s" % self.name
ValueError: format not resolved talk.google.com

Categories

Resources