def country_to_continent(country_name):
    country_alpha2 = pc.country_name_to_country_alpha2(country_name)
    country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
    country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
    return country_name

country_name = [i for i in df['country']]
country_to_continent(country_name)
This is my code. I want to loop over df['country'] and get the continent name for each country, but it raises TypeError: unhashable type: 'list'. My goal is to map each country to its continent and then sum the suicide cases for each continent.
This is the full error message:
TypeError Traceback (most recent call last)
<ipython-input-197-3be113b9bda3> in <module>
6
7 country_name = [i for i in df['country']]
----> 8 country_to_continent(country_name)
<ipython-input-197-3be113b9bda3> in country_to_continent(country_name)
1 def country_to_continent(country_name):
----> 2 country_alpha2 = pc.country_name_to_country_alpha2(country_name)
3 country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
4 country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
5 return country_name
D:\pythin\lib\site-packages\pycountry_convert\convert_countries.py in country_name_to_country_alpha2(cn_name, cn_name_format)
68 return cn_name
69
---> 70 if cn_name not in dict_country_name_to_country_alpha2:
71 raise KeyError("Invalid Country Name: '{0}'".format(cn_name))
72
TypeError: unhashable type: 'list'
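A minimal sketch of one way around this, assuming the suicide counts live in a column such as df['suicides_no'] (adjust the name to your data): pycountry_convert only accepts one country name at a time, so apply the function element-wise instead of passing the whole list, and return the continent name rather than the input.
import pycountry_convert as pc

def country_to_continent(country_name):
    # convert a single country name; passing a list is what triggers the unhashable-type error
    country_alpha2 = pc.country_name_to_country_alpha2(country_name)
    continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
    return pc.convert_continent_code_to_continent_name(continent_code)

# apply the converter to each value of the column instead of to the whole list
df['continent'] = df['country'].apply(country_to_continent)

# 'suicides_no' is an assumed column name; replace it with whatever your data uses
continent_totals = df.groupby('continent')['suicides_no'].sum()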
I hope you can help me with an issue I've been having for a while. I keep getting this error no matter what I try.
This is how the column is typed, as far as I know:
tweets['post_date'] = pd.to_datetime(tweets['post_date'], unit='s')
tweets['date'] = pd.to_datetime(tweets['post_date'].apply(lambda date: date.date()))
tweets.head()
Output:
post_date body ticker_symbol date
19 2015-01-01 00:11:17 $UNP $ORCL $QCOM $MSFT $AAPL Top scoring mega ... MSFT 2015-01-01
43 2015-01-01 00:55:58 http://StockAviator.com....Top penny stocks, N... MSFT 2015-01-01
TypeError Traceback (most recent call last)
/usr/local/lib/python3.8/dist-packages/pandas/core/arrays/datetimelike.py in _validate_comparison_value(self, other)
539 try:
--> 540 self._check_compatible_with(other)
541 except (TypeError, IncompatibleFrequency) as err:
13 frames
TypeError: Cannot compare tz-naive and tz-aware datetime-like objects.
The above exception was the direct cause of the following exception:
InvalidComparison Traceback (most recent call last)
InvalidComparison: 2015-01-01 12:00:00-05:00
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
/usr/local/lib/python3.8/dist-packages/pandas/core/ops/invalid.py in invalid_comparison(left, right, op)
32 else:
33 typ = type(right).__name__
---> 34 raise TypeError(f"Invalid comparison between dtype={left.dtype} and {typ}")
35 return res_values
36
TypeError: Invalid comparison between dtype=datetime64[ns] and Timestamp
The error comes from this part of my code:
# market opens 14:30, closes 21:00
def getAvgPerPrice(tweets, stockk):
    stock = stockk.copy()
    result = pd.DataFrame([])
    for i in range(0, len(stock) - 1):
        d = stock.index[i]
        next_d = stock.index[i + 1]
        wanted_tweets = tweets[((tweets.post_date - timedelta(hours=3)) >= (d + timedelta(hours=h))) & ((tweets.post_date - timedelta(hours=3)) < (next_d + timedelta(hours=h)))]
        result.at[i, 'date'] = d
        result.at[i, 'close'] = stock.iloc[i].Close
        result.at[i, 'avgScore'] = wanted_tweets['score'].mean()
I would really appreciate it if anyone could help me find the issue. I have tried many things already, but no luck. Thank you in advance.
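The traceback points at mixing tz-naive and tz-aware timestamps: tweets.post_date is a plain datetime64[ns], while the stock index carries a -05:00 offset. A minimal sketch of two ways to make the two sides comparable, assuming the tweet timestamps are in UTC (adjust the timezone to your data); use one option or the other, not both:
# Option 1: make the tweet timestamps tz-aware so they match the stock index
tweets['post_date'] = tweets['post_date'].dt.tz_localize('UTC')

# Option 2: strip the timezone from the stock index instead
stock.index = stock.index.tz_localize(None)
Either way, both operands of the >= and < comparisons end up with the same tz-awareness, which is what the error is about.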
I have a problem in my code. It results in the error "AttributeError: 'Series' object has no attribute 'float'" and I need a solution.
My code:
def calculateED(pdX, pdY):
    arg = (((pdX['NormalTemp'] - pdY[0]) ** 2) +
           ((pdX['NormalHumidity'] - pdY[1]) ** 2) +
           ((pdX['NormalOutlook'] - pdY[2]) ** 2) +
           ((pdX['NormalWindy'] - pdY[3]) ** 2)).float()
    pdX['ED'] = math.sqrt(arg1)
    return pdX

dataTest = [64.0, 65.0, 1.0, 1.0]
ngitung = calculateED(dataset, dataTest)
ngitung.sort_values(by=['ED'])
But after running this code I get the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-21-034dd0366562> in <module>
1 dataTest = (64.0,65.0,1.0,1.0)
2 #dataTest2 = [72.0,95.0,3.0,0.0]
----> 3 ngitung = calculateED(dataset, dataTest)
4 #ngitung2 = calculateED2(dataset, dataTest2)
5 ngitung.sort_values(by=['ED'])
<ipython-input-15-5ae8a365490e> in calculateED(pdX, pdY)
1 def calculateED (pdX, pdY):
----> 2 arg = (((pdX['NormalTemp']-pdY[0])**2)+
3 ((pdX['NormalHumidity']-pdY[1])**2)+
4 ((pdX['NormalOutlook']-pdY[2])**2)+
5 ((pdX['NormalWindy']-pdY[3])**2)).float()
c:\users\windows\appdata\local\programs\python\python39\lib\site-packages\pandas\core\generic.py in __getattr__(self, name)
5463 if self._info_axis._can_hold_identifiers_and_holds_name(name):
5464 return self[name]
-> 5465 return object.__getattribute__(self, name)
5466
5467 def __setattr__(self, name: str, value) -> None:
AttributeError: 'Series' object has no attribute 'float'
A pandas Series (like a NumPy array) has no .float() method, so the .float() call itself is what raises the AttributeError; the subtractions and squarings already produce floats. Drop that call (use .astype(float) only if you genuinely need a cast), fix the arg/arg1 name mismatch, and use np.sqrt, which works element-wise on a Series, instead of math.sqrt:
import numpy as np

def calculateED(pdX, pdY):
    argument = (((pdX['NormalTemp'] - pdY[0]) ** 2) +
                ((pdX['NormalHumidity'] - pdY[1]) ** 2) +
                ((pdX['NormalOutlook'] - pdY[2]) ** 2) +
                ((pdX['NormalWindy'] - pdY[3]) ** 2))
    pdX['ED'] = np.sqrt(argument)
    return pdX
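With that change, the rest of your original call pattern should work unchanged: ngitung = calculateED(dataset, dataTest) followed by ngitung.sort_values(by=['ED']).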
I'm trying to run a Moran's I test but getting "AttributeError: 'numpy.ndarray' object has no attribute 'transform'" on the last line of code. Both variables are array-like and look like this (auxilio first, then peso_espacial):
0 1024.798431
1 859.662720
2 870.632530
3 819.316065
4 930.600992
...
5567 842.415062
5568 991.513211
5569 908.701993
5570 909.431369
5571 644.946254
Name: Auxilio, Length: 5572, dtype: float64
peso_espacial:
array([876.56886196, 815.34772578, 871.227145 , ..., 903.74618016,
880.30363602, 885.61222452])
And the code is this:
mun = geobr.read_municipality(code_muni="all", year=2019)
mun = mun.rename(columns={'code_muni': 'Municipio'})
mun['Municipio'] = mun['Municipio'].astype(int).astype(str).str[:-1].astype(np.int64)
impacto2020 = pd.read_excel(path)
mapa = pd.merge(mun,impacto2020,on='Municipio',how='left')
qW = ps.lib.weights.Queen.from_dataframe(mapa)
qW.transform = 'r'
peso_espacial = lp.weights.lag_spatial(qW, mapa['Auxilio'])
peso_espacial = np.nan_to_num(peso_espacial)
auxilio = mapa['Auxilio']
from esda.moran import Moran
moran = Moran(auxilio, peso_espacial)
I don't know how to fix it; I already tried converting it to a Series, but I get pretty much the same AttributeError. This should return a number between 0 and 1.
Full error traceback:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-63-e8e3103abd6b> in <module>
1 from esda.moran import Moran
----> 2 moran = Moran(auxilio, peso_espacial)
/opt/anaconda3/lib/python3.8/site-packages/esda/moran.py in __init__(self, y, w, transformation, permutations, two_tailed)
159 y = np.asarray(y).flatten()
160 self.y = y
--> 161 w.transform = transformation
162 self.w = w
163 self.permutations = permutations
AttributeError: 'numpy.ndarray' object has no attribute 'transform'
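For what it's worth, esda's Moran constructor expects the spatial weights object itself as its second argument (it sets w.transform internally, which is why a plain ndarray fails). A sketch of the call under that assumption, keeping lag_spatial only if you need the lagged values elsewhere:
from esda.moran import Moran

auxilio = mapa['Auxilio'].fillna(0)   # the left merge can leave NaNs; fill or drop them first
moran = Moran(auxilio, qW)            # pass the Queen weights object, not the lagged array
print(moran.I, moran.p_sim)
Also note that Moran's I typically falls between roughly -1 and 1 rather than 0 and 1.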
I keep getting TypeError: unhashable type: 'numpy.ndarray' when I run this code, and I'm unable to pinpoint the issue. When I call .head(), this is what I get:
|Year|Chinese|Malay|Indian|Others|
|----+-------+-----+------+------|
|2010| 96| 87.9| 92| 87.7|
|2011| 96.3| 89.3| 92.5| 91.7|
|2012| 97.1| 91| 92.5| 92.6|
|2013| 97.2| 91.7| 92.3| 93.6|
|2014| 97.5| 93.5| 93.2| 93.5|
My code:
df_tertiary = pd.read_csv('percentage-of-p1-cohort-that-progressed-to-post-secondary-education.csv',index_col=0);
df_tertiary_chi = df_tertiary[df_tertiary.race == 'Chinese']
df_tertiary_malay = df_tertiary[df_tertiary.race == 'Malay']
df_tertiary_indian = df_tertiary[df_tertiary.race == 'Indian']
df_tertiary_others = df_tertiary[df_tertiary.race == 'Others']
df_tertiary_total = pd.concat([df_tertiary_chi['percentage_p1_cohort_post_sec'],df_tertiary_malay['percentage_p1_cohort_post_sec'],df_tertiary_indian['percentage_p1_cohort_post_sec'],df_tertiary_others['percentage_p1_cohort_post_sec']], axis=1, sort=False)
df_tertiary_total.columns = ['Chinese','Malay','Indian','Others']
df_tertiary_total = df_tertiary_total.loc['2010':'2019']
df_tertiary_total = pd.DataFrame(df_tertiary_total)
display(df_tertiary_total.head())
plt.plot(df_tertiary_total)
plt.show()
Error:
TypeError Traceback (most recent call last)
<ipython-input-200-1000efab126d> in <module>
14 display(df_tertiary_total.head())
15
---> 16 plt.plot(df_tertiary_total)
17 plt.show()
Data Sample
Chinese Malay Indian Others
year
2016 97.8 93.1 93.8 93.9
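I can't pin down the exact cause from the snippet alone, but a sketch of a more explicit way to draw the same chart, which usually sidesteps this error (assuming the index holds the years and the four columns should be numeric; the pd.to_numeric coercion is only needed if they arrive as object dtype):
import pandas as pd
import matplotlib.pyplot as plt

# make sure every column is numeric before plotting
df_tertiary_total = df_tertiary_total.apply(pd.to_numeric, errors='coerce')

# plot each column against the index explicitly instead of handing the whole frame to plt.plot
fig, ax = plt.subplots()
for col in df_tertiary_total.columns:
    ax.plot(df_tertiary_total.index, df_tertiary_total[col], label=col)
ax.set_xlabel('Year')
ax.set_ylabel('Percentage of P1 cohort')
ax.legend()
plt.show()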
I'm trying to use pymining in Python to generate frequent sequences from my dataset. The code below appears to work well:
from pymining import seqmining
seqs = ( 'caabc', 'abcb', 'cabc', 'abbca')
freq_seqs = seqmining.freq_seq_enum(seqs, 2)
sorted(freq_seqs)
However, when I try to use it with my dataset:
import numpy as np
import pandas as pd
from pymining import seqmining
def importdata():
    filename = pd.read_csv('C:/Users/asus/Desktop/memoire/sequences-code.csv', sep=';', header=None)

data = importdata()
seqs = data
freq_seqs = seqmining.freq_seq_enum(seqs, 2)
sorted(freq_seqs)
I get this error:
TypeError: 'NoneType' object is not iterable
This is the full traceback:
TypeError Traceback (most recent call last)
<ipython-input-4-19e2af14465a> in <module>()
8 data=importdata()
9 seqs = data
---> 10 freq_seqs = seqmining.freq_seq_enum(seqs, 2)
11 sorted(freq_seqs)
12
~\Anaconda3\lib\site-packages\pymining\seqmining.py in freq_seq_enum(sequences, min_support)
9 '''
10 freq_seqs = set()
---> 11 _freq_seq(sequences, tuple(), 0, min_support, freq_seqs)
12 return freq_seqs
13
~\Anaconda3\lib\site-packages\pymining\seqmining.py in _freq_seq(sdb, prefix, prefix_support, min_support, freq_seqs)
16 if prefix:
17 freq_seqs.add((prefix, prefix_support))
---> 18 locally_frequents = _local_freq_items(sdb, prefix, min_support)
19 if not locally_frequents:
20 return
~\Anaconda3\lib\site-packages\pymining\seqmining.py in _local_freq_items(sdb, prefix, min_support)
28 items = defaultdict(int)
29 freq_items = []
---> 30 for entry in sdb:
31 visited = set()
32 for element in entry:
TypeError: 'NoneType' object is not iterable
The simplest change you can make to your code is to get rid of importdata, which is just a wrapper around pd.read_csv: the function has no return statement, so it returns None, and that None is what freq_seq_enum then fails to iterate. Try:
filename = 'C:/Users/asus/Desktop/memoire/sequences-code.csv'
data = pd.read_csv(filename, sep=';', header=None)
Let me know if that helps.
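One more thing worth checking, hedged since I don't know the layout of sequences-code.csv: freq_seq_enum expects an iterable of sequences (like the tuple of strings in the toy example), while pd.read_csv returns a DataFrame. A sketch that turns each CSV row into one sequence, assuming one sequence per row:
import pandas as pd
from pymining import seqmining

data = pd.read_csv('C:/Users/asus/Desktop/memoire/sequences-code.csv', sep=';', header=None)

# build one tuple of items per row, skipping empty cells
seqs = [tuple(item for item in row if pd.notna(item)) for row in data.itertuples(index=False)]

freq_seqs = seqmining.freq_seq_enum(seqs, 2)
print(sorted(freq_seqs))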