Cannot convert a list of lists using pandas - Python
This is my data from the API; from the looks of it, it is a list of lists.
# Query the Datadog Metrics API for yesterday's request rate and pull out the
# raw series points.  (The original paste had lost its indentation, which made
# the `with` block a syntax error; structure restored here.)
with ApiClient(configuration) as api_client:
    api_instance = MetricsApi(api_client)
    response = api_instance.query_metrics(
        _from=int(yesterday_start_dt.timestamp()),
        to=int(yesterday_end_dt.timestamp()),
        query="default_zero(sum:trace.servlet.request.hits{env:prd-main,service:api}.as_rate())",
    )

# `pointlist` is a list of [timestamp_ms, value] pairs.
# NOTE(review): assumes the query returned at least one series — verify
# before indexing `['series'][0]`.
result = response['series'][0]['pointlist']
print(result)
[[1648339200000.0, 1105.8433333333332], [1648339500000.0, 1093.3266666666666], [1648339800000.0, 1076.92], [1648340100000.0, 1059.5133333333333], [1648340400000.0, 1053.8966666666668], [1648340700000.0, 1041.2166666666667], [1648341000000.0, 1055.0533333333333], [1648341300000.0, 1037.8933333333334], [1648341600000.0, 1015.4], [1648341900000.0, 1003.3233333333334], [1648342200000.0, 1017.02], [1648342500000.0, 1017.7766666666666], [1648342800000.0, 1011.0333333333333], [1648343100000.0, 993.9366666666666], [1648343400000.0, 973.9733333333334], [1648343700000.0, 967.8433333333334], [1648344000000.0, 933.2166666666667], [1648344300000.0, 945.0833333333334], [1648344600000.0, 905.2166666666667], [1648344900000.0, 923.9966666666667], [1648345200000.0, 925.4633333333334], [1648345500000.0, 915.5533333333333], [1648345800000.0, 918.8966666666666], [1648346100000.0, 883.6], [1648346400000.0, 908.9166666666666], [1648346700000.0, 856.7333333333333], [1648347000000.0, 873.01], [1648347300000.0, 833.99], [1648347600000.0, 846.5466666666666], [1648347900000.0, 820.7833333333333], [1648348200000.0, 821.4633333333334], [1648348500000.0, 812.8633333333333], [1648348800000.0, 817.78], [1648349100000.0, 821.91], [1648349400000.0, 791.17], [1648349700000.0, 780.3066666666666], [1648350000000.0, 803.4633333333334], [1648350300000.0, 781.9033333333333], [1648350600000.0, 759.4933333333333], [1648350900000.0, 746.11], [1648351200000.0, 731.3133333333334], [1648351500000.0, 724.0533333333333], [1648351800000.0, 710.56], [1648352100000.0, 722.87], [1648352400000.0, 677.5266666666666], [1648352700000.0, 681.7833333333333], [1648353000000.0, 679.9233333333333], [1648353300000.0, 650.6466666666666], [1648353600000.0, 663.78], [1648353900000.0, 650.8133333333334], [1648354200000.0, 645.9133333333333], [1648354500000.0, 642.4566666666667], [1648354800000.0, 627.93], [1648355100000.0, 616.65], [1648355400000.0, 609.94], [1648355700000.0, 602.0733333333334], [1648356000000.0, 
581.6133333333333], [1648356300000.0, 592.48], [1648356600000.0, 593.4], [1648356900000.0, 582.2633333333333], [1648357200000.0, 598.3766666666667], [1648357500000.0, 589.99], [1648357800000.0, 577.7433333333333], [1648358100000.0, 570.1733333333333], [1648358400000.0, 592.58], [1648358700000.0, 578.2533333333333], [1648359000000.0, 586.8833333333333], [1648359300000.0, 590.4033333333333], [1648359600000.0, 601.49], [1648359900000.0, 594.8], [1648360200000.0, 609.01], [1648360500000.0, 620.08], [1648360800000.0, 642.6466666666666], [1648361100000.0, 635.93], [1648361400000.0, 638.42], [1648361700000.0, 645.2], [1648362000000.0, 650.42], [1648362300000.0, 667.88], [1648362600000.0, 689.3666666666667], [1648362900000.0, 694.4433333333334], [1648363200000.0, 690.3933333333333], [1648363500000.0, 710.55], [1648363800000.0, 706.3], [1648364100000.0, 729.5], [1648364400000.0, 771.36], [1648364700000.0, 754.03], [1648365000000.0, 771.4866666666667], [1648365300000.0, 767.52], [1648365600000.0, 779.4133333333333], [1648365900000.0, 800.4266666666666], [1648366200000.0, 788.41], [1648366500000.0, 806.8666666666667], [1648366800000.0, 805.7466666666667], [1648367100000.0, 815.2433333333333], [1648367400000.0, 828.0833333333334], [1648367700000.0, 817.1966666666667], [1648368000000.0, 879.4733333333334], [1648368300000.0, 840.7933333333333], [1648368600000.0, 846.4266666666666], [1648368900000.0, 848.1266666666667], [1648369200000.0, 836.9066666666666], [1648369500000.0, 845.4966666666667], [1648369800000.0, 863.5033333333333], [1648370100000.0, 867.1866666666666], [1648370400000.0, 866.74], [1648370700000.0, 863.8066666666666], [1648371000000.0, 882.38], [1648371300000.0, 876.0233333333333], [1648371600000.0, 905.3366666666667], [1648371900000.0, 879.8066666666666], [1648372200000.0, 878.37], [1648372500000.0, 876.9333333333333], [1648372800000.0, 868.1533333333333], [1648373100000.0, 882.12], [1648373400000.0, 896.9233333333333], [1648373700000.0, 872.84], [1648374000000.0, 
880.71], [1648374300000.0, 894.8066666666666], [1648374600000.0, 873.7266666666667], [1648374900000.0, 891.0033333333333], [1648375200000.0, 927.2433333333333], [1648375500000.0, 905.52], [1648375800000.0, 895.0233333333333], [1648376100000.0, 895.86], [1648376400000.0, 899.3133333333334], [1648376700000.0, 920.22], [1648377000000.0, 937.68], [1648377300000.0, 916.46], [1648377600000.0, 926.6833333333333], [1648377900000.0, 936.4366666666666], [1648378200000.0, 947.6133333333333], [1648378500000.0, 957.7133333333334], [1648378800000.0, 989.1133333333333], [1648379100000.0, 959.0766666666667], [1648379400000.0, 963.5133333333333], [1648379700000.0, 978.3466666666667], [1648380000000.0, 1017.78], [1648380300000.0, 989.7566666666667], [1648380600000.0, 1023.4633333333334], [1648380900000.0, 1033.7166666666667], [1648381200000.0, 1025.1933333333334], [1648381500000.0, 1045.8633333333332], [1648381800000.0, 1063.6133333333332], [1648382100000.0, 1078.45], [1648382400000.0, 1116.3866666666668], [1648382700000.0, 1098.9766666666667], [1648383000000.0, 1101.29], [1648383300000.0, 1127.6], [1648383600000.0, 1102.5233333333333], [1648383900000.0, 1140.84], [1648384200000.0, 1169.23], [1648384500000.0, 1158.6], [1648384800000.0, 1180.01], [1648385100000.0, 1190.43], [1648385400000.0, 1207.3733333333332], [1648385700000.0, 1212.7666666666667], [1648386000000.0, 1244.17], [1648386300000.0, 1245.3166666666666], [1648386600000.0, 1240.69], [1648386900000.0, 1270.33], [1648387200000.0, 1277.8033333333333], [1648387500000.0, 1270.5966666666666], [1648387800000.0, 1304.4266666666667], [1648388100000.0, 1295.6933333333334], [1648388400000.0, 1322.3066666666666], [1648388700000.0, 1351.41], [1648389000000.0, 1339.9566666666667], [1648389300000.0, 1353.2966666666666], [1648389600000.0, 1398.45], [1648389900000.0, 1378.21], [1648390200000.0, 1361.0933333333332], [1648390500000.0, 1404.0833333333333], [1648390800000.0, 1394.6466666666668], [1648391100000.0, 1391.1366666666668], 
[1648391400000.0, 1450.0], [1648391700000.0, 1438.97], [1648392000000.0, 1411.83], [1648392300000.0, 1432.8233333333333], [1648392600000.0, 1473.3966666666668], [1648392900000.0, 1491.0166666666667], [1648393200000.0, 1509.8766666666668], [1648393500000.0, 1488.6566666666668], [1648393800000.0, 1488.4933333333333], [1648394100000.0, 1511.4466666666667], [1648394400000.0, 1508.3566666666666], [1648394700000.0, 1507.8966666666668], [1648395000000.0, 1515.8633333333332], [1648395300000.0, 1517.3], [1648395600000.0, 1528.81], [1648395900000.0, 1546.1266666666668], [1648396200000.0, 1554.57], [1648396500000.0, 1584.0333333333333], [1648396800000.0, 1584.45], [1648397100000.0, 1590.4633333333334], [1648397400000.0, 1580.0066666666667], [1648397700000.0, 1596.3833333333334], [1648398000000.0, 1571.96], [1648398300000.0, 1583.8233333333333], [1648398600000.0, 1618.7033333333334], [1648398900000.0, 1588.12], [1648399200000.0, 1599.56], [1648399500000.0, 1604.1833333333334], [1648399800000.0, 1621.5666666666666], [1648400100000.0, 1598.98], [1648400400000.0, 1627.02], [1648400700000.0, 1612.7833333333333], [1648401000000.0, 1612.2433333333333], [1648401300000.0, 1572.89], [1648401600000.0, 1601.8933333333334], [1648401900000.0, 1612.5366666666666], [1648402200000.0, 1608.7266666666667], [1648402500000.0, 1594.4366666666667], [1648402800000.0, 1614.3366666666666], [1648403100000.0, 1649.0733333333333], [1648403400000.0, 1627.12], [1648403700000.0, 1644.9633333333334], [1648404000000.0, 1653.9033333333334], [1648404300000.0, 1636.6966666666667], [1648404600000.0, 1639.5733333333333], [1648404900000.0, 1627.3866666666668], [1648405200000.0, 1626.3733333333332], [1648405500000.0, 1616.7966666666666], [1648405800000.0, 1667.2933333333333], [1648406100000.0, 1637.0733333333333], [1648406400000.0, 1654.6366666666668], [1648406700000.0, 1673.9566666666667], [1648407000000.0, 1658.4466666666667], [1648407300000.0, 1650.6766666666667], [1648407600000.0, 1662.1933333333334], 
[1648407900000.0, 1686.9733333333334], [1648408200000.0, 1623.0433333333333], [1648408500000.0, 1630.2866666666666], [1648408800000.0, 1599.0466666666666], [1648409100000.0, 1624.8033333333333], [1648409400000.0, 1606.0333333333333], [1648409700000.0, 1594.15], [1648410000000.0, 1557.1333333333334], [1648410300000.0, 1630.6133333333332], [1648410600000.0, 1591.93], [1648410900000.0, 1579.5733333333333], [1648411200000.0, 1585.1466666666668], [1648411500000.0, 1565.6166666666666], [1648411800000.0, 1566.3366666666666], [1648412100000.0, 1544.1866666666667], [1648412400000.0, 1511.8166666666666], [1648412700000.0, 1525.2333333333333], [1648413000000.0, 1505.57], [1648413300000.0, 1462.9033333333334], [1648413600000.0, 1478.0733333333333], [1648413900000.0, 1460.76], [1648414200000.0, 1504.59], [1648414500000.0, 1460.3366666666666], [1648414800000.0, 1445.9366666666667], [1648415100000.0, 1410.0033333333333], [1648415400000.0, 1412.8466666666666], [1648415700000.0, 1364.8933333333334], [1648416000000.0, 1348.4], [1648416300000.0, 1338.3333333333333], [1648416600000.0, 1326.8633333333332], [1648416900000.0, 1276.24], [1648417200000.0, 1310.0333333333333], [1648417500000.0, 1285.63], [1648417800000.0, 1244.14], [1648418100000.0, 1258.38], [1648418400000.0, 1218.37], [1648418700000.0, 1182.0266666666666], [1648419000000.0, 1196.8133333333333], [1648419300000.0, 1144.54], [1648419600000.0, 1165.62], [1648419900000.0, 1122.0166666666667], [1648420200000.0, 1112.6766666666667], [1648420500000.0, 1102.6], [1648420800000.0, 1095.6966666666667], [1648421100000.0, 1056.63], [1648421400000.0, 1074.5066666666667], [1648421700000.0, 1047.5933333333332], [1648422000000.0, 1057.2633333333333], [1648422300000.0, 1043.99], [1648422600000.0, 1003.4033333333333], [1648422900000.0, 1022.2633333333333], [1648423200000.0, 1016.59], [1648423500000.0, 997.4466666666667], [1648423800000.0, 988.7666666666667], [1648424100000.0, 966.1666666666666], [1648424400000.0, 991.21], [1648424700000.0, 
977.6633333333333], [1648425000000.0, 959.64], [1648425300000.0, 961.6989966555184]]
But when I try to convert it with pandas, the conversion succeeds, but the result is not what I expected; I expected that the DataFrame would have two columns.
# Same Datadog query as above, then convert the pointlist into a DataFrame.
# (The original paste had lost its indentation, which made the `with` block a
# syntax error; structure restored here.)
with ApiClient(configuration) as api_client:
    api_instance = MetricsApi(api_client)
    response = api_instance.query_metrics(
        _from=int(yesterday_start_dt.timestamp()),
        to=int(yesterday_end_dt.timestamp()),
        query="default_zero(sum:trace.servlet.request.hits{env:prd-main,service:api}.as_rate())",
    )

result = response['series'][0]['pointlist']
# If `result` is a plain list of 2-element lists, this yields two columns.
# A single object-dtype column (as shown below) means the inner elements are
# NOT plain lists — presumably API point objects; convert them with
# `[list(p) for p in result]` first.  TODO confirm against the client library.
df = pd.DataFrame(result)
print(df)
0
0 [1648339200000.0, 1105.8433333333332]
1 [1648339500000.0, 1093.3266666666666]
2 [1648339800000.0, 1076.92]
3 [1648340100000.0, 1059.5133333333333]
4 [1648340400000.0, 1053.8966666666668]
.. ...
283 [1648424100000.0, 966.1666666666666]
284 [1648424400000.0, 991.21]
285 [1648424700000.0, 977.6633333333333]
286 [1648425000000.0, 959.64]
287 [1648425300000.0, 961.6989966555184]
[288 rows x 1 columns]
As I pointed out in the comment referring to another answer, here is how you may do it.
# Build the frame straight from the pointlist, naming the two columns inline.
result = response['series'][0]['pointlist']
df = pd.DataFrame(result, columns=['point 1', 'point 2'])
If you want the first elements of sublists to be in one column, I suggest you create an intermediate np.array and then reshape it into the needed output.
array_results = np.array(results)  # shape (N, 2): rows are [timestamp, value]
# BUG FIX: the original used `array_results.reshape(2, len(results))`.
# numpy refills in row-major order, so reshape(2, N) interleaves timestamps
# with values instead of separating them.  Transposing is what collects all
# first elements together (row 0) and all second elements together (row 1),
# which matches this answer's intended (2, N) output.
df = pd.DataFrame(array_results.T)
Following are a few samples of 'results' and my corresponding outputs.
Related
How to convert independent output lists to a dataframe
Hope you are having a great weekend. My problem is as follows: For my designed model i am getting the following predictions: [0.3182012736797333, 0.6817986965179443, 0.5067878365516663, 0.49321213364601135, 0.4795221984386444, 0.520477831363678, 0.532780110836029, 0.46721988916397095, 0.3282901346683502, 0.6717098355293274] [0.362120658159256, 0.6378793120384216, 0.5134761929512024, 0.4865237772464752, 0.46048662066459656, 0.539513349533081, 0.5342788100242615, 0.4657211899757385, 0.34932515025138855, 0.6506748199462891] [0.3647380471229553, 0.6352618932723999, 0.5087167620658875, 0.49128326773643494, 0.4709164798259735, 0.5290834903717041, 0.5408024787902832, 0.4591975510120392, 0.37024226784706116, 0.6297577023506165] [0.43765324354171753, 0.5623468160629272, 0.505147397518158, 0.49485257267951965, 0.45281311869621277, 0.5471869111061096, 0.5416161417961121, 0.45838382840156555, 0.3789178133010864, 0.6210821866989136] [0.44772887229919434, 0.5522711277008057, 0.5119441151618958, 0.48805591464042664, 0.46322566270828247, 0.5367743372917175, 0.5402485132217407, 0.45975151658058167, 0.4145151972770691, 0.5854847431182861] [0.35674020648002625, 0.6432597637176514, 0.48104971647262573, 0.5189502835273743, 0.4554695188999176, 0.54453045129776, 0.5409557223320007, 0.45904430747032166, 0.3258989453315735, 0.6741010546684265] [0.3909384310245514, 0.609061598777771, 0.4915180504322052, 0.5084819793701172, 0.45033228397369385, 0.5496677160263062, 0.5267384052276611, 0.47326159477233887, 0.34493446350097656, 0.6550655364990234] [0.32971733808517456, 0.6702827215194702, 0.5224012732505798, 0.47759872674942017, 0.4692566692829132, 0.5307433605194092, 0.5360044836997986, 0.4639955163002014, 0.41811054944992065, 0.5818894505500793] [0.37096619606018066, 0.6290338039398193, 0.5165190100669861, 0.4834809899330139, 0.4739859998226166, 0.526013970375061, 0.5340168476104736, 0.46598318219184875, 0.3438771069049835, 0.6561229228973389] [0.4189890921115875, 0.5810109376907349, 
0.52749103307724, 0.47250890731811523, 0.44485437870025635, 0.5551456212997437, 0.5398098230361938, 0.46019014716148376, 0.3739124536514282, 0.6260875463485718] [0.3979812562465668, 0.6020187139511108, 0.5050275325775146, 0.49497246742248535, 0.4653399884700775, 0.5346599817276001, 0.537341833114624, 0.4626581072807312, 0.33742010593414307, 0.6625799536705017] [0.368088960647583, 0.631911039352417, 0.49925288558006287, 0.5007471442222595, 0.4547160863876343, 0.545283854007721, 0.5408452749252319, 0.45915472507476807, 0.4053747355937958, 0.5946252346038818] As you can see they are independent lists. I want to convert these lists into a dataframe. Although they are independent, they are coming out of a for loop, so i cannot append them because they are not coming at once.
Use: data = [[0.3182012736797333, 0.6817986965179443, 0.5067878365516663, 0.49321213364601135, 0.4795221984386444, 0.520477831363678, 0.532780110836029, 0.46721988916397095, 0.3282901346683502, 0.6717098355293274], [0.362120658159256, 0.6378793120384216, 0.5134761929512024, 0.4865237772464752, 0.46048662066459656, 0.539513349533081, 0.5342788100242615, 0.4657211899757385, 0.34932515025138855, 0.6506748199462891], [0.3647380471229553, 0.6352618932723999, 0.5087167620658875, 0.49128326773643494, 0.4709164798259735, 0.5290834903717041, 0.5408024787902832, 0.4591975510120392, 0.37024226784706116, 0.6297577023506165], [0.43765324354171753, 0.5623468160629272, 0.505147397518158, 0.49485257267951965, 0.45281311869621277, 0.5471869111061096, 0.5416161417961121, 0.45838382840156555, 0.3789178133010864, 0.6210821866989136], [0.44772887229919434, 0.5522711277008057, 0.5119441151618958, 0.48805591464042664, 0.46322566270828247, 0.5367743372917175, 0.5402485132217407, 0.45975151658058167, 0.4145151972770691, 0.5854847431182861], [0.35674020648002625, 0.6432597637176514, 0.48104971647262573, 0.5189502835273743, 0.4554695188999176, 0.54453045129776, 0.5409557223320007, 0.45904430747032166, 0.3258989453315735, 0.6741010546684265], [0.3909384310245514, 0.609061598777771, 0.4915180504322052, 0.5084819793701172, 0.45033228397369385, 0.5496677160263062, 0.5267384052276611, 0.47326159477233887, 0.34493446350097656, 0.6550655364990234], [0.32971733808517456, 0.6702827215194702, 0.5224012732505798, 0.47759872674942017, 0.4692566692829132, 0.5307433605194092, 0.5360044836997986, 0.4639955163002014, 0.41811054944992065, 0.5818894505500793], [0.37096619606018066, 0.6290338039398193, 0.5165190100669861, 0.4834809899330139, 0.4739859998226166, 0.526013970375061, 0.5340168476104736, 0.46598318219184875, 0.3438771069049835, 0.6561229228973389], [0.4189890921115875, 0.5810109376907349, 0.52749103307724, 0.47250890731811523, 0.44485437870025635, 0.5551456212997437, 0.5398098230361938, 
0.46019014716148376, 0.3739124536514282, 0.6260875463485718], [0.3979812562465668, 0.6020187139511108, 0.5050275325775146, 0.49497246742248535, 0.4653399884700775, 0.5346599817276001, 0.537341833114624, 0.4626581072807312, 0.33742010593414307, 0.6625799536705017], [0.368088960647583, 0.631911039352417, 0.49925288558006287, 0.5007471442222595, 0.4547160863876343, 0.545283854007721, 0.5408452749252319, 0.45915472507476807, 0.4053747355937958, 0.5946252346038818]] # Create this before your for loop df = pd.DataFrame(columns = range(10)) for pred_list in data: #Add this within your for loop df = df.append(pd.Series(pred_list), ignore_index=True) output:
Failing while passing dataframe column seperated by comma into a API
I have to pass locations to API to retrieve values. Working Code dfs = [] locations = ['ZRH','SIN'] for loc in locations: response = requests.get(f'https://risk.dev.tyche.eu-central-1.aws.int.kn/il/risk/location/{loc}', headers=headers, verify=False) if 'items' in data: df = pd.json_normalize(data, 'items', 'totalItems') df1 = pd.concat([pd.DataFrame(x) for x in df.pop('relatedEntities')], keys=df.index).add_prefix('relatedEntities.') df3 = df.join((df1).reset_index(level=1, drop=True)) dfs.append(df3) df = pd.concat(dfs, ignore_index=True) Failing Code ( while passing as parameter) When I try to pass location as parameter which is created another dataframe column it fails. Unique_Location = data['LOCATION'].unique() Unique_Location = pd.DataFrame( list(zip(Unique_Location)), columns =['Unique_Location']) t= ','.join(map(repr,Unique_Location['Unique_Location'] )) locations = [t] for loc in locations: response = requests.get(f'https://risk.dev.logindex.com/il/risk/location/{loc}', headers=headers) data = json.loads(response.text) df = pd.json_normalize(data, 'items', 'totalItems') What is wrong in my code? Error `c:\users\ashok.eapen\pycharmprojects\rs-components\venv\lib\site-packages\pandas\io\json\_normalize.py in _pull_records(js, spec) 246 if has non iterable value. 247 """ --> 248 result = _pull_field(js, spec) 249 250 # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not c:\users\ashok.eapen\pycharmprojects\rs-components\venv\lib\site-packages\pandas\io\json\_normalize.py in _pull_field(js, spec) 237 result = result[field] 238 else: --> 239 result = result[spec] 240 return result 241 KeyError: 'items' `
You can test if items exist in json like: dfs = [] locations = ['NZAKL', 'NZ23-USBCH', 'DEBAD', 'ARBUE', 'AR02_GSTI', 'AEJEA', 'UYMVD', 'UY03', 'AE01_GSTI', 'TH02_GSTI', 'JO01_GSTI', 'ITSIM', 'GB75_GSTI', 'DEAMA', 'DE273_GSTI', 'ITPRO', 'AT07_GSTI', 'FR05', 'FRHAU', 'FR01_GSTI', 'FRHER', 'ES70X-FRLBM', 'THNEO'] for loc in locations: response = requests.get(f'https://risk.dev.logindex.com/il/risk/location/{loc}', headers=headers) data = json.loads(response.text) if 'items' in data: if len(data['items']) > 0: df = pd.json_normalize(data, 'items', 'totalItems') #NaN in column, so failed - replace NaN to empty list f = lambda x: x if isinstance(x, list) else [] df['raw.identifiers'] = df['raw.identifiers'].apply(f) df['raw.relationships'] = df['raw.relationships'].apply(f) df1 = pd.concat([pd.DataFrame(x) for x in df.pop('raw.identifiers')], keys=df.index).add_prefix('raw.identifiers.') df2 = pd.concat([pd.DataFrame(x) for x in df.pop('raw.relationships')], keys=df.index).add_prefix('raw.relationships.') df3 = df.join(df1.join(df2).reset_index(level=1, drop=True)) dfs.append(df3) df = pd.concat(dfs, ignore_index=True)
Convert the results of a dictionary to a dataframe
From this commands from stackapi import StackAPI lst = ['11786778','12370060'] df = pd.DataFrame(lst) SITE = StackAPI('stackoverflow', key="xxxx") results = [] for i in range(1,len(df)): SITE.max_pages=10000000 SITE.page_size=100 post = SITE.fetch('/users/{ids}/reputation-history', ids=lst[i]) results.append(post) The results variable prints the results of the json format How is it possible to converts the results variable to a dataframe with five columns? reputation_history_type, reputation_change, post_id, creation_date, user_id
Here try this : from stackapi import StackAPI import pandas as pd lst = ['11786778','12370060'] SITE = StackAPI('stackoverflow') results = [] SITE.max_pages=10000000 SITE.page_size=100 for i in lst: post = SITE.fetch('/users/{ids}/reputation-history', ids=[i]).get('items') results.extend([list(j.values()) for j in post]) df = pd.DataFrame(results, columns = ['reputation_history_type', 'reputation_change', 'post_id', 'creation_date', 'user_id']) Output : print(df.head()) gives : reputation_history_type reputation_change post_id creation_date user_id 0 asker_accepts_answer 2 59126012 1575207944 11786778.0 1 post_undownvoted 2 59118819 1575139301 11786778.0 2 post_upvoted 10 59118819 1575139301 11786778.0 3 post_downvoted -2 59118819 1575139299 11786778.0 4 post_upvoted 10 59110166 1575094452 11786778.0 print(df.tail()) gives : reputation_history_type reputation_change post_id creation_date user_id 170 post_upvoted 10 58906292 1574036540 12370060.0 171 answer_accepted 15 58896536 1573990105 12370060.0 172 post_upvoted 10 58896044 1573972834 12370060.0 173 post_downvoted 0 58896299 1573948372 12370060.0 174 post_downvoted 0 58896158 1573947435 12370060.0 NOTE : You can just create a dataframe direct from the result which will be list of lists. You don't need to declare SITE.max_page and SIZE.page_size every time you loop through the lst.
from stackapi import StackAPI
import pandas as pd

lst = ['11786778', '12370060']

SITE = StackAPI('stackoverflow', key="xxxx")
# Hoisted out of the loop: these settings are invariant across iterations.
SITE.max_pages = 10000000
SITE.page_size = 100

# BUG FIX: the original looped `for i in range(1, len(df))`, which skips the
# first user id entirely.  Iterate over the ids directly instead.
rows = []
for user_id in lst:
    post = SITE.fetch('/users/{ids}/reputation-history', ids=user_id)
    # BUG FIX: the original appended the whole response object, so the five
    # named columns below could never line up.  Collect the per-event dicts
    # from the response's 'items' list instead.
    rows.extend(post.get('items', []))

# BUG FIX: the original was missing the closing parenthesis on this call.
df = pd.DataFrame(rows, columns=['reputation_history_type', 'reputation_change',
                                 'post_id', 'creation_date', 'user_id'])
print(df)
Kinda flying in the blind since I maxed out my StackOverflow API limit, but this should work: from stackapi import StackAPI from pandas.io.json import json_normalize lst = ['11786778','12370060'] SITE = StackAPI('stackoverflow', key="xxx") results = [] for ids in lst: SITE.max_pages=10000000 SITE.page_size=100 post = SITE.fetch('/users/{ids}/reputation-history', ids=ids) results.append(json_normalize(post, 'items')) df = pd.concat(results, ignore_index=True) json_normalize converts the JSON to dataframe pd.concat concatenates the dataframes together to make a single frame
How to reshape data in Python?
I have a data set as given below- Timestamp = 22-05-2019 08:40 :Light = 64.00 :Temp_Soil = 20.5625 :Temp_Air = 23.1875 :Soil_Moisture_1 = 756 :Soil_Moisture_2 = 780 :Soil_Moisture_3 = 1002 Timestamp = 22-05-2019 08:42 :Light = 64.00 :Temp_Soil = 20.5625 :Temp_Air = 23.125 :Soil_Moisture_1 = 755 :Soil_Moisture_2 = 782 :Soil_Moisture_3 = 1002 And I want to Reshape(rearrange) the dataset to orient header columns like [Timestamp, Light, Temp_Soil, Temp_Air, Soil_Moisture_1, Soil_Moisture_2, Soil_Moisture_3] and their values as the row entry in Python.
One of possible solutions: Instead of a "true" input file, I used a string: inp="""Timestamp = 22-05-2019 08:40 :Light = 64.00 :TempSoil = 20.5625 :TempAir = 23.1875 :SoilMoist1 = 756 :SoilMoist2 = 780 :SoilMoist3 = 1002 Timestamp = 22-05-2019 08:42 :Light = 64.00 :TempSoil = 20.5625 :TempAir = 23.125 :SoilMoist1 = 755 :SoilMoist2 = 782 :SoilMoist3 = 1002""" buf = pd.compat.StringIO(inp) To avoid "folding" of output lines, I shortened field names. Then let's create the result DataFrame and a list of "rows" to append to it. For now - both of them are empty. df = pd.DataFrame(columns=['Timestamp', 'Light', 'TempSoil', 'TempAir', 'SoilMoist1', 'SoilMoist2', 'SoilMoist3']) src = [] Below is a loop processing input rows: while True: line = buf.readline() if not(line): # EOF break lst = re.split(r' :', line.rstrip()) # Field list if len(lst) < 2: # Skip empty source lines continue dct = {} # Source "row" (dictionary) for elem in lst: # Process fields k, v = re.split(r' = ', elem) dct[k] = v # Add field : value to "row" src.append(dct) And the last step is to append rows from src to df : df = df.append(src, ignore_index =True, sort=False) When you print(df), for my test data, you will get: Timestamp Light TempSoil TempAir SoilMoist1 SoilMoist2 SoilMoist3 0 22-05-2019 08:40 64.00 20.5625 23.1875 756 780 1002 1 22-05-2019 08:42 64.00 20.5625 23.125 755 782 1002 For now all columns are of string type, so you can change the required columns to either float or int: df.Light = pd.to_numeric(df.Light) df.TempSoil = pd.to_numeric(df.TempSoil) df.TempAir = pd.to_numeric(df.TempAir) df.SoilMoist1 = pd.to_numeric(df.SoilMoist1) df.SoilMoist2 = pd.to_numeric(df.SoilMoist2) df.SoilMoist3 = pd.to_numeric(df.SoilMoist3) Note that to_numeric() function is clever enough to recognize the possible type to convert to, so first 3 columns changed their type to float64 and the next 3 to int64. You can check it executing df.info(). 
One more possible conversion is to change Timestamp column to DateTime type: df.Timestamp = pd.to_datetime(df.Timestamp)
Python , how to get value from a loop and assign to another loop?
I have two loops below the first one is the timand and the second one is shared. What i want to know how can i assign each result to shared["score"] ? cause what i had try below i assign shared["score"] = timang["score"] just return 1 1 1 .... And also how can we return multiple response in python for example return Response(shared_data, tomon_dat, status=status.HTTP_200_OK) is this possible? #result of timang Result: 0 Result: 1 Result: 0 Result: 0 Result: 1 Result: 1 for timang in tomon_dat: tm_ins = QuestionaireAnswerModel.objects.get(id=timang["id"]) timang["score"] = tm_ins.score timang["id"] = tm_ins.id datatest = timang["score"] for shared in shared_data: questionaire_ins = QuestionaireModel.objects.get(random_code=shared["random_code"]) shared["title"] = questionaire_ins.title shared["sub_title"] = questionaire_ins.sub_title shared["idddd"] = questionaire_ins.id answer_ins = SharedQuestionaire.objects.get(id=shared["id"]) shared["is_answered"] = (QuestionaireAnswerModel.objects.filter(shared_questionaire=answer_ins).count()) > 0 shared["score"] = timang["score"]