Python, pandas: how to extract values from a symmetric, multi-index dataframe

I have a symmetric, multi-index dataframe from which I want to systematically extract data:
import pandas as pd

df_index = pd.MultiIndex.from_arrays(
    [["A", "A", "B", "B"], [1, 2, 3, 4]], names=["group", "id"])
df = pd.DataFrame(
    [[1.0, 0.5, 0.3, -0.4],
     [0.5, 1.0, 0.9, -0.8],
     [0.3, 0.9, 1.0, 0.1],
     [-0.4, -0.8, 0.1, 1.0]],
    index=df_index, columns=df_index)
I want a function extract_vals that can return all values related to elements in the same group, except for the diagonal, and without double-counting any element. Here are two examples of the desired behavior (order does not matter):
A_vals = extract_vals("A", df) # [0.5, 0.3, -0.4, 0.9, -0.8]
B_vals = extract_vals("B", df) # [0.3, 0.9, 0.1, -0.4, -0.8]
My question is similar to this question on SO, but my situation is different because I am using a multi-index dataframe.
Finally, to make things more fun, please consider efficiency because I'll be running this many times on much bigger dataframes. Thanks very much!
EDIT:
Happy001's solution is awesome. I came up with a method myself based on the logic of extracting the elements where target is in the rows but NOT in the columns, and then extracting the strict upper triangle of the block where target IS in both the rows and columns. However, Happy001's solution is much faster.
First, I created a more complex dataframe to make sure both methods are generalizable:
import pandas as pd
import numpy as np

df_index = pd.MultiIndex.from_arrays(
    [["A", "B", "A", "B", "C", "C"], [1, 2, 3, 4, 5, 6]], names=["group", "id"])
df = pd.DataFrame(
    [[1.0, 0.5, 1.0, -0.4, 1.1, -0.6],
     [0.5, 1.0, 1.2, -0.8, -0.9, 0.4],
     [1.0, 1.2, 1.0, 0.1, 0.3, 1.3],
     [-0.4, -0.8, 0.1, 1.0, 0.5, -0.2],
     [1.1, -0.9, 0.3, 0.5, 1.0, 0.7],
     [-0.6, 0.4, 1.3, -0.2, 0.7, 1.0]],
    index=df_index, columns=df_index)
Next, I defined both versions of extract_vals (the first is my own):
def extract_vals(target, multi_index_level_name, df):
    # Extract entries where target is in the rows but NOT also in the columns
    target_in_rows_but_not_in_cols_vals = df.loc[
        df.index.get_level_values(multi_index_level_name) == target,
        df.columns.get_level_values(multi_index_level_name) != target]
    # Extract entries where target is in the rows AND in the columns,
    # keeping only the strict upper triangle so nothing is double-counted
    target_in_rows_and_cols_df = df.loc[
        df.index.get_level_values(multi_index_level_name) == target,
        df.columns.get_level_values(multi_index_level_name) == target]
    mask = np.triu(np.ones(target_in_rows_and_cols_df.shape), k=1).astype(bool)
    vals_with_nans = target_in_rows_and_cols_df.where(mask).values.flatten()
    target_in_rows_and_cols_vals = vals_with_nans[~np.isnan(vals_with_nans)]
    # Append both arrays of extracted values
    vals = np.append(target_in_rows_but_not_in_cols_vals, target_in_rows_and_cols_vals)
    return vals
def extract_vals2(target, multi_index_level_name, df):
    # Get indices for what you want to extract and then extract all at once
    coord = [[i, j] for i in range(len(df)) for j in range(len(df)) if i < j and (
        df.index.get_level_values(multi_index_level_name)[i] == target or (
            df.columns.get_level_values(multi_index_level_name)[j] == target))]
    return df.values[tuple(np.transpose(coord))]
I checked that both functions returned output as desired:
# Expected values
e_A_vals = np.sort([0.5, 1.0, -0.4, 1.1, -0.6, 1.2, 0.1, 0.3, 1.3])
e_B_vals = np.sort([0.5, 1.2, -0.8, -0.9, 0.4, -0.4, 0.1, 0.5, -0.2])
e_C_vals = np.sort([1.1, -0.9, 0.3, 0.5, 0.7, -0.6, 0.4, 1.3, -0.2])
# Sort because order doesn't matter
assert np.allclose(np.sort(extract_vals("A", "group", df)), e_A_vals)
assert np.allclose(np.sort(extract_vals("B", "group", df)), e_B_vals)
assert np.allclose(np.sort(extract_vals("C", "group", df)), e_C_vals)
assert np.allclose(np.sort(extract_vals2("A", "group", df)), e_A_vals)
assert np.allclose(np.sort(extract_vals2("B", "group", df)), e_B_vals)
assert np.allclose(np.sort(extract_vals2("C", "group", df)), e_C_vals)
And finally, I checked speed:
## Test speed
import time

# Method 1
start1 = time.time()
for ii in range(10000):
    out = extract_vals("C", "group", df)
elapsed1 = time.time() - start1
print(elapsed1)  # 28.5 sec

# Method 2
start2 = time.time()
for ii in range(10000):
    out2 = extract_vals2("C", "group", df)
elapsed2 = time.time() - start2
print(elapsed2)  # 10.9 sec
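As an aside, the same comparison could be done with the timeit module, which handles the looping itself (a sketch, not the timing code I actually ran):

import timeit

# Time 10000 calls of each version; absolute numbers will vary by machine
t1 = timeit.timeit(lambda: extract_vals("C", "group", df), number=10000)
t2 = timeit.timeit(lambda: extract_vals2("C", "group", df), number=10000)
print(t1, t2)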

I don't assume df has the same columns and index (of course, they can be the same).
import numpy as np

def extract_vals(group_label, df):
    # Keep (i, j) pairs above the diagonal where either endpoint is in the group
    coord = [[i, j] for i in range(len(df)) for j in range(len(df))
             if i < j and (df.index.get_level_values('group')[i] == group_label
                           or df.columns.get_level_values('group')[j] == group_label)]
    return df.values[tuple(np.transpose(coord))]

print(extract_vals('A', df))
print(extract_vals('B', df))
result:
[ 0.5 0.3 -0.4 0.9 -0.8]
[ 0.3 -0.4 0.9 -0.8 0.1]
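The last line works via NumPy integer (fancy) indexing: np.transpose(coord) turns the list of [i, j] pairs into one array of row indices and one of column indices, and indexing df.values with that tuple pulls out exactly those elements. A minimal illustration with the 4x4 df from the question:

import numpy as np

coord = [[0, 1], [0, 2], [1, 3]]          # example (row, col) pairs
rows, cols = tuple(np.transpose(coord))   # array([0, 0, 1]), array([1, 2, 3])
print(df.values[rows, cols])              # [ 0.5  0.3 -0.8]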

Is that what you want?
All elements above the diagonal:
In [139]: df.values[np.triu_indices(len(df), 1)]
Out[139]: array([ 0.5, 0.3, -0.4, 0.9, -0.8, 0.1])
A_vals:
In [140]: df.values[np.triu_indices(len(df), 1)][:-1]
Out[140]: array([ 0.5, 0.3, -0.4, 0.9, -0.8])
B_vals:
In [141]: df.values[np.triu_indices(len(df), 1)][1:]
Out[141]: array([ 0.3, -0.4, 0.9, -0.8, 0.1])
Source matrix:
In [142]: df.values
Out[142]:
array([[ 1. ,  0.5,  0.3, -0.4],
       [ 0.5,  1. ,  0.9, -0.8],
       [ 0.3,  0.9,  1. ,  0.1],
       [-0.4, -0.8,  0.1,  1. ]])
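Note that the [:-1] / [1:] slices only work because the groups here are contiguous blocks of two. A more general version of the same idea would filter the upper-triangle index pairs by group membership (a sketch, assuming rows and columns share the same MultiIndex as in the question; group_vals is a hypothetical helper name):

import numpy as np

def group_vals(target, df, level="group"):
    # Strict upper triangle: each off-diagonal pair is counted once
    i, j = np.triu_indices(len(df), 1)
    groups = df.index.get_level_values(level).to_numpy()
    # Keep pairs where either endpoint belongs to the target group
    keep = (groups[i] == target) | (groups[j] == target)
    return df.values[i[keep], j[keep]]

For the example dataframe, group_vals("A", df) gives the same five values as A_vals above.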

Related

Selecting items on a matrix based on indexes given by an array

Consider this matrix:
[0.9, 0.45, 0.4, 0.35],
[0.4, 0.8, 0.3, 0.25],
[0.5, 0.45, 0.9, 0.35],
[0.2, 0.18, 0.8, 0.1],
[0.6, 0.45, 0.4, 0.9]
and this list:
[0,1,2,3,3]
I want to create a list that looks like the following:
[0.9, 0.8, 0.9, 0.1, 0.9]
To clarify: for each row, I want the element of the matrix whose column index is given by the corresponding entry in the list. How can I accomplish this?
Zip the two lists together as below
a = [[0.9, 0.45, 0.4, 0.35],
     [0.4, 0.8, 0.3, 0.25],
     [0.5, 0.45, 0.9, 0.35],
     [0.2, 0.18, 0.8, 0.1],
     [0.6, 0.45, 0.4, 0.9]]
b = [0, 1, 2, 3, 3]
[i[j] for i, j in zip(a, b)]
Result
[0.9, 0.8, 0.9, 0.1, 0.9]
This basically pairs up each sublist of the matrix with the corresponding element of your second list using zip(a, b).
Then for each pair (i, j) you take the j-th element of the row i.
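Spelled out on the first two rows, the pairing looks like this:

a = [[0.9, 0.45, 0.4, 0.35], [0.4, 0.8, 0.3, 0.25]]
b = [0, 1]
print(list(zip(a, b)))                   # [([0.9, 0.45, 0.4, 0.35], 0), ([0.4, 0.8, 0.3, 0.25], 1)]
print([row[j] for row, j in zip(a, b)])  # [0.9, 0.8]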
If this is a numpy array, you can pass in two numpy arrays to access the desired indices:
import numpy as np

data = np.array([[0.9, 0.45, 0.4, 0.35],
                 [0.4, 0.8, 0.3, 0.25],
                 [0.5, 0.45, 0.9, 0.35],
                 [0.2, 0.18, 0.8, 0.1],
                 [0.6, 0.45, 0.4, 0.9]])
indices = np.array([0, 1, 2, 3, 3])
data[np.arange(data.shape[0]), indices]
This outputs:
[0.9 0.8 0.9 0.1 0.9]
In the list [0, 1, 2, 3, 3], the row is determined by the position of each element, and the value at that position is the column. This is a good case for enumerate:
matrix = [[ ... ], [ ... ], ...] # your matrix
selections = [0, 1, 2, 3, 3]
result = [matrix[i][j] for i, j in enumerate(selections)]
This will be much more efficient than looping through the entire matrix.
Loop through both arrays together using the zip function.
def create_array_from_matrix(matrix, indices):
    if len(matrix) != len(indices):
        return None
    res = []
    for row, index in zip(matrix, indices):
        res.append(row[index])
    return res
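A quick check with the example from the question:

matrix = [[0.9, 0.45, 0.4, 0.35],
          [0.4, 0.8, 0.3, 0.25],
          [0.5, 0.45, 0.9, 0.35],
          [0.2, 0.18, 0.8, 0.1],
          [0.6, 0.45, 0.4, 0.9]]
print(create_array_from_matrix(matrix, [0, 1, 2, 3, 3]))  # [0.9, 0.8, 0.9, 0.1, 0.9]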

Return the biggest value less than one from a numpy vector

I have a numpy vector in python and I want to find the index of the max value of the vector with the condition that it is less than one. I have as an example the following:
temp_res = [0.9, 0.8, 0.7, 0.99, 1.2, 1.5, 0.1, 0.5, 0.1, 0.01, 0.12, 0.56, 0.89, 0.23, 0.56, 0.78]
temp_res = np.asarray(temp_res)
indices = np.where((temp_res == temp_res.max()) & (temp_res < 1))
However, what I tried always returns an empty result, since those two conditions cannot both be met. I want to return as the final result the index 3, which corresponds to 0.99, the biggest value that is less than 1. How can I do so?
You need to perform the max() function after filtering your array:
temp_res = np.asarray(temp_res)
temp_res[temp_res < 1].max()
Out[60]: 0.99
If you want to find all the indices, here is a more general approach:
mask = temp_res < 1
indices = np.where(mask)
maximum = temp_res[mask].max()
max_indices = np.where(temp_res == maximum)
Example:
temp_res = [0.9, 0.8, 0.7, 1, 0.99, 0.99, 1.2, 1.5, 0.1, 0.5, 0.1, 0.01, 0.12, 0.56, 0.89, 0.23, 0.56, 0.78]
temp_res = np.asarray(temp_res)
mask = temp_res < 1
indices = np.where(mask)
maximum = temp_res[mask].max()
max_indices = np.where(temp_res == maximum)

In [72]: max_indices
Out[72]: (array([4, 5]),)
You can use:
np.where(temp_res == temp_res[temp_res < 1].max())[0]
Example:
In [49]: temp_res
Out[49]:
array([0.9 , 0.8 , 0.7 , 0.99, 1.2 , 1.5 , 0.1 , 0.5 , 0.1 , 0.01, 0.12,
       0.56, 0.89, 0.23, 0.56, 0.78])

In [50]: np.where(temp_res == temp_res[temp_res < 1].max())[0]
Out[50]: array([3])
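Another way to get a single index directly is to mask out the disqualified values and take argmax (a sketch, not part of either answer above):

import numpy as np

temp_res = np.asarray([0.9, 0.8, 0.7, 0.99, 1.2, 1.5, 0.1, 0.5,
                       0.1, 0.01, 0.12, 0.56, 0.89, 0.23, 0.56, 0.78])
# Values >= 1 are replaced by -inf so they can never win the argmax
idx = int(np.argmax(np.where(temp_res < 1, temp_res, -np.inf)))
print(idx)  # 3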

Two-dimensional array slicing

I have a portion of the Viterbi algorithm that I want to manipulate, and I need to understand the slicing part in this code:
import numpy as np

A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]])
pi = np.array([0.5, 0.2, 0.3])
O = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]])
states = UP, DOWN, UNCHANGED = 0, 1, 2
observations = [UP, UP, DOWN]

alpha = np.zeros((len(observations), len(states)))  # time steps x states
alpha[:, :] = float('-inf')
backpointers = np.zeros((len(observations), len(states)), 'int')

alpha[0, :] = pi * O[:, UP]   # <-- the line in question
When I print O[:,UP] from the last line, I believe it should give me:
[0.7, 0.1, 0.2]
Instead, it gives me:
O[:,UP]
Out[15]: array([ 0.7,  0.1,  0.3])
I looked at Understanding Python's slice notation, but I couldn't understand why the last element of the array changes.
Also, I ran this:
O[:,UNCHANGED]
Out[17]: array([ 0.2, 0.3, 0.4])
I'm still a newbie in Python, so I need some help.
You are mixing up the notation for columns and rows.
You print O[:,UP] which gives you all the rows and just the "UP"th column (index 0).
Your O is:
array([[ 0.7,  0.1,  0.2],
       [ 0.1,  0.6,  0.3],
       [ 0.3,  0.3,  0.4]])
And O[:,0] is
#        ↓ this column
array([[ 0.7,  0.1,  0.2],
       [ 0.1,  0.6,  0.3],
       [ 0.3,  0.3,  0.4]])
where O[0,:] would be
array([[ 0.7,  0.1,  0.2],   # <- this row
       [ 0.1,  0.6,  0.3],
       [ 0.3,  0.3,  0.4]])
And just to make the last part clear, O[:,UNCHANGED] is O[:,2], which is here:
#                    ↓ this column
array([[ 0.7,  0.1,  0.2],
       [ 0.1,  0.6,  0.3],
       [ 0.3,  0.3,  0.4]])
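To see the difference concretely, you can print both slices of the O from the question:

import numpy as np

O = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]])
print(O[:, 0])  # column 0 -> [0.7 0.1 0.3]
print(O[0, :])  # row 0    -> [0.7 0.1 0.2]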

Calculate difference each time the sign changes in a list of values

Ok let's imagine that I have a list of values like so:
list = [-0.23, -0.5, -0.3, -0.8, 0.3, 0.6, 0.8, -0.9, -0.4, 0.1, 0.6]
I would like to loop over this list and, each time the sign changes, get the absolute difference between the extremum of the current interval (its maximum if positive, its minimum if negative) and the extremum of the next interval.
For example on the previous list, we would like to have a result like so:
[1.6, 1.7, 1.5]
The tricky part is that it has to work also for lists like:
list = [0.12, -0.23, 0.52, 0.2, 0.6, -0.3, 0.4]
Which would return :
[0.35, 0.83, 0.9, 0.7]
It's tricky because some intervals are 1 value long, and I'm having difficulties with managing this.
How would you solve this with the least possible number of lines?
Here is my current code, but it's not working at the moment.
Here, list is a list of 6 lists, where each of those 6 lists contains either a NaN or a np.array of 1024 values (the values I want to evaluate):
Hmax = []
for c in range(0, 6):
    Hmax_tmp = []
    for i in range(len(list[c])):
        if not np.isnan(list[c][i]).any():
            zero_crossings = np.where(np.diff(np.sign(list[c][i])))[0]
            if not zero_crossings[0] == 0:
                zero_crossings = [0] + zero_crossings.tolist() + [1023]
            diff = []
            for j in range(1, len(zero_crossings) - 2):
                if
                diff.append(max(list[c][i][np.arange(zero_crossings[j-1], zero_crossings[j])].min(),
                                list[c][i][np.arange(zero_crossings[j]+1, zero_crossings[j+1])].max(), key=abs)
                            - max(list[c][i][np.arange(zero_crossings[j+1], zero_crossings[j+2])].min(),
                                  list[c][i][np.arange(zero_crossings[j+1], zero_crossings[j+2])].max(), key=abs))
            Hmax_tmp.append(np.max(diff))
        else:
            Hmax_tmp.append(np.nan)
    Hmax.append(Hmax_tmp)
This type of grouping operation can be greatly simplified using itertools.groupby. For example:
>>> from itertools import groupby
>>> lst = [-0.23, -0.5, -0.3, -0.8, 0.3, 0.6, 0.8, -0.9, -0.4, 0.1, 0.6] # the list
>>> minmax = [min(v) if k else max(v) for k,v in groupby(lst, lambda a: a < 0)]
>>> [abs(j-i) for i,j in zip(minmax[:-1], minmax[1:])]
[1.6, 1.7000000000000002, 1.5]
And the second list:
>>> lst2 = [0.12, -0.23, 0.52, 0.2, 0.6, -0.3, 0.4] # the list
>>> minmax = [min(v) if k else max(v) for k,v in groupby(lst2, lambda a: a < 0)]
>>> [abs(j-i) for i,j in zip(minmax[:-1], minmax[1:])]
[0.35, 0.83, 0.8999999999999999, 0.7]
So here, the list is grouped into consecutive intervals of negative/positive values. The min/max is computed for each group and stored in a list minmax. Lastly, a list comprehension finds the differences.
Excuse the inexact representations of floating point numbers!
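If the floating-point noise matters, one option is to wrap the same groupby idea in a small helper and round the results (a sketch, assuming two decimal places are enough; sign_change_diffs is a hypothetical name):

from itertools import groupby

def sign_change_diffs(values, ndigits=2):
    # Extremum of each sign run: min for negative runs, max for positive runs
    extrema = [min(v) if k else max(v) for k, v in groupby(values, lambda a: a < 0)]
    return [round(abs(j - i), ndigits) for i, j in zip(extrema, extrema[1:])]

print(sign_change_diffs([0.12, -0.23, 0.52, 0.2, 0.6, -0.3, 0.4]))  # [0.35, 0.83, 0.9, 0.7]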
It would be straightforward to retrieve max/min values of intervals, and then do the calculation.
def difference(nums):
    if not nums:
        return []
    pivots = []
    last_sign = nums[0] >= 0
    current = 0
    for x in nums:
        current_sign = x >= 0
        if current_sign != last_sign:
            pivots.append(current)
            current = 0
            last_sign = current_sign
        current = max(current, x) if current_sign else min(current, x)
    pivots.append(current)
    result = []
    for idx in range(len(pivots)):
        if idx + 1 < len(pivots):
            result.append(abs(pivots[idx] - pivots[idx + 1]))
    return result

>>> print(difference([-0.23, -0.5, -0.3, -0.8, 0.3, 0.6, 0.8, -0.9, -0.4, 0.1, 0.6]))
[1.6, 1.7000000000000002, 1.5]
>>> print(difference([0.12, -0.23, 0.52, 0.2, 0.6, -0.3, 0.4]))
[0.35, 0.83, 0.8999999999999999, 0.7]

Loop searching all possible combinations in an array

I'm having trouble with an algorithm in python.
I have an array of 4 values [a, b, c, d]. They are percentages, so at any given time a + b + c + d = 1. I need a loop that goes through all possible combinations of these numbers with a step size of 0.1. Example:
[0,0,0,1]
[0,0,0.1,0.9]
[0,0.1,0.1,0.8]
[0.1,0.1,0.1,0.7]
.....
[0.1,0.1,0.2,0.6]
[0.1,0.1,0.3,0.5]
....
[1,0,0,0]
I created some code, but it seems to overflow... any help? Thanks! I know it's a noob question...
def frange(start, stop, step):
    while start <= stop:
        yield start
        start += step

def distribuir(p, array):
    if len(array) == 3:
        array.append(p)
        print(Array)
        return
    for i in frange(0, 1, 0.1):
        temp = []
        temp.append(array)
        temp.append(i)
        distribuir(p - i, temp)
A naive recursive solution with a lot of room for optimization:
import itertools
def possibilities(prefix, size, values, total):
    if size == 0:
        return [prefix] if sum(prefix) == total else []
    return itertools.chain(*map(
        lambda v: possibilities(prefix + [v], size - 1, values, total),
        values
    ))
Example:
from decimal import Decimal

list(
    map(
        lambda t: [float(x) for x in t],
        possibilities(
            prefix=[],
            size=3,
            values=[Decimal(v) for v in ['0', '0.1', '0.2', '0.3']],
            total=Decimal('0.4')
        )
    )
)
Output:
[[0.0, 0.1, 0.3],
 [0.0, 0.2, 0.2],
 [0.0, 0.3, 0.1],
 [0.1, 0.0, 0.3],
 [0.1, 0.1, 0.2],
 [0.1, 0.2, 0.1],
 [0.1, 0.3, 0.0],
 [0.2, 0.0, 0.2],
 [0.2, 0.1, 0.1],
 [0.2, 0.2, 0.0],
 [0.3, 0.0, 0.1],
 [0.3, 0.1, 0.0]]
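For the original four-value problem with a 0.1 step, another way to sidestep floating-point drift entirely is to enumerate integer compositions of 10 and divide at the end (a sketch, not part of the answer above; compositions_of_one is a hypothetical name):

def compositions_of_one(steps=10):
    # All 4-tuples of non-negative multiples of 1/steps that sum exactly to 1
    for a in range(steps + 1):
        for b in range(steps + 1 - a):
            for c in range(steps + 1 - a - b):
                d = steps - a - b - c
                yield (a / steps, b / steps, c / steps, d / steps)

combos = list(compositions_of_one())
print(len(combos))  # 286 tuples for 4 values and a 0.1 step
print(combos[0])    # (0.0, 0.0, 0.0, 1.0)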
