Trouble with lossy conversion in Python

I wrote a program in Python that is designed to find a formula describing a set of numbers. It works pretty well with smaller numbers that "play nice" (for instance, it handles [1, 5, 7, 4] just fine). However, when you feed it anything too large ([10, 42, 20, 42, 30, 42]) it quickly throws an exception (RuntimeError: maximum recursion depth exceeded in comparison). When I debug this, I find that I'm seeing a bit of lossy conversion to double (for this set of numbers, nval is, on the second iteration, -611.8000000000000001). This, of course, makes things go wrong and recurse infinitely. I realize that my program is probably crappy beyond the lossy conversion, and I'd love any tips you have on that too, but does anyone have any ideas on fixing this?
Code (as per @jonrsharpe's comment, this is as brief as I can make it without sacrificing readability):
import math

def getdeg(numlist, cnt):
    if allEqual(numlist):
        return (0, numlist[0])
    count = cnt
    templist = []
    for i in range(len(numlist) - 1):
        templist.append(numlist[i+1] - numlist[i])
    count += 1
    if not allEqual(templist):
        return getdeg(templist, count)
    else:
        return (count, templist[0])

def allEqual(numlist):
    x = len(numlist)
    if x == 1:
        return True
    for i in range(x-1):
        if not (numlist[i] == numlist[i+1]):
            return False
    return True

def getTerms(numlist, terms, maxpower):
    newtable = []
    power, fval = getdeg(numlist, 0)
    if maxpower == 0:
        maxpower = power
    terms.append(fval / float(math.factorial(power)))
    if not power == 0:
        for i in range(len(numlist)):
            nval = numlist[i] - (terms[maxpower - power] * ((i + 1) ** power))
            newtable.append(nval)
        return getTerms(newtable, terms, maxpower)
    return terms

def printeq(numlist):
    #numlist = [2, 8, 9, 11, 20]
    print("Coeff\tPower")
    x = getTerms(numlist, [], 0)
    topPow = len(x) - 1
    for i in range(len(x)):
        print(str(x[i]) + "\t" + str(topPow))
        topPow -= 1

printeq([10, 42, 20, 42, 30, 42])

Perhaps the fractions module would be useful to you. It can exactly represent any rational number, no lossy conversion required. I don't quite understand what your code is doing, but I think you can incorporate fractions by putting import fractions at the top of your script and changing a single existing line. Within getTerms,
terms.append(fval / float(math.factorial(power)))
becomes
terms.append(fractions.Fraction(fval,math.factorial(power)))
Then your program runs without crashing:
Coeff Power
13/5 5
-139/3 4
932/3 3
-2900/3 2
20576/15 1
-662 0
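For what it's worth, here is a minimal sketch (my own illustration, not from the answer above) of why this helps: Fraction keeps the exact rational value that float arithmetic only approximates.
import fractions

print(0.1 + 0.2)                                                # 0.30000000000000004
print(fractions.Fraction(1, 10) + fractions.Fraction(2, 10))    # 3/10, exact
print(fractions.Fraction(-3059, 5))                             # -611.8 held exactly as -3059/5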

Related

Recursive symbolic calculations - improve the performance

In my research I'm trying to tackle the Kolmogorov backward equation, i.e. I'm interested in
$$Af = b(x)f'(x)+\sigma(x)f''(x)$$
With specific b(x) and \sigma(x), I'm trying to see how fast the coefficients of the expression grow when computing higher powers of A applied to f. I'm struggling to derive this analytically, so I tried to see the trend empirically.
First, I have used sympy:
from sympy import *
import matplotlib.pyplot as plt
import re
import math
import numpy as np
import time

np.set_printoptions(suppress=True)

x = Symbol('x')
b = Function('b')(x)
g = Function('g')(x)

def new_coef(gamma, beta, coef_minus2, coef_minus1, coef):
    return expand(simplify(gamma*coef_minus2 + beta*coef_minus1 + 2*gamma*coef_minus1.diff(x)\
        + beta*coef.diff(x) + gamma*coef.diff(x, 2)))

def new_coef_first(gamma, beta, coef):
    return expand(simplify(beta*coef.diff(x) + gamma*coef.diff(x, 2)))

def new_coef_second(gamma, beta, coef_minus1, coef):
    return expand(simplify(beta*coef_minus1 + 2*gamma*coef_minus1.diff(x)\
        + beta*coef.diff(x) + gamma*coef.diff(x, 2)))

def new_coef_last(gamma, beta, coef_minus2):
    return expand(simplify(gamma*coef_minus2))

def new_coef_second_to_last(gamma, beta, coef_minus2, coef_minus1):
    return expand(simplify(gamma*coef_minus2 + beta*coef_minus1 + 2*gamma*coef_minus1.diff(x)))

def set_to_zero(expression):
    expression = expression.subs(Derivative(b, x, x, x), 0)
    expression = expression.subs(Derivative(b, x, x), 0)
    expression = expression.subs(Derivative(g, x, x, x, x), 0)
    expression = expression.subs(Derivative(g, x, x, x), 0)
    return expression

def sum_of_coef(expression):
    sum_of_coef = 0
    for i in str(expression).split(' + '):
        if i[0:1] == '(':
            i = i[1:]
        integers = re.findall(r'\b\d+\b', i)
        if len(integers) > 0:
            length_int = len(integers[0])
            if i[0:length_int] == integers[0]:
                sum_of_coef += int(integers[0])
            else:
                sum_of_coef += 1
        else:
            sum_of_coef += 1
    return sum_of_coef

power = 6
charar = np.zeros((power, power*2), dtype=Symbol)
coef_sum_array = np.zeros((power, power*2))
charar[0, 0] = b
charar[0, 1] = g
coef_sum_array[0, 0] = 1
coef_sum_array[0, 1] = 1

for i in range(1, power):
    #print(i)
    for j in range(0, (i+1)*2):
        #print(j, ':')
        #start_time = time.time()
        if j == 0:
            charar[i, j] = set_to_zero(new_coef_first(g, b, charar[i-1, j]))
        elif j == 1:
            charar[i, j] = set_to_zero(new_coef_second(g, b, charar[i-1, j-1], charar[i-1, j]))
        elif j == (i+1)*2-2:
            charar[i, j] = set_to_zero(new_coef_second_to_last(g, b, charar[i-1, j-2], charar[i-1, j-1]))
        elif j == (i+1)*2-1:
            charar[i, j] = set_to_zero(new_coef_last(g, b, charar[i-1, j-2]))
        else:
            charar[i, j] = set_to_zero(new_coef(g, b, charar[i-1, j-2], charar[i-1, j-1], charar[i-1, j]))
        #print("--- %s seconds for expression---" % (time.time() - start_time))
        #start_time = time.time()
        coef_sum_array[i, j] = sum_of_coef(charar[i, j])
        #print("--- %s seconds for coefficients---" % (time.time() - start_time))

coef_sum_array
Then I looked into automatic differentiation and used autograd:
import autograd.numpy as np
from autograd import grad
import time

np.set_printoptions(suppress=True)

b = lambda x: 1 + x
g = lambda x: 1 + x + x**2

def new_coef(gamma, beta, coef_minus2, coef_minus1, coef):
    return lambda x: gamma(x)*coef_minus2(x) + beta(x)*coef_minus1(x) + 2*gamma(x)*grad(coef_minus1)(x)\
        + beta(x)*grad(coef)(x) + gamma(x)*grad(grad(coef))(x)

def new_coef_first(gamma, beta, coef):
    return lambda x: beta(x)*grad(coef)(x) + gamma(x)*grad(grad(coef))(x)

def new_coef_second(gamma, beta, coef_minus1, coef):
    return lambda x: beta(x)*coef_minus1(x) + 2*gamma(x)*grad(coef_minus1)(x)\
        + beta(x)*grad(coef)(x) + gamma(x)*grad(grad(coef))(x)

def new_coef_last(gamma, beta, coef_minus2):
    return lambda x: gamma(x)*coef_minus2(x)

def new_coef_second_to_last(gamma, beta, coef_minus2, coef_minus1):
    return lambda x: gamma(x)*coef_minus2(x) + beta(x)*coef_minus1(x) + 2*gamma(x)*grad(coef_minus1)(x)

power = 6
coef_sum_array = np.zeros((power, power*2))
coef_sum_array[0, 0] = b(1.0)
coef_sum_array[0, 1] = g(1.0)
charar = [b, g]

for i in range(1, power):
    print(i)
    charar_new = []
    for j in range(0, (i+1)*2):
        if j == 0:
            new_funct = new_coef_first(g, b, charar[j])
        elif j == 1:
            new_funct = new_coef_second(g, b, charar[j-1], charar[j])
        elif j == (i+1)*2-2:
            new_funct = new_coef_second_to_last(g, b, charar[j-2], charar[j-1])
        elif j == (i+1)*2-1:
            new_funct = new_coef_last(g, b, charar[j-2])
        else:
            new_funct = new_coef(g, b, charar[j-2], charar[j-1], charar[j])
        coef_sum_array[i, j] = new_funct(1.0)
        charar_new.append(new_funct)
    charar = charar_new

coef_sum_array
However, I'm not happy with the speed of either of them. I would like to do at least a thousand iterations, but after 3 days of running the SymPy method I got only 30 :/
I expect that the second (numerical) method could be optimized to avoid recalculating expressions every time. Unfortunately, I cannot see that solution myself. I have also tried Maple, but again without luck.
Overview
So, there are two formulas about derivatives that are interesting here:
Faà di Bruno's formula, which is a way to quickly find the n-th derivative of f(g(x)) and looks a lot like the Multinomial theorem
The General Leibniz rule, which is a way to quickly find the n-th derivative of f(x)*g(x) and looks a lot like the Binomial theorem
Both of these have been discussed in pull request #13892, where the n-th derivative was sped up using the general Leibniz rule.
I'm trying to see how fast the coefficients of the expression are growing
In your code, the general formula for computing c[i][j] is this:
c[i][j] = g * c[i-1][j-2] + b * c[i-1][j-1] + 2 * g * c'[i-1][j-1] + g * c''[i-1][j]
(where c'[i][j] and c''[i][j] denote the 1st and 2nd derivatives of c[i][j])
Because of this, and by the Leibniz rule mentioned above, I think intuitively, the coefficients computed should be related to Pascal's triangle (or at the very least they should have some combinatorial relation).
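As a quick illustration of that Leibniz-rule connection (a minimal SymPy sketch of my own, not part of the original post): the binomial coefficients show up directly in the n-th derivative of a product.
from sympy import symbols, Function, diff

x = symbols('x')
f = Function('f')(x)
g = Function('g')(x)

# 3rd derivative of f*g: the coefficients 1, 3, 3, 1 are a row of Pascal's triangle
print(diff(f*g, x, 3).expand())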
Optimization #1
In the original code, the function sum_of_coef(f) serializes the expression f to a string, discards everything that doesn't look like a number, and then sums the remaining numbers.
We can avoid the serialization by traversing the expression tree and collecting what we need:
def sum_of_coef(f):
    s = 0
    if f.func == Add:
        for sum_term in f.args:
            res = sum_term if sum_term.is_Number else 1
            if len(sum_term.args) == 0:
                s += res
                continue
            first = sum_term.args[0]
            if first.is_Number == True:
                res = first
            else:
                res = 1
            s += res
    elif f.func == Mul:
        first = f.args[0]
        if first.is_Number == True:
            s = first
        else:
            s = 1
    elif f.func == Pow:
        s = 1
    return s
Optimization #2
In the function set_to_zero(expr) all the 2nd and 3rd derivatives of b, and the 3rd and 4th derivatives of g are replaced by zero.
We can collapse all those substitutions into one statement like so:
b3, b2 = b.diff(x, 3), b.diff(x, 2)
g4, g3 = g.diff(x, 4), g.diff(x, 3)

def set_to_zero(expression):
    expression = expression.subs({b3: 0, b2: 0, g4: 0, g3: 0})
    return expression
Optimization #3
In the original code, for every cell c[i][j] we're calling simplify. This turns out to have a big impact on performance but actually we can skip this call, because fortunately our expressions are just sums of products of derivatives or unknown functions.
So the line
charar[i,j] = set_to_zero(expand(simplify(expr)))
becomes
charar[i,j] = set_to_zero(expand(expr))
Optimization #4
The following was also tried but turned out to have very little impact.
For two consecutive values of j, we're computing c'[i-1][j-1] twice.
j-1 c[i-1][j-3] c[i-1][j-2] c[i-1][j-1]
j c[i-1][j-2] c[i-1][j-1] c[i-1][j]
If you look at the loop formula in the else branch, you see that c'[i-1][j-1] has already been computed for the previous value of j. It can be cached, but this optimization has little effect in the SymPy version of the code.
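A minimal sketch of what that caching could look like (a hypothetical helper of my own, not from the original code): keep the derivative of each cell of the previous row in a dict keyed by column index, so each cell is differentiated at most once per row.
from sympy import Symbol

x = Symbol('x')

# Hypothetical caching sketch: remember the first derivative of each cell of row i-1,
# so c'[i-1][j-1] is differentiated only once even though two consecutive j need it.
deriv_cache = {}

def d1(prev_row, j):
    if j not in deriv_cache:
        deriv_cache[j] = prev_row[j].diff(x)
    return deriv_cache[j]

# Inside the j-loop one would then use d1(charar[i-1], j-1) instead of
# charar[i-1, j-1].diff(x), and clear deriv_cache at the start of each i iteration.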
Here it's also important to mention that it's possible to visualize the call tree of SymPy involved in computing these derivatives. It's actually larger, but here is part of it.
We can also generate a flamegraph using the py-spy module just to see where time is being spent.
As far as I could tell, 34% of the time is spent in _eval_derivative_n_times, 10% in the function getit from assumptions.py, 12% in subs(..), and 12% in expand(..).
Optimization #5
Apparently when pull request #13892 was merged into SymPy, it also introduced a performance regression.
In one of the comments regarding that regression, Ondrej Certik recommends using SymEngine to improve performance of code that makes heavy use of derivatives.
So I've ported the code mentioned to SymEngine.py and noticed that it runs 98 times faster than the SymPy version for power=8 (and 4320 times faster for power=30).
The required module can be installed via pip3 install --user symengine.
#!/usr/bin/python3
from symengine import *
import pprint

x = var("x")
b = Function("b")(x)
g = Function("g")(x)

b3, b2 = b.diff(x, 3), b.diff(x, 2)
g4, g3 = g.diff(x, 4), g.diff(x, 3)

def set_to_zero(e):
    e = e.subs({b3: 0, b2: 0, g4: 0, g3: 0})
    return e

def sum_of_coef(f):
    s = 0
    if f.func == Add:
        for sum_term in f.args:
            res = 1
            if len(sum_term.args) == 0:
                s += res
                continue
            first = sum_term.args[0]
            if first.is_Number == True:
                res = first
            else:
                res = 1
            s += res
    elif f.func == Mul:
        first = f.args[0]
        if first.is_Number == True:
            s = first
        else:
            s = 1
    elif f.func == Pow:
        s = 1
    return s

def main():
    power = 8
    charar = [[0] * (power*2) for x in range(power)]
    coef_sum_array = [[0] * (power*2) for x in range(power)]
    charar[0][0] = b
    charar[0][1] = g
    init_printing()
    for i in range(1, power):
        jmax = (i+1)*2
        for j in range(0, jmax):
            c2, c1, c0 = charar[i-1][j-2], charar[i-1][j-1], charar[i-1][j]
            #print(c2, c1, c0)
            if j == 0:
                expr = b*c0.diff(x) + g*c0.diff(x, 2)
            elif j == 1:
                expr = b*c1 + 2*g*c1.diff(x) + b*c0.diff(x) + g*c0.diff(x, 2)
            elif j == jmax-2:
                expr = g*c2 + b*c1 + 2*g*c1.diff(x)
            elif j == jmax-1:
                expr = g*c2
            else:
                expr = g*c2 + b*c1 + 2*g*c1.diff(x) + b*c0.diff(x) + g*c0.diff(x, 2)
            charar[i][j] = set_to_zero(expand(expr))
            coef_sum_array[i][j] = sum_of_coef(charar[i][j])
    pprint.pprint(Matrix(coef_sum_array))

main()
Performance after optimization #5
I think it would be very interesting to look at the number of terms in c[i][j] to determine how quickly the expressions are growing. That would definitely help in estimating the complexity of the current code.
But for practical purposes I've plotted the current time and memory consumption of the SymEngine code above and managed to get the following chart:
Both the time and the memory seem to be growing polynomially with the input (the power parameter in the original code).
The same chart but as a log-log plot can be viewed here:
Like the wiki page says, a straight line on a log-log plot corresponds to a monomial. This offers a way to recover the exponent of the monomial.
So if we consider two points N=16 and N=32 between which the log-log plot looks like a straight line
import pandas as pd
from math import log   # log was implicitly available in the original snippet

df = pd.read_csv("modif6_bench.txt", sep=',', header=0)

def find_slope(col1, col2, i1, i2):
    xData = df[col1].to_numpy()
    yData = df[col2].to_numpy()
    x0, x1 = xData[i1], xData[i2]
    y0, y1 = yData[i1], yData[i2]
    m = log(y1/y0) / log(x1/x0)
    return m

print("time slope = {0:0.2f}".format(find_slope("N", "time", 16, 32)))
print("memory slope = {0:0.2f}".format(find_slope("N", "memory", 16, 32)))
Output:
time slope = 5.69
memory slope = 2.62
So a very rough approximation of the time complexity would be O(n^5.69), and an approximation of the space complexity would be O(n^2.62).
There are more details about deciding whether the growth rate is polynomial or exponential here (it involves drawing a semi-log and a log-log plot, and seeing where the data shows up as a straight line).
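A minimal sketch of that check (assuming the same modif6_bench.txt and column names as above): plot the data on semi-log and log-log axes and see which one turns the curve into a straight line.
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("modif6_bench.txt", sep=',', header=0)

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.semilogy(df["N"], df["time"])   # a straight line here would suggest exponential growth
ax1.set_title("semi-log")
ax2.loglog(df["N"], df["time"])     # a straight line here suggests polynomial growth
ax2.set_title("log-log")
plt.show()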
Performance with defined b and g functions
In the first original code block, the functions b and g were undefined functions. This means SymPy and SymEngine didn't know anything about them.
The 2nd original code block defines b=1+x and g=1+x+x**2. If we run all of this again with known b and g, the code runs much faster, and the running-time and memory-usage curves are better than with unknown functions:
time slope = 2.95
memory slope = 1.35
Recorded data fitting onto known growth-rates
I wanted to look a bit more into matching the observed resource consumption (time and memory), so I wrote the following Python module that fits each growth rate (from a catalog of common growth rates) to the recorded data, and then shows the plot to the user.
It can be installed via pip3 install --user matchgrowth
When run like this:
match-growth.py --infile ./tests/modif7_bench.txt --outfile time.png --col1 N --col2 time --top 1
It produces graphs of the resource usage, as well as the closest growth rates it matches to. In this case, it finds the polynomial growth to be closest:
Other notes
If you run this for power=8 (in the symengine code mentioned above) the coefficients will look like this:
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[1, 5, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[1, 17, 40, 31, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[1, 53, 292, 487, 330, 106, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0]
[1, 161, 1912, 6091, 7677, 4693, 1520, 270, 25, 1, 0, 0, 0, 0, 0, 0]
[1, 485, 11956, 68719, 147522, 150706, 83088, 26573, 5075, 575, 36, 1, 0, 0, 0, 0]
[1, 1457, 73192, 735499, 2568381, 4118677, 3528928, 1772038, 550620, 108948, 13776, 1085, 49, 1, 0, 0]
[1, 4373, 443524, 7649215, 42276402, 102638002, 130209104, 96143469, 44255170, 13270378, 2658264, 358890, 32340, 1876, 64, 1]
So as it turns out, the 2nd column coincides with A048473 which according to OEIS is "The number of triangles (of all sizes, including holes) in Sierpiński's triangle after n inscriptions".
All the code for this is also available in this repo.
Relations between polynomial coefficients from the i-th line with coefficients from the (i-1)-th line
In the previous post c[i][j] was calculated. It's possible to check that deg(c[i][j])=j+1 .
This can be checked by initializing a separate 2d array, and computing the degree like so:
deg[i][j] = degree(poly(parse_expr(str(charar[i][j]))))
Vertical formulas:
Then if we denote by u(i,j,k) the coefficient of x^k in c[i][j], we can try to find formulas for u(i,j,k) in terms of u(i-1,_,_). Formulas for u(i,j,_) will be the same as formulas for u(i+1,j,_) (and all following rows), so there's some opportunity for caching there.
Horizontal formulas:
It's also interesting that when we fix i, the formulas for u(i,j,_) look the same as those for u(i,j+1,_) except for the last 3 values of k. But I'm not sure if this can be leveraged.
The caching steps mentioned above might help to skip unnecessary computations.
See more about this here.
Some notes about analytical, closed-form solutions and asymptotics
I'm struggling to derive this analytically
Yes, this seems to be hard. The closest class of recursive sequences related to the one mentioned here are called Holonomic sequences (also called D-finite or P-recursive). The sequence c[i][j] is not C-finite because it has polynomial coefficients (in the general case even the asymptotics of recurrences with polynomial coefficients is an open problem).
However, the recurrence relation for c[i][j] does not qualify for this because of the derivatives. If we were to leave out the derivatives in the formula of c[i][j] then it would qualify as a Holonomic sequence. Here are some places where I found solutions for these:
"The Concrete Tetrahedron: Symbolic Sums, Recurrence Equations, Generating Functions, Asymptotic Estimates" by Kauers and Paule - Chapter 7 Holonomic Sequences and Power Series
Analytic Combinatorics by Flajolet and Sedgewick - Appendix B.4 Holonomic Functions
But also c[i][j] is a several variable recurrence, so that's another reason why it doesn't fit into that theory mentioned above.
There is however another book called Analytic Combinatorics in Several Variables by Robin Pemantle and Mark C. Wilson which does handle several variable recurrences.
All the books mentioned above require a lot of complex analysis, and they go much beyond the little math that I currently know, so hopefully someone with a more solid understanding of that kind of math can try this out.
The most advanced CAS that has generating-function-related operations and can operate on this kind of sequence is Maple, with the gfun package (gfun repo), which for now only handles the univariate case.

Two number Sum program in python O(N^2)

I am used to writing code in C++, but now I am trying to learn Python. The Python language is very popular, so I thought I'd give it a shot.
Currently I am preparing for company interview questions and am able to solve most of them in C++. Alongside that, I am trying to write the code for the same problems in Python. For the things I am not familiar with, I do a Google search or watch tutorials, etc.
While I was writing code for one of my previously solved easy interview questions in Python, I ran into a problem.
Problem: Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Given an array of integers, print the indices of the two numbers such that they add up to a specific target.
def twoNum(*arr, t):
    cur = 0
    x = 0
    y = 0
    for i in range(len(arr) - 1):
        for j in range(len(arr) - 1):
            if(i == j):
                break
            cur = arr[i] + arr[j]
            if(t == cur):
                x = arr[i]
                y = arr[j]
                break
        if(t == cur):
            break
    print(f"{x} + {y} = {x+y} ")

arr = [3, 5, -4, 8, 11, 1, -1, 6]
target = 10
twoNum(arr, t=target)
So here is the problem: I have defined x and y in the function, then set x = arr[i] and y = arr[j], and I am printing those values.
The output is: 0 + 0 = 10 (where the target is 10).
I guess this is because I initialize x = 0 and y = 0 in the function and the values never get updated. In the Outline section of VS Code I saw that x and y are declared twice, once at the start of the function and again in the for loop.
Can anyone explain to me what is going on here?
For reference, here is an image of the code I wrote in C++
Change this:
def twoNum(*arr, t):
to this:
def twoNum(arr, t):
* is used to indicate that there will be a variable number of positional arguments (see the Python docs on *args); it is not for pointers as in C++. Because of the *, arr inside your function is a one-element tuple containing your whole list, so len(arr) - 1 is 0, the loops never execute, and x and y keep their initial value of 0.
Basically what you are trying to do is write C code in Python.
I would instead try to focus first on how to write Python code in a 'pythonic' way. But for your question, solving it your way with brute force in Python:
def two_num(arr, t):
    for i, a in enumerate(arr):
        for b in arr[i + 1:]:
            if a + b == t:
                print(f"{a} + {b} = {t}")
                return
Here's a way to implement a brute force approach using a list comprehension:
arr = [1,3,5,7,9]
target = 6
i,j = next((i,j) for i,n in enumerate(arr[:-1]) for j,m in enumerate(arr[i+1:],i+1) if n+m==target)
output:
print(f"arr[{i}] + arr[{j}] = {arr[i]} + {arr[j]} = {target}")
# arr[0] + arr[2] = 1 + 5 = 6
Perhaps even more pythonic would be to use iterators:
from itertools import tee
iArr = enumerate(arr)
i,j = next((i,j) for i,n in iArr for j,m in tee(iArr,1)[0] if n+m==target)
When you get to implementing an O(n) solution, you should look into dictionaries:
d = { target-n:j for j,n in enumerate(arr) }
i,j = next( (i,d[m]) for i,m in enumerate(arr) if m in d and d[m] != i )
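For comparison, here is a minimal sketch of the usual single-pass dictionary approach (my own illustration, reusing the array and target from the question):
def two_sum(arr, target):
    seen = {}                      # value -> index where it was seen
    for i, n in enumerate(arr):
        if target - n in seen:     # the complement was seen earlier
            return seen[target - n], i
        seen[n] = i
    return None

print(two_sum([3, 5, -4, 8, 11, 1, -1, 6], 10))   # (4, 6): 11 + (-1) = 10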

Imprecise results of logarithm and power functions in Python

I am trying to complete the following exercise:
https://www.codewars.com/kata/whats-a-perfect-power-anyway/train/python
I tried multiple variations involving the log and power functions, but my code breaks down when big numbers are involved:
Exercise:
Your task is to check whether a given integer is a perfect power. If it is a perfect power, return a pair m and k with m^k = n as a proof. Otherwise return Nothing, Nil, null, None or your language's equivalent.
Note: For a perfect power, there might be several pairs. For example 81 = 3^4 = 9^2, so (3,4) and (9,2) are valid solutions. However, the tests take care of this, so if a number is a perfect power, return any pair that proves it.
The exercise uses Python 3.4.3
My code:
import math

def isPP(n):
    for i in range(2 + n % 2, n, 2):
        a = math.log(n, i)
        if int(a) == round(a, 1):
            if pow(i, int(a)) == n:
                return [i, int(a)]
    return None
Question:
How is it possible that I keep getting incorrect answers for bigger numbers? I read that in Python 3, all ints are treated like "long" from Python 2, i.e. they can be very large and still be represented exactly. Thus, since i and int(a) are both ints, shouldn't pow(i, int(a)) == n be evaluated correctly? I'm actually baffled.
(edit note: an integer nth root is also added below)
You are on the right track with the logarithm, but you are doing the math wrong. You are also skipping numbers you should not: you test only even candidates or only odd candidates, without considering that an even number can have an odd power or vice versa.
check this
>>> math.log(170**3,3)
14.02441559235585
>>>
Not even close. The correct method is described here: Nth root,
which is:
Let x be the number whose Nth root we want, n the root, and r the result. Then we have
r^n = x
Take the log in any base b of both sides, and solve for r:
log_b(r^n) = log_b(x)
n * log_b(r) = log_b(x)
log_b(r) = log_b(x) / n
b^(log_b(r)) = b^(log_b(x) / n)
r = b^(log_b(x) / n)
so for instance with log in base 10 we get
>>> pow(10, math.log10(170**3)/3 )
169.9999999999999
>>>
that is much closer, and just rounding it gives the answer
>>> round(169.9999999999999)
170
>>>
therefore the function should be something like this
import math

def isPP(x):
    for n in range(2, 1 + round(math.log2(x))):
        root = pow(10, math.log10(x) / n)
        result = round(root)
        if result**n == x:
            return result, n
the upper limit in range is to avoid testing numbers that will certainly fail
test
>>> isPP(170**3)
(170, 3)
>>> isPP(6434856)
(186, 3)
>>> isPP(9**2)
(9, 2)
>>> isPP(23**8)
(279841, 2)
>>> isPP(279841)
(529, 2)
>>> isPP(529)
(23, 2)
>>>
EDIT
or, as Tim Peters points out, you can use pow(x, 1./n), since the nth root of a number can also be expressed as x^(1/n)
for example
>>> pow(170**3, 1./3)
169.99999999999994
>>> round(_)
170
>>>
but keep in mind that this will fail for extremely large numbers, for example
>>> pow(8191**107,1./107)
Traceback (most recent call last):
File "<pyshell#90>", line 1, in <module>
pow(8191**107,1./107)
OverflowError: int too large to convert to float
>>>
while the logarithmic approach succeeds
>>> pow(10, math.log10(8191**107)/107)
8190.999999999999
>>>
The reason is that 8191^107 is simply too big: it has 419 digits, which is more than the largest representable float, but reducing it with a log produces a much more reasonable number.
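A quick sketch to verify that size claim (my own check, not part of the original answer):
import math
import sys

n = 8191**107
print(len(str(n)))                      # 419 digits
print(math.log10(sys.float_info.max))   # ~308.25, so floats top out around 309 digits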
EDIT 2
Now, if you want to work with ridiculously big numbers, or just plain don't want to use floating-point arithmetic at all and want only integer arithmetic, then the best course of action is Newton's method. The helpful link provided by Tim Peters for the particular case of the cube root shows the way to do it in general, alongside the Wikipedia article.
def inthroot(A, n):
    if A < 0:
        if n % 2 == 0:
            raise ValueError
        return -inthroot(-A, n)
    if A == 0:
        return 0
    n1 = n - 1
    if A.bit_length() < 1024:  # float(n) safe from overflow
        xk = int(round(pow(A, 1/n)))
        xk = (n1*xk + A//pow(xk, n1)) // n  # Ensure xk >= floor(nthroot(A)).
    else:
        xk = 1 << -(-A.bit_length()//n)  # power of 2 closer but greater than the nth root of A
    while True:
        sig = A // pow(xk, n1)
        if xk <= sig:
            return xk
        xk = (n1*xk + sig) // n
Check the explanation by Mark Dickinson to understand how the algorithm works for the cube-root case, which is basically the same here.
Now let's compare this with the other one:
>>> def nthroot(x,n):
        return pow(10, math.log10(x)/n )
>>> n = 2**(2**12) + 1 # a ridiculously big number
>>> r = nthroot(n**2,2)
Traceback (most recent call last):
File "<pyshell#48>", line 1, in <module>
nthroot(n**2,2)
File "<pyshell#47>", line 2, in nthroot
return pow(10, math.log10(x)/n )
OverflowError: (34, 'Result too large')
>>> r = inthroot(n**2,2)
>>> r == n
True
>>>
then the function is now
import math

def isPPv2(x):
    for n in range(2, 1 + round(math.log2(x))):
        root = inthroot(x, n)
        if root**n == x:
            return root, n
test
>>> n = 2**(2**12) + 1 # a ridiculously big number
>>> r,p = isPPv2(n**23)
>>> p
23
>>> r == n
True
>>> isPPv2(170**3)
(170, 3)
>>> isPPv2(8191**107)
(8191, 107)
>>> isPPv2(6434856)
(186, 3)
>>>
Now let's check isPP vs isPPv2:
>>> x = (1 << 53) + 1
>>> x
9007199254740993
>>> isPP(x**2)
>>> isPPv2(x**2)
(9007199254740993, 2)
>>>
clearly, avoiding floating point is the best choice

Fibonacci numbers, with a one-liner in Python 3?

I know there is nothing wrong with writing this with a proper function structure, but I would like to know how I can find the nth Fibonacci number in the most Pythonic way, with a one-liner.
I wrote this code, but it didn't seem like the best way to me:
>>> fib = lambda n:reduce(lambda x, y: (x[0]+x[1], x[0]), [(1,1)]*(n-2))[0]
>>> fib(8)
13
How could it be better and simpler?
fib = lambda n:reduce(lambda x,n:[x[1],x[0]+x[1]], range(n),[0,1])[0]
(this maintains a tuple mapped from [a,b] to [b,a+b], initialized to [0,1], iterated N times, then takes the first tuple element)
>>> fib(1000)
43466557686937456435688527675040625802564660517371780402481729089536555417949051
89040387984007925516929592259308032263477520968962323987332247116164299644090653
3187938298969649928516003704476137795166849228875L
(note that in this numbering, fib(0) = 0, fib(1) = 1, fib(2) = 1, fib(3) = 2, etc.)
(also note: reduce is a builtin in Python 2.7 but not in Python 3; you'd need to execute from functools import reduce in Python 3.)
A rarely seen trick is that a lambda function can refer to itself recursively:
fib = lambda n: n if n < 2 else fib(n-1) + fib(n-2)
By the way, it's rarely seen because it's confusing, and in this case it is also inefficient. It's much better to write it on multiple lines:
def fibs():
    a = 0
    b = 1
    while True:
        yield a
        a, b = b, a + b
I recently learned about using matrix multiplication to generate Fibonacci numbers, which was pretty cool. You take a base matrix:
[1, 1]
[1, 0]
and multiply it by itself N times to get:
[F(N+1), F(N)]
[F(N), F(N-1)]
This morning, doodling in the steam on the shower wall, I realized that you could cut the running time in half by starting with the second matrix, and multiplying it by itself N/2 times, then using N to pick an index from the first row/column.
With a little squeezing, I got it down to one line:
import numpy
def mm_fib(n):
    return (numpy.matrix([[2,1],[1,1]])**(n//2))[0,(n+1)%2]
>>> [mm_fib(i) for i in range(20)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181]
This is a closed expression for the Fibonacci series that uses integer arithmetic, and is quite efficient.
fib = lambda n:pow(2<<n,n+1,(4<<2*n)-(2<<n)-1)%(2<<n)
>>> fib(1000)
4346655768693745643568852767504062580256466051737178
0402481729089536555417949051890403879840079255169295
9225930803226347752096896232398733224711616429964409
06533187938298969649928516003704476137795166849228875L
It computes the result in O(log n) arithmetic operations, each acting on integers with O(n) bits. Given that the result (the nth Fibonacci number) is O(n) bits, the method is quite reasonable.
It's based on genefib4 from http://fare.tunes.org/files/fun/fibonacci.lisp , which in turn was based on an a less efficient closed-form integer expression of mine (see: http://paulhankin.github.io/Fibonacci/)
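A quick way to convince yourself it works (my own sanity check, not from the answer): compare it against a plain iterative Fibonacci for small n.
fib = lambda n: pow(2 << n, n + 1, (4 << 2*n) - (2 << n) - 1) % (2 << n)

def fib_iter(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

assert all(fib(n) == fib_iter(n) for n in range(0, 30))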
If we consider the "most Pythonic way" to be elegant and effective then:
def fib(nr):
    return int(((1 + math.sqrt(5)) / 2) ** nr / math.sqrt(5) + 0.5)
wins hands down. Why use an inefficient algorithm (and if you start using memoization we can forget about the one-liner) when you can solve the problem just fine in O(1) by approximating the result with the golden ratio? Though in reality I'd obviously write it in this form:
def fib(nr):
    ratio = (1 + math.sqrt(5)) / 2
    return int(ratio ** nr / math.sqrt(5) + 0.5)
More efficient and much easier to understand.
This is a non-recursive (anonymous) memoizing one liner
fib = lambda x,y=[1,1]:([(y.append(y[-1]+y[-2]),y[-1])[1] for i in range(1+x-len(y))],y[x])[1]
fib = lambda n, x=0, y=1 : x if not n else fib(n-1, y, x+y)
run time O(n), fib(0) = 0, fib(1) = 1, fib(2) = 1 ...
I'm a Python newcomer, but I did some measurements for learning purposes. I collected some Fibonacci algorithms and took some measurements.
from datetime import datetime
import matplotlib.pyplot as plt
from functools import wraps
from functools import reduce
from functools import lru_cache
import numpy

def time_it(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        start_time = datetime.now()
        f(*args, **kwargs)
        end_time = datetime.now()
        elapsed = end_time - start_time
        elapsed = elapsed.microseconds
        return elapsed
    return wrapper

@time_it
def fibslow(n):
    if n <= 1:
        return n
    else:
        return fibslow(n-1) + fibslow(n-2)

@time_it
@lru_cache(maxsize=10)
def fibslow_2(n):
    if n <= 1:
        return n
    else:
        return fibslow_2(n-1) + fibslow_2(n-2)

@time_it
def fibfast(n):
    if n <= 1:
        return n
    a, b = 0, 1
    for i in range(1, n+1):
        a, b = b, a + b
    return a

@time_it
def fib_reduce(n):
    return reduce(lambda x, n: [x[1], x[0]+x[1]], range(n), [0, 1])[0]

@time_it
def mm_fib(n):
    return (numpy.matrix([[2, 1], [1, 1]])**(n//2))[0, (n+1) % 2]

@time_it
def fib_ia(n):
    return pow(2 << n, n+1, (4 << 2 * n) - (2 << n)-1) % (2 << n)

if __name__ == '__main__':
    X = range(1, 200)
    # fibslow_times = [fibslow(i) for i in X]
    fibslow_2_times = [fibslow_2(i) for i in X]
    fibfast_times = [fibfast(i) for i in X]
    fib_reduce_times = [fib_reduce(i) for i in X]
    fib_mm_times = [mm_fib(i) for i in X]
    fib_ia_times = [fib_ia(i) for i in X]
    # print(fibslow_times)
    # print(fibfast_times)
    # print(fib_reduce_times)
    plt.figure()
    # plt.plot(X, fibslow_times, label='Slow Fib')
    plt.plot(X, fibslow_2_times, label='Slow Fib w cache')
    plt.plot(X, fibfast_times, label='Fast Fib')
    plt.plot(X, fib_reduce_times, label='Reduce Fib')
    plt.plot(X, fib_mm_times, label='Numpy Fib')
    plt.plot(X, fib_ia_times, label='Fib ia')
    plt.xlabel('n')
    plt.ylabel('time (microseconds)')
    plt.legend()
    plt.show()
The result is usually the same. Fibslow_2 (recursion with cache), the integer-arithmetic Fib, and Fibfast seem to be the best ones. Maybe my decorator is not the best thing to measure performance with, but for an overview it seemed good.
Another example, taking the cue from Mark Byers's answer:
fib = lambda n,a=0,b=1: a if n<=0 else fib(n-1,b,a+b)
I wanted to see if I could create an entire sequence, not just the final value.
The following will generate a list of length 100. It excludes the leading [0, 1] and works for both Python2 and Python3. No other lines besides the one!
(lambda i, x=[0,1]: [(x.append(x[y+1]+x[y]), x[y+1]+x[y])[1] for y in range(i)])(100)
Output
[1,
2,
3,
...
218922995834555169026,
354224848179261915075,
573147844013817084101]
Here's an implementation that doesn't use recursion, and only memoizes the last two values instead of the whole sequence history.
nthfib() below is the direct solution to the original problem (as long as imports are allowed).
It's less elegant than the reduce methods above, but, although slightly different from what was asked for, it gains the ability to be used more efficiently as an infinite generator if one needs to output the sequence up to the nth number as well (rewritten slightly as fibgen() below).
from itertools import imap, islice, repeat
nthfib = lambda n: next(islice((lambda x=[0, 1]: imap((lambda x: (lambda setx=x.__setitem__, x0_temp=x[0]: (x[1], setx(0, x[1]), setx(1, x0_temp+x[1]))[0])()), repeat(x)))(), n-1, None))
>>> nthfib(1000)
43466557686937456435688527675040625802564660517371780402481729089536555417949051
89040387984007925516929592259308032263477520968962323987332247116164299644090653
3187938298969649928516003704476137795166849228875L
from itertools import imap, islice, repeat
fibgen = lambda:(lambda x=[0,1]: imap((lambda x: (lambda setx=x.__setitem__, x0_temp=x[0]: (x[1], setx(0, x[1]), setx(1, x0_temp+x[1]))[0])()), repeat(x)))()
>>> list(islice(fibgen(),12))
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
def fib(n):
    x = [0, 1]
    for i in range(n):
        x = [x[1], x[0] + x[1]]
    return x[0]
Taking the cue from Jason S, I think my version is easier to understand.
Starting Python 3.8, and the introduction of assignment expressions (PEP 572) (:= operator), we can use and update a variable within a list comprehension:
fib = lambda n,x=(0,1):[x := (x[1], sum(x)) for i in range(n+1)][-1][0]
This:
Initiates the duo n-1 and n-2 as a tuple x=(0, 1)
As part of a list comprehension looping n times, x is updated via an assignment expression (x := (x[1], sum(x))) to the new n-1 and n-2 values
Finally, we return, from the last iteration, the first part of x
To solve this problem I was inspired by a similar question here on Stack Overflow, Single Statement Fibonacci, and I came up with this single-line function that can output a list of the Fibonacci sequence. Note that this is a Python 2 script, not tested on Python 3:
(lambda n, fib=[0,1]: fib[:n]+[fib.append(fib[-1] + fib[-2]) or fib[-1] for i in range(n-len(fib))])(10)
assign this lambda function to a variable to reuse it:
fib = (lambda n, fib=[0,1]: fib[:n]+[fib.append(fib[-1] + fib[-2]) or fib[-1] for i in range(n-len(fib))])
fib(10)
output is a list of fibonacci sequence:
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
I don't know if this is the most Pythonic method, but this is the best I could come up with:
Fibonacci = lambda x,y=[1,1]:[1]*x if (x<2) else ([y.append(y[q-1] + y[q-2]) for q in range(2,x)],y)[1]
The above code doesn't use recursion, just a list to store the values.
My 2 cents
# One Liner
def nthfibonacci(n):
    return long(((((1+5**.5)/2)**n)-(((1-5**.5)/2)**n))/5**.5)
OR
# Steps
def nthfibonacci(nth):
    sq5 = 5**.5
    phi1 = (1+sq5)/2
    phi2 = -1 * (phi1 - 1)
    n1 = phi1**(nth+1)
    n2 = phi2**(nth+1)
    return long((n1 - n2)/sq5)
Why not use a list comprehension?
from math import sqrt, floor
[floor(((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5))) for n in range(100)]
Without math imports, but less pretty:
[int(((1+(5**0.5))**n-(1-(5**0.5))**n)/(2**n*(5**0.5))) for n in range(100)]
import math
sqrt_five = math.sqrt(5)
phi = (1 + sqrt_five) / 2
fib = lambda n : int(round(pow(phi, n) / sqrt_five))
print([fib(i) for i in range(1, 26)])
single line lambda fibonacci but with some extra variables
Similar:
def fibonacci(n):
    f = [1] + [0]
    for i in range(n):
        f = [sum(f)] + f[:-1]
    print f[1]
A simple Fibonacci number generator using recursion
fib = lambda x: 1-x if x < 2 else fib(x-1)+fib(x-2)
print fib(100)
This takes forever to calculate fib(100) in my computer.
There is also closed form of Fibonacci numbers.
fib = lambda n: int(1/sqrt(5)*((1+sqrt(5))**n-(1-sqrt(5))**n)/2**n)
print fib(50)
This works only up to about the 72nd number due to precision problems.
Lambda with logical operators
fibonacci_oneline = lambda n = 10, out = []: [ out.append(i) or i if i <= 1 else out.append(out[-1] + out[-2]) or out[-1] for i in range(n)]
Here is how I do it; however, the function returns None for the list-comprehension part, to allow me to insert a loop inside.
So basically what it does is append new elements of the Fibonacci sequence to a list that already holds two or more elements.
>>> f = lambda list, x: print('The list must be of 2 or more') if len(list) < 2 else [list.append(list[-1]+list[-2]) for i in range(x)]
>>> a = [1, 2]
>>> f(a, 7)
You can generate once a list with some values and use as needed:
fib_fix = []
fib = lambda x: 1 if x <=2 else fib_fix[x-3] if x-2 <= len(fib_fix) else (fib_fix.append(fib(x-2) + fib(x-1)) or fib_fix[-1])
fib_x = lambda x: [fib(n) for n in range(1,x+1)]
fib_100 = fib_x(100)
then, for example:
a = fib_fix[76]

What is the best way to get all the divisors of a number?

Here's the very dumb way:
def divisorGenerator(n):
    for i in xrange(1, n/2 + 1):
        if n % i == 0:
            yield i
    yield n
The result I'd like to get is similar to this one, but I'd like a smarter algorithm (this one is too slow and dumb :-)
I can find prime factors and their multiplicity fast enough.
I've got a generator that generates the factors in this way:
(factor1, multiplicity1)
(factor2, multiplicity2)
(factor3, multiplicity3)
and so on...
i.e. the output of
for i in factorGenerator(100):
    print i
is:
(2, 2)
(5, 2)
I don't know how much is this useful for what I want to do (I coded it for other problems), anyway I'd like a smarter way to make
for i in divisorGen(100):
    print i
output this:
1
2
4
5
10
20
25
50
100
UPDATE: Many thanks to Greg Hewgill and his "smart way" :)
Calculating all divisors of 100000000 took 0.01s with his way against the 39s that the dumb way took on my machine, very cool :D
UPDATE 2: Stop saying this is a duplicate of this post. Calculating the number of divisors of a given number doesn't require calculating all the divisors. It's a different problem; if you think it's not, then look up "Divisor function" on Wikipedia. Read the question and the answers before posting; if you do not understand what the topic is, just don't add unhelpful and already-given answers.
Given your factorGenerator function, here is a divisorGen that should work:
def divisorGen(n):
    factors = list(factorGenerator(n))
    nfactors = len(factors)
    f = [0] * nfactors
    while True:
        yield reduce(lambda x, y: x*y, [factors[x][0]**f[x] for x in range(nfactors)], 1)
        i = 0
        while True:
            f[i] += 1
            if f[i] <= factors[i][1]:
                break
            f[i] = 0
            i += 1
            if i >= nfactors:
                return
The overall efficiency of this algorithm will depend entirely on the efficiency of the factorGenerator.
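The question doesn't show factorGenerator itself, so here is a minimal trial-division sketch of one (an assumption about what it might look like; any generator yielding (prime, multiplicity) pairs will do):
def factorGenerator(n):
    # Trial division: yields (prime, multiplicity) pairs, e.g. 100 -> (2, 2), (5, 2).
    d = 2
    while d * d <= n:
        count = 0
        while n % d == 0:
            n //= d
            count += 1
        if count:
            yield d, count
        d += 1
    if n > 1:
        yield n, 1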
To expand on what Shimi has said, you should only be running your loop from 1 to the square root of n. Then to find the pair, do n / i, and this will cover the whole problem space.
As was also noted, this is generally a 'difficult' problem: exhaustive search, the way you are doing it, is about as good as it gets for guaranteed answers on arbitrary numbers. The presumed hardness of factoring is used by encryption algorithms such as RSA to help secure them; if someone were to solve this problem efficiently, most if not all of our current 'secure' communication would be rendered insecure.
Python code:
import math

def divisorGenerator(n):
    large_divisors = []
    for i in xrange(1, int(math.sqrt(n) + 1)):
        if n % i == 0:
            yield i
            if i*i != n:
                large_divisors.append(n / i)
    for divisor in reversed(large_divisors):
        yield divisor

print list(divisorGenerator(100))
Which should output a list like:
[1, 2, 4, 5, 10, 20, 25, 50, 100]
I think you can stop at math.sqrt(n) instead of n/2.
I will give you an example so that you can understand it easily. sqrt(28) is 5.29, so ceil(5.29) is 6. So if I stop at 6, I can still get all the divisors. How?
First see the code and then see image:
import math

def divisors(n):
    divs = [1]
    for i in xrange(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            divs.extend([i, n/i])
    divs.extend([n])
    return list(set(divs))
Now, walk through it: say I have already added 1 to my divisors list and I start with i = 2. At the end of all the iterations, since I added both the divisor and the quotient to my list, all the divisors of 28 are populated.
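For example (my own illustration of that walkthrough, reusing the divisors() defined above, which is Python 2 since it uses xrange): each i up to sqrt(28) that divides 28 contributes the pair (i, n/i).
# i = 2 -> adds 2 and 14
# i = 4 -> adds 4 and 7
# plus the initial 1 and n itself
print(sorted(divisors(28)))   # [1, 2, 4, 7, 14, 28]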
Source: How to determine the divisors of a number
Although there are already many solutions to this, I really have to post this :)
This one is:
readable
short
self contained, copy & paste ready
quick (in cases with a lot of prime factors and divisors, > 10 times faster than the accepted solution)
python3, python2 and pypy compliant
Code:
def divisors(n):
    # get factors and their counts
    factors = {}
    nn = n
    i = 2
    while i*i <= nn:
        while nn % i == 0:
            factors[i] = factors.get(i, 0) + 1
            nn //= i
        i += 1
    if nn > 1:
        factors[nn] = factors.get(nn, 0) + 1

    primes = list(factors.keys())

    # generates factors from primes[k:] subset
    def generate(k):
        if k == len(primes):
            yield 1
        else:
            rest = generate(k+1)
            prime = primes[k]
            for factor in rest:
                prime_to_i = 1
                # prime_to_i iterates prime**i values, i being all possible exponents
                for _ in range(factors[prime] + 1):
                    yield factor * prime_to_i
                    prime_to_i *= prime

    # in python3, `yield from generate(0)` would also work
    for factor in generate(0):
        yield factor
An illustrative Pythonic one-liner:
from itertools import chain
from math import sqrt
def divisors(n):
    return set(chain.from_iterable((i, n//i) for i in range(1, int(sqrt(n))+1) if n % i == 0))
But better yet, just use sympy:
from sympy import divisors
I like Greg's solution, but I wish it were more Python-like.
I feel it would be faster and more readable, so after some time of coding I came out with this.
The first two functions are needed to make the cartesian product of lists, and can be reused whenever this problem arises.
By the way, I had to program this myself; if anyone knows of a standard solution for this problem, please feel free to contact me.
"factorGenerator" now returns a dictionary. The dictionary is then fed into "divisors", which uses it to generate first a list of lists, where each list is the list of the factors of the form p^n with p prime.
Then we make the cartesian product of those lists, and we finally use Greg's solution to generate the divisors.
We sort them, and return them.
I tested it and it seems to be a bit faster than the previous version. I tested it as part of a bigger program, so I can't really say how much faster it is, though.
Pietro Speroni (pietrosperoni dot it)
from math import sqrt

##############################################################
### cartesian product of lists ###############################
##############################################################

def appendEs2Sequences(sequences, es):
    result = []
    if not sequences:
        for e in es:
            result.append([e])
    else:
        for e in es:
            result += [seq + [e] for seq in sequences]
    return result

def cartesianproduct(lists):
    """
    given a list of lists,
    returns all the possible combinations taking one element from each list
    The list does not have to be of equal length
    """
    return reduce(appendEs2Sequences, lists, [])

##############################################################
### prime factors of a natural ###############################
##############################################################

def primefactors(n):
    '''lists prime factors, from greatest to smallest'''
    i = 2
    while i <= sqrt(n):
        if n % i == 0:
            l = primefactors(n/i)
            l.append(i)
            return l
        i += 1
    return [n]  # n is prime

##############################################################
### factorization of a natural ###############################
##############################################################

def factorGenerator(n):
    p = primefactors(n)
    factors = {}
    for p1 in p:
        try:
            factors[p1] += 1
        except KeyError:
            factors[p1] = 1
    return factors

def divisors(n):
    factors = factorGenerator(n)
    divisors = []
    listexponents = [map(lambda x: k**x, range(0, factors[k]+1)) for k in factors.keys()]
    listfactors = cartesianproduct(listexponents)
    for f in listfactors:
        divisors.append(reduce(lambda x, y: x*y, f, 1))
    divisors.sort()
    return divisors

print divisors(60668796879)
P.S.
it is the first time I am posting to stackoverflow.
I am looking forward to any feedback.
Here is a smart and fast way to do it for numbers up to and around 10**16 in pure Python 3.6,
from itertools import compress

def primes(n):
    """ Returns a list of primes < n for n > 2 """
    sieve = bytearray([True]) * (n//2)
    for i in range(3, int(n**0.5)+1, 2):
        if sieve[i//2]:
            sieve[i*i//2::i] = bytearray((n-i*i-1)//(2*i)+1)
    return [2, *compress(range(3, n, 2), sieve[1:])]

def factorization(n):
    """ Returns a list of the prime factorization of n """
    pf = []
    for p in primeslist:
        if p*p > n: break
        count = 0
        while not n % p:
            n //= p
            count += 1
        if count > 0: pf.append((p, count))
    if n > 1: pf.append((n, 1))
    return pf

def divisors(n):
    """ Returns an unsorted list of the divisors of n """
    divs = [1]
    for p, e in factorization(n):
        divs += [x*p**k for k in range(1, e+1) for x in divs]
    return divs

n = 600851475143
primeslist = primes(int(n**0.5)+1)
print(divisors(n))
If your PC has tons of memory, a brute-force single line can be fast enough with numpy:
import numpy as np
N = 10000000; tst = np.arange(1, N); tst[np.mod(N, tst) == 0]
Out:
array([ 1, 2, 4, 5, 8, 10, 16,
20, 25, 32, 40, 50, 64, 80,
100, 125, 128, 160, 200, 250, 320,
400, 500, 625, 640, 800, 1000, 1250,
1600, 2000, 2500, 3125, 3200, 4000, 5000,
6250, 8000, 10000, 12500, 15625, 16000, 20000,
25000, 31250, 40000, 50000, 62500, 78125, 80000,
100000, 125000, 156250, 200000, 250000, 312500, 400000,
500000, 625000, 1000000, 1250000, 2000000, 2500000, 5000000])
Takes less than 1s on my slow PC.
Adapted from CodeReview, here is a variant which works with num=1 !
from itertools import product
import operator

def prod(ls):
    return reduce(operator.mul, ls, 1)

def powered(factors, powers):
    return prod(f**p for (f, p) in zip(factors, powers))

def divisors(num):
    pf = dict(prime_factors(num))
    primes = pf.keys()
    # For each prime, possible exponents
    exponents = [range(i+1) for i in pf.values()]
    return (powered(primes, es) for es in product(*exponents))
Old question, but here is my take:
def divs(n, m):
    if m == 1: return [1]
    if n % m == 0: return [m] + divs(n, m - 1)
    return divs(n, m - 1)
You can proxy with:
def divisorGenerator(n):
    for x in reversed(divs(n, n)):
        yield x
NOTE: For languages that support, this could be tail recursive.
Assuming that the factors function returns the factors of n (for instance, factors(60) returns the list [2, 2, 3, 5]), here is a function to compute the divisors of n:
function divisors(n)
    divs := [1]
    for fact in factors(n)
        temp := []
        for div in divs
            if fact * div not in divs
                append fact * div to temp
        divs := divs + temp
    return divs
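A direct Python sketch of that pseudocode (my own translation; the small trial-division factors() helper is an assumption, since the answer only assumes such a function exists):
def factors(n):
    # prime factors with multiplicity, e.g. factors(60) -> [2, 2, 3, 5]
    fs, d = [], 2
    while d * d <= n:
        while n % d == 0:
            fs.append(d)
            n //= d
        d += 1
    if n > 1:
        fs.append(n)
    return fs

def divisors(n):
    divs = [1]
    for fact in factors(n):
        temp = []
        for div in divs:
            if fact * div not in divs:
                temp.append(fact * div)
        divs += temp
    return divs

print(sorted(divisors(60)))   # [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30, 60]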
Here's my solution. It seems to be dumb but works well...and I was trying to find all proper divisors so the loop started from i = 2.
import math as m

def findfac(n):
    faclist = [1]
    for i in range(2, int(m.sqrt(n) + 2)):
        if n % i == 0:
            if i not in faclist:
                faclist.append(i)
            if n/i not in faclist:
                faclist.append(n/i)
    return faclist
If you only care about using list comprehensions and nothing else matters to you!
from itertools import combinations
from functools import reduce
def get_devisors(n):
    f = [f for f, e in list(factorGenerator(n)) for i in range(e)]
    fc = [x for l in range(len(f)+1) for x in combinations(f, l)]
    devisors = [1 if c == () else reduce((lambda x, y: x * y), c) for c in set(fc)]
    return sorted(devisors)
My solution via generator function is:
def divisor(num):
    for x in range(1, num + 1):
        if num % x == 0:
            yield x
    while True:
        yield None
Try calculating the square root of the given number and then looping over range(1, square_root+1).
number = int(input("Enter a Number: "))
square_root = round(number ** (1.0 / 2))
print(square_root)
divisor_list = []
for i in range(1, square_root + 1):
    if number % i == 0:  # if mod returns 0, append both i and number/i to the list
        divisor_list.append(i)
        divisor_list.append(int(number / i))
print(divisor_list)
def divisorGen(n):
    v = n
    last = []
    for i in range(1, v + 1):
        if n % i == 0:
            last.append(i)
    return last
I don't understand why there are so many complicated solutions to this problem.
Here is my take on it:
import math

def divisors(n):
    lis = [1, n]   # start with 1 and n itself; the loop finds the remaining pairs
    s = math.ceil(math.sqrt(n))
    for g in range(s, 1, -1):
        if n % g == 0:
            lis.append(g)
            lis.append(int(n / g))
    return set(lis)
return [x for x in range(1, n + 1) if n % x == 0]
