Implementing fibonacci series using greedy approach? - python

I have implemented fibonacci series using recursion:
def fibonacci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1) by naive recursion.

    Exponential time — every call spawns two more until the base cases.
    """
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fibonacci(n - 1) + fibonacci(n - 2)
I have also implemented it using dynamic programming:
def fibonacci(n):
    """Return the n-th Fibonacci number via bottom-up tabulation.

    Builds the table [F(0), F(1), ..., F(n)] and returns its last entry.
    """
    table = [0, 1]
    for i in range(2, n + 1):
        table.append(table[i - 1] + table[i - 2])
    return table[n]
I want to implement it using greedy approach. I am unable to think of it in greedy terms. Please provide a greedy approach for this problem.

I'm not sure what you mean by "greedy" here — Fibonacci has no natural greedy formulation. But here are several ways to implement it:
Example 1: Using looping technique
def fib(n):
    """Return the n-th Fibonacci number iteratively (fib(1) == fib(2) == 1)."""
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a

print(fib(5))  # fixed: the original used the Python 2 print statement
Example 2: Using recursion
def fibR(n):
    """Return the n-th Fibonacci number recursively (fibR(1) == fibR(2) == 1)."""
    if n == 1 or n == 2:
        return 1
    return fibR(n - 1) + fibR(n - 2)

print(fibR(5))  # fixed: the original used the Python 2 print statement
Example 3: Using generators
# Module-level Fibonacci state shared with the generator below.
a, b = 0, 1

def fibI():
    """Infinite Fibonacci generator; mutates module-level a and b as it goes."""
    global a, b
    while True:
        a, b = b, a + b
        yield a

f = fibI()
# fixed: gen.next() was removed in Python 3 -- use the next() builtin
next(f)
next(f)
next(f)
next(f)
print(next(f))  # fifth value -> 5 (also fixed Python 2 print statement)
Example 4: Using memoization
def memoize(fn, arg):
    """Return fn(arg), caching results across calls.

    Bug fixed: the original created a fresh ``memo = {}`` on every call, so
    nothing was ever cached and the "memoization" was a no-op.  The cache is
    stored on the function object so it persists between calls, and is keyed
    by (fn, arg) so different functions do not collide on equal arguments.
    """
    memo = memoize.__dict__.setdefault("_cache", {})
    key = (fn, arg)
    if key not in memo:
        memo[key] = fn(arg)
    return memo[key]
Using fib() as defined in Example 1:
# Memoized lookup of fib(5) using the helper above.
fibm = memoize(fib, 5)
print(fibm)  # fixed: the original used the Python 2 print statement
Example 5: Using memoization as the decorator
class Memoize:
    """Decorator class that caches single-argument calls to the wrapped fn.

    Results live in ``self.memo`` keyed by the argument.
    """

    def __init__(self, fn):
        self.fn = fn
        self.memo = {}

    def __call__(self, arg):
        # EAFP: hit the cache first, compute and store on a miss.
        try:
            return self.memo[arg]
        except KeyError:
            value = self.memo[arg] = self.fn(arg)
            return value
@Memoize  # fixed: the original showed "#Memoize", a comment that decorates nothing
def fib(n):
    """Return the n-th Fibonacci number iteratively (cached by Memoize)."""
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a

print(fib(5))  # fixed: the original used the Python 2 print statement

Related

Python nested currying

I was trying to solve a codewars problem here, and I got a bit stuck. I believe I should be using nested currying in Python.
Let us just take the case of add. Let us constrain the problem even more, and just get nested add working on the right hand side, i.e. write an add function such that
print((add)(3)(add)(5)(4))
prints 12.
It should be possible to nest it as deep as required, for e.g. I want
print((add)(add)(3)(4)(add)(5)(6))
should give me 18.
What I have done so far -
My initial attempt is to use the following nested function -
def add_helper():
    """Build a chainable accumulator callable.

    Integer arguments are added to a running sum (with a debug print); any
    other argument is ignored.  The callable always returns itself, so calls
    can be chained indefinitely.
    """
    current_sum = 0

    def inner(inp):
        nonlocal current_sum
        if isinstance(inp, int):
            current_sum += inp
            print(f"current_sum = {current_sum}")
        return inner

    return inner


add = add_helper()
However, this does not do the trick. Instead, I get the following output, for when I do something like print((add)(add)(3)(4)(add)(5)(6))
current_sum = 3
current_sum = 7
current_sum = 12
current_sum = 18
<function add_helper.<locals>.inner at 0x...>
Does anyone know how I have to change my function so that I just return 18, because the function will know it is "done"?
Any help will be appreciated!
UPDATE
After looking at Bharel's comments, I have the following so far -
def add_helper():
    """Chainable '+' accumulator that can hand off to another accumulator.

    Integers are summed into an internal total; passing another accumulator
    records ("+", total) into the shared operation history, transfers that
    history to the other accumulator, and returns it so the chain continues.
    """
    total = 0
    history = []

    def inner(inp):
        nonlocal total
        if not isinstance(inp, int):
            # Hand-off: snapshot our state, then pass the history along.
            history.append(("+", total))
            inp.set_ops_so_far(history)
            return inp
        total += inp
        return inner

    def set_ops_so_far(new_history):
        nonlocal history
        history = new_history

    # Expose state accessors as attributes on the callable itself.
    inner.get_ops_so_far = lambda: history
    inner.set_ops_so_far = set_ops_so_far
    inner.get_val = lambda: total
    return inner
def mul_helper():
    """Chainable '*' accumulator — mirrors add_helper with product state.

    Integers multiply into an internal product; passing another accumulator
    records ("*", product) into the shared history, transfers it, and
    returns the other accumulator.
    """
    product = 1
    history = []

    def inner(inp):
        nonlocal product
        if not isinstance(inp, int):
            # Hand-off: snapshot our state, then pass the history along.
            history.append(("*", product))
            inp.set_ops_so_far(history)
            return inp
        product *= inp
        return inner

    def set_ops_so_far(new_history):
        nonlocal history
        history = new_history

    # Expose state accessors as attributes on the callable itself.
    inner.get_ops_so_far = lambda: history
    inner.get_val = lambda: product
    inner.set_ops_so_far = set_ops_so_far
    return inner
# Module-level accumulator instances used by the example expressions
# below, e.g. (add)(add)(3)(4)(mul)(5)(6).
add = add_helper()
mul = mul_helper()
and now when I do
res = (add)(add)(3)(4)(mul)(5)(6)
print(res.get_ops_so_far())
print(res.get_val())
I get
[('+', 0), ('+', 7)]
30
Still not sure if this is the correct direction to be following?
This is how I solved it for anyone still looking in the future -
from copy import deepcopy
def start(arg):
    """Entry point: evaluate *arg* against a fresh stack and variable set.

    Every partial application deep-copies the evaluation stack and variable
    environment before stepping, so a partially-applied chain can be reused
    without sharing mutable state with its continuations.
    """
    def step(current, eval_stack, variables):
        stack_copy = deepcopy(eval_stack)
        vars_copy = deepcopy(variables)
        result = evaluate_stack(current, stack_copy, vars_copy)
        if result is not None:
            # return_ fired: the whole expression is done.
            return result

        def continuation(next_arg):
            return step(next_arg, stack_copy, vars_copy)

        return continuation

    return step(arg, [], dict())
# Binary operations for the stack evaluator.  Each takes two operands plus
# the current variable environment; an operand may be a variable name
# (looked up via variables.get) or a literal (the .get default falls through).
add = lambda a, b, variables: variables.get(a, a) + variables.get(b, b)
sub = lambda a, b, variables: variables.get(a, a) - variables.get(b, b)
mul = lambda a, b, variables: variables.get(a, a) * variables.get(b, b)
div = lambda a, b, variables: variables.get(a, a) // variables.get(b, b)
# Bind *name* to *val* in the environment; returns None so evaluation
# continues (evaluate_stack treats a None result as "no value to feed on").
def let(name, val, variables):
    variables[name] = val
    return
# Terminate evaluation with *val* (a variable name or a literal).
# NOTE(review): evaluate_stack dispatches on __name__ == "return_", so this
# function's exact name must not change.
def return_(val, variables):
    return variables.get(val, val)
def evaluate_stack(_arg, eval_stack, variables):
    """Feed one token into the pending-operation stack.

    Callables are pushed as frames (fn, arity, collected-args); values are
    appended to the top frame, and a frame fires as soon as it has enough
    arguments, feeding its result to the frame below.  Returns a value only
    when a return_ frame fires; otherwise returns None.
    """
    if callable(_arg):
        # return_ takes one argument; every other operation takes two.
        if _arg.__name__ == "return_":
            req_args = 1
        else:
            req_args = 2
        eval_stack.append((_arg, req_args, []))
    else:
        while True:
            func_to_eval, req_args, args_so_far = eval_stack[-1]
            args_so_far.append(_arg)
            if len(args_so_far) == req_args:
                # Frame complete: pop it, apply, and cascade the result.
                eval_stack.pop()
                _arg = func_to_eval(*args_so_far, variables)
                if func_to_eval.__name__ == "return_":
                    # return_ short-circuits the whole evaluation.
                    return _arg
                elif _arg is None:
                    # e.g. let(): side effect only, nothing to propagate.
                    break
            else:
                # Top frame still waiting for more arguments.
                break
Passes all testcases

List comprehension based on choices

Basically, if I were to write a function with variable return elements, like so:
def func(elem1=True, elem2=True, elem3=True, elem4=False):
    """Return a list of the requested results from a fresh MyClass instance.

    Each elemN flag selects one candidate; elem4 splices in a whole list.
    """
    obj = MyClass()
    selected = []
    if elem1:
        selected.append(obj.func1())
    if elem2:
        selected.append(obj.obj1)
    if elem3:
        selected.append(obj.func2().attr1)
    if elem4:
        selected += obj.list_obj3
    return selected
Things get rather long and windy. Is it possible to do something like this perhaps:
# NOTE(review): this is the question's *wishful* syntax -- it is NOT valid
# Python ("x if cond" without an else cannot appear as a list element).
# It is kept only to illustrate what the asker wanted to write.
def func(elem1=True, elem2=True, elem3=True, elem4=False):
    x = MyClass()
    return [x.func1() if elem1,
    x.obj1 if elem2,
    x.func2().attr1 if elem3,
    x.list_obj3 if elem4]
How neat is that!?
I know this can be done:
def func(elem1=True, elem2=True, elem3=True, elem4=False):
    """Compute all four candidates eagerly, then keep only the selected ones.

    Note: every candidate is evaluated even when its flag is False.
    """
    x = MyClass()
    candidates = [x.func1(), x.obj1, x.func2().attr1, x.list_obj3]
    flags = [elem1, elem2, elem3, elem4]
    return [value for value, wanted in zip(candidates, flags) if wanted]
but I would like to not calculate the elements if the user does not want them; it is a little expensive to calculate some of them.
If you hide your operations in lambdas then you can use lazy evaluation:
def func(elem1=True, elem2=True, elem3=True, elem4=False):
    """Lazily compute only the requested elements.

    Each candidate is wrapped in a thunk, so unselected (possibly expensive)
    computations never run.
    """
    x = MyClass()
    thunks = [
        (elem1, lambda: x.func1()),
        (elem2, lambda: x.obj1),
        (elem3, lambda: x.func2().attr1),
        (elem4, lambda: x.list_obj3),
    ]
    return [thunk() for wanted, thunk in thunks if wanted]
Asking a slightly different question, can you get behaviour like matlab/octave, where you only calculate the first two results if you are assigning to two variables, without computing results 3 and 4?
For example:
a, b = func()
Python can't quite do it since func() doesn't know how many return values it wants, but you can get close using:
from itertools import islice
def func():
    """Yield the four results lazily so callers can consume just a prefix."""
    x = MyClass()
    yield x.func1()  # fixed: the source was garbled as "x.fun c1()"
    yield x.obj1
    yield x.func2().attr1
    yield x.list_obj3
# Consume only the first two yields; the third and fourth never run.
a, b = islice(func(), 2)
I'm not sure it is better, but you could add array indexing semantics using a decorator, which would allow you to write:
@sliceable
def func():
...
a, b = func()[:2]
This is easy enough to implement:
from itertools import islice
class SlicedIterator(object):
    """Wrap an iterator so it supports lazy [] indexing and slicing.

    Indexing/slicing consumes the underlying iterator, so only as many
    items as requested are ever computed.
    """

    def __init__(self, it):
        self.it = it

    def __iter__(self):
        return self.it

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            return list(islice(self.it, idx.start, idx.stop, idx.step))
        # Integer index: skip idx items, then return the next one.
        for _ in range(idx):
            next(self.it)
        return next(self.it)
def sliceable(f):
    """Decorator: wrap a generator function so its result is indexable/sliceable.

    Added functools.wraps so the wrapper preserves f's name and docstring
    (the original wrapper hid them).
    """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kw):
        return SlicedIterator(f(*args, **kw))

    return wrapper
Testing:
@sliceable  # fixed: the original showed "#sliceable", a comment that decorates nothing
def f():
    """Generator whose per-item work is observable via the prints."""
    print("compute 1")
    yield 1
    print("compute 2")
    yield 2
    print("compute 3")
    yield 3
    print("compute 4")
    yield 4
# Demo: how much of f() actually runs depends on how it is indexed/sliced.
print("== compute all four")
a, b, c, d = f()
print("== compute first two")
a, b = f()[:2]
print("== compute one only")
a = f()[0]
print("== all as a list")
a = f()[:]
gives:
== compute all four
compute 1
compute 2
compute 3
compute 4
== compute first two
compute 1
compute 2
== compute one only
compute 1
== all as a list
compute 1
compute 2
compute 3
compute 4

Code not working inside a class

I'm trying to solve an exercise which consists of finding the prime factorization of a given number.
def primes(n):
    """Return the list of all primes in [2, n], filtered via is_prime."""
    return [candidate for candidate in range(2, n + 1) if is_prime(candidate)]
def is_prime(number):
    """Trial-division primality test.

    Bug fixed: the original returned True for 0 and 1 (the loop body never
    ran), but neither is prime.  Callers in primes() only pass values >= 2,
    so their behavior is unchanged.
    """
    if number < 2:
        return False
    for n in range(2, number):
        if not number % n:
            return False
    return True
def factor(z):
    """Return the prime factorization of z as a {prime: exponent} dict.

    Fixes vs. the original:
    - integer division (//) throughout: under Python 3 the original's '/'
      turned z into a float and produced float keys
    - when the reduction ends with z == n * cofactor and n was already
      counted, the cofactor's own exponent was dropped (e.g. factor(12)
      returned {2: 2} instead of {2: 2, 3: 1})
    """
    p = primes(z)
    fact = {}
    if z in p:
        # z itself is prime.
        fact[z] = 1
        return fact
    for n in p:
        # Divide out n while the quotient is still composite.
        while z % n == 0 and (z // n not in p):
            fact[n] = fact.get(n, 0) + 1
            z //= n
        # Remaining z == n * prime: record both and stop.
        if z % n == 0 and (z // n in p):
            cofactor = z // n
            if cofactor == n:
                fact[n] = fact.get(n, 0) + 2
            else:
                fact[n] = fact.get(n, 0) + 1
                fact[cofactor] = fact.get(cofactor, 0) + 1
            break
    return fact
print(factor(13))  # fixed: the original used the Python 2 print statement
My code works like a charm, but the exercise wants me to put it in a class starting with:
class PrimeFactorizer:
#your code here
which would be called with PrimeFactorizer(n).factor
When i add the working code above into
class PrimeFactorizer:
def __init__(self, z):
self.z=z
It returns an empty result when calling PrimeFactorizer(13).factor.
Could someone give me a hint about what I'm doing wrong?
Thanks
This should do the trick:
class PrimeFactorizer:
    """Prime-factorize the integer given to the constructor.

    PrimeFactorizer(z).factor() returns {prime: exponent}.
    NOTE: factor() consumes self.z (it divides it down in place), matching
    the original answer; call it only once per instance.
    """

    def __init__(self, z):
        self.z = z

    def primes(self, n):
        """Return the list of all primes in [2, n]."""
        p = []
        for a in range(2, n + 1):
            if self.is_prime(a):
                p.append(a)
        return p

    def is_prime(self, number):
        """Trial-division primality test (callers only pass numbers >= 2)."""
        for n in range(2, number):
            if not number % n:
                return False
        return True

    def factor(self):
        """Return the prime factorization of self.z as {prime: exponent}.

        Fixes vs. the original answer:
        - integer division (//) instead of /, which produced floats on Py3
        - a NameError from a bare ``z`` in the final bookkeeping branch
        - the cofactor's exponent was dropped when its prime had already
          been counted (e.g. 12 gave {2: 2} instead of {2: 2, 3: 1})
        """
        p = self.primes(self.z)
        fact = {}
        if self.z in p:
            # z itself is prime.
            fact[self.z] = 1
            return fact
        for n in p:
            # Divide out n while the quotient is still composite.
            while self.z % n == 0 and (self.z // n not in p):
                fact[n] = fact.get(n, 0) + 1
                self.z //= n
            # Remaining z == n * prime: record both and stop.
            if self.z % n == 0 and (self.z // n in p):
                cofactor = self.z // n
                if cofactor == n:
                    fact[n] = fact.get(n, 0) + 2
                else:
                    fact[n] = fact.get(n, 0) + 1
                    fact[cofactor] = fact.get(cofactor, 0) + 1
                break
        return fact
# fixed: Python 2 print statements (and a stray brace in the second comment)
print(PrimeFactorizer(8).factor())   # Output: {2: 3}
print(PrimeFactorizer(13).factor())  # Output: {13: 1}

Difference between Python decorator with and without syntactic sugar?

I'm trying to implement a decorator that memoizes an arbitrary function. It appears I've successfully accomplished that with the following code:
def memoize(func):
    """Memoizing decorator for arbitrary functions.

    Bugs fixed vs. the original:
    - the cache key was the string concatenation of the positional args,
      so e.g. f(1, 23) and f(12, 3) collided on the key "123"
    - keyword arguments were passed to func but ignored by the key, so
      f(1, b=2) and f(1, b=3) shared one cache entry
    The key is now the args tuple plus sorted kwargs items (arguments must
    be hashable).  functools.wraps preserves the wrapped function's metadata.
    """
    import functools

    cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return wrapper
@memoize  # fixed: the original showed "#memoize", a comment that decorates nothing
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == fib(1) == 1), memoized."""
    if n == 0 or n == 1:
        return 1
    else:
        return fib(n - 1) + fib(n - 2)
Then fib(100) returns 573147844013817084101 fairly quickly. However, if I don't use the syntactic sugar:
# The question's NON-WORKING variant (kept deliberately): the recursive calls
# go through the undecorated name ``fib``, so no recursive result is cached
# and memoized(100) effectively runs the exponential algorithm -- it hangs.
# (Python 2 snippet: note the print statement.)
def fib(n):
    if n == 0 or n == 1:
        return 1
    else:
        return fib(n - 1) + fib(n - 2)
memoized = memoize(fib)
print memoized(100)
The function hangs. Debugging it, it looks like the wrapper returned is unable to modify the cache. Can someone explain this behavior? As far as I know there shouldn't be a difference between using the sugar and not using the sugar.
Your recursive call is not memoized, because you used a new name, not the original fib function name. Each fib() iteration calls back to fib(), but that will call the original, undecorated function.
Assign the return value of the decorator call to fib instead:
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == fib(1) == 1).

    ``fib`` is rebound to the memoized wrapper below, so the recursive
    calls inside the body go through the cache -- that is the whole fix.
    """
    if n == 0 or n == 1:
        return 1
    else:
        return fib(n - 1) + fib(n - 2)

fib = memoize(fib)
print(fib(100))  # fixed: the original used the Python 2 print statement
Alternatively, if you must use memoize as the name of the decorator result, have fib() call memoized() for recursive calls:
def fib(n):
    """Recursive Fibonacci that recurses through the memoized wrapper."""
    if n == 0 or n == 1:
        return 1
    else:
        # fixed typo: the original read "memoizzed(n - 2)"
        return memoized(n - 1) + memoized(n - 2)

# fixed: the original read "memoized = memoized(fib)", calling a name that
# does not exist yet; the decorator function is ``memoize``.
memoized = memoize(fib)
print(memoized(100))  # fixed: Python 2 print statement
Remember, the #decorator syntax assigns to the same name, not a new name. The following two are equivalent:
@memoize
def fib(n):
# ....
and
def fib(n):
# ....
fib = memoize(fib) # Same name!
except the name fib is never bound to the original function first.

How to create a generator function that calls generator functions

Consider the following (non-working) example code:
# The question's NON-WORKING example (kept deliberately): test_gen contains
# no ``yield``, so it is a plain method.  Calling self.foo(i)/self.bar(i)
# just creates generator objects and discards them, and test_gen returns
# None -- so the for-loop below fails.  (Python 2 snippet: note "print i".)
class MyGenerator:
    def test_gen(self):
        for i in range(1,5):
            if i % 2:
                self.foo(i)
            else:
                self.bar(i)
    def foo(self, i):
        yield i
    def bar(self, i):
        yield i**2
g = MyGenerator()
for i in g.test_gen():
    print i
This will not work, because test_gen has no yield and is no longer a generator function. In this small example I could just return the values from foo and bar and put the yield into test_gen, however I have a case where that's not possible. How can I turn test_gen into a generator function again?
You need to loop over the results of the delegated generators and yield those:
def test_gen(self):
    """Yield every value from foo(i) for odd i and bar(i) for even i, i in 1..4.

    Looping over the delegated generator and re-yielding restores ``yield``
    to this function's body, making it a generator function again.
    """
    for i in range(1, 5):
        delegate = self.foo(i) if i % 2 else self.bar(i)
        for value in delegate:
            yield value
If you are using Python 3.3 or up, you'd use the yield from expression to do proper generator delegation:
def test_gen(self):
    """Delegate to foo (odd i) or bar (even i) via ``yield from`` (Py 3.3+)."""
    for i in range(1, 5):
        source = self.foo if i % 2 else self.bar
        yield from source(i)
Both re-introduce yield into the function, once again making it a generator function.
why not just:
class MyGenerator:
    """Host class whose test_gen pulls exactly one value per sub-generator.

    Since foo and bar each yield a single value, next() on a fresh
    generator object is equivalent to full delegation here.
    """

    def test_gen(self):
        for i in range(1, 5):
            maker = self.foo if i % 2 else self.bar
            yield next(maker(i))

    def foo(self, i):
        yield i

    def bar(self, i):
        yield i ** 2

Categories

Resources