How to represent complex matrix from 2 real-valued matrices - python

I'm trying to represent a matrix of complex numbers using 2 real-valued matrices (in PyTorch, but using NumPy here just for illustrative purposes).
Currently I'm doing it this way:
import numpy as np
# represent real
X = np.random.randn(10,10)
# represent imaginary
I = np.random.randn(10,10)
# build complex matrix using the identity
real = np.concatenate([X, -I], axis=-1)
img = np.concatenate([I, X], axis=-1)
complex_W = np.concatenate([real, img,], axis=0)
# is complex_W now correctly represented??
Could I also do it this way?
# represent real
X = np.random.randn(10,10)
# represent imaginary
I = np.random.randn(10,10)
complex_W = X + I

You can implement a complex ndarray data structure using NumPy arrays. You may want to store the real part in one attribute of the data structure and the imaginary part in another. Python lets you overload operators such as +, -, *, and /. For example, the following class implements a complex data structure with three operators (+, -, *):
import numpy as np

class ComplexNDArray(object):
    def __init__(self, real, imaginary):
        self.real = real
        self.imaginary = imaginary

    @property
    def real(self):
        return self.__real

    @real.setter
    def real(self, value):
        if type(value) == np.ndarray:
            self.__real = value
        elif isinstance(value, (int, float, list, tuple)):
            self.__real = np.array(value)
        else:
            raise ValueError("Unsupported type value:%s" % (str(type(value))))

    @property
    def imaginary(self):
        return self.__imaginary

    @imaginary.setter
    def imaginary(self, value):
        if type(value) == np.ndarray:
            self.__imaginary = value
        elif isinstance(value, (int, float, list, tuple)):
            self.__imaginary = np.array(value)
        else:
            raise ValueError("Unsupported type value:%s" % (str(type(value))))

    def __add__(self, other):
        real = self.real + other.real
        imaginary = self.imaginary + other.imaginary
        return ComplexNDArray(real, imaginary)

    def __sub__(self, other):
        real = self.real - other.real
        imaginary = self.imaginary - other.imaginary
        return ComplexNDArray(real, imaginary)

    def __mul__(self, other):
        # (a+bi)(c+di) = (ac - bd) + (ad + bc)i
        real = self.real * other.real - self.imaginary * other.imaginary
        imaginary = self.real * other.imaginary + self.imaginary * other.real
        return ComplexNDArray(real, imaginary)

    def __str__(self):
        return str(self.real) + "+" + str(self.imaginary) + "i"
Now you can use this data structure to do some operations.
a = np.array([1, 2,3])
b = np.array([4, 5, 1])
c = np.array([4, 7,3])
d = np.array([5, 1,7])
cmplx = ComplexNDArray(a, b)
cmplx2 = ComplexNDArray(c, d)
print(cmplx) # [1 2 3]+[4 5 1]i
print(cmplx2) # [4 7 3]+[5 1 7]i
print(cmplx+cmplx2) # [5 9 6]+[9 6 8]i
print(cmplx-cmplx2) # [-3 -5 0]+[-1 4 -6]i
print(cmplx*cmplx2) # [-16 9 2]+[21 37 24]i
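As a side note on the block-matrix construction in the question: stacking the blocks as [[X, -I], [I, X]] is the standard real representation of the complex matrix X + iI, and matrix products carry over to it. Here is a quick numerical check (a sketch only; NumPy's native complex dtype is used purely for comparison):
import numpy as np

X, I = np.random.randn(10, 10), np.random.randn(10, 10)      # real and imaginary parts
X2, I2 = np.random.randn(10, 10), np.random.randn(10, 10)

# same layout as the concatenate calls in the question: [[X, -I], [I, X]]
W = np.block([[X, -I], [I, X]])
W2 = np.block([[X2, -I2], [I2, X2]])

prod_block = W @ W2                          # product in the 2x2-block representation
prod_native = (X + 1j * I) @ (X2 + 1j * I2)  # product using complex numbers directly

# the top-left block holds the real part, the bottom-left block the imaginary part
print(np.allclose(prod_block[:10, :10], prod_native.real))   # True
print(np.allclose(prod_block[10:, :10], prod_native.imag))   # True
The second snippet in the question, complex_W = X + I, is just an elementwise sum of two real matrices, so the real and imaginary parts can no longer be told apart afterwards.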

Related

Clean way to create 'n-digit precision' float child class in Python

I want to create a LimitedPrecisionFloat class that would allow me to store a float with n digits of precision (I don't want to round the number, just cut off the remaining digits). This is how I would like it to behave:
a = LimitedPrecisionFloat(3.45678, ndigits=3)
print(a) # 3.456
a += 0.11199
print(a) # 3.567
a -= 0.567999
print(a) # 3 (or 3.0 or 3.000)
print(a + 1.9999) # 4.999
# same with other arithmetic operators, ie.
print(LimitedPrecisionFloat(1.11111 * 9.0, 3)) # 9.999
...
What I currently have is:
def round_down(x: float, ndigits: int):
    str_x = str(x)
    int_part = str(int(x))
    fraction_part = str_x[str_x.find('.') + 1:]
    fraction_part = fraction_part[:ndigits]
    return float(int_part + '.' + fraction_part)

class LimitedPrecisionFloat(float):
    def __new__(cls, x, ndigits: int):
        return super(LimitedPrecisionFloat, cls).__new__(cls, round_down(x, ndigits))

    def __init__(self, x, ndigits: int):
        super().__init__()
        self.ndigits = ndigits

    def __add__(self, other):
        return round_down(float(self) + other, ndigits=self.ndigits)

    def __sub__(self, other):
        return round_down(float(self) - other, ndigits=self.ndigits)

    # other operators
However, this does not work, because whenever __add__ (or any other operator) is called it returns a plain float, and the full precision comes back in the number. So I tried something like this:
def __add__(self, other):
    return LimitedPrecisionFloat(float(self) + other, ndigits=self.ndigits)
But then I got a maximum recursion depth error.
Because of that, I also don't have a clue how to implement augmented arithmetic assignments like __iadd__.
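For what it's worth, here is a minimal sketch of one way the operators could hand back the subclass instead of a plain float. The truncate helper is illustrative, not from the original post, and uses math.trunc rather than string slicing:
import math

def truncate(x: float, ndigits: int) -> float:
    # cut off digits past ndigits without rounding (subject to float precision)
    factor = 10 ** ndigits
    return math.trunc(x * factor) / factor

class LimitedPrecisionFloat(float):
    def __new__(cls, x, ndigits: int = 3):
        return super().__new__(cls, truncate(float(x), ndigits))

    def __init__(self, x, ndigits: int = 3):
        super().__init__()
        self.ndigits = ndigits

    def __add__(self, other):
        return LimitedPrecisionFloat(float(self) + float(other), self.ndigits)

    __radd__ = __add__
    __iadd__ = __add__   # += also builds a new truncated instance

    def __sub__(self, other):
        return LimitedPrecisionFloat(float(self) - float(other), self.ndigits)

a = LimitedPrecisionFloat(3.45678, ndigits=3)
print(a)    # 3.456
a += 0.11199
print(a)    # 3.567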

a python class for modular integers that supports addition and negation?

I'm trying to come up with code defining addition and negation in Python. The most similar thing I have is multiplication, which I've copied below. Any ideas on what should be changed to account for addition and negation of modular integers? I know subtraction is just addition of the negation in this problem. Any help would be much appreciated!
class Zp:
    p = 2  # class attribute

    def __init__(self, n):
        self.n = n % Zp.p  # instance attribute

    def __str__(self):
        # msg = '[' + str(self.n) + ']'
        msg = str(self.n) + ' mod ' + str(Zp.p) + ')'
        return msg

    def __repr__(self):
        return self.__str__()

    # def __add__(self, other):
    #     [a], [b] in Zp ([a], [b] are sets of integers)
    #     [a]+[b] = [a+b] e.g. a+b mod p
    # def __neg__(self, other):
    # def __sub__(self, other):  # the addition of negation

    def __mul__(self, other):  # [a]*[b] = [a*b] e.g. a*b mod p
        if Zp.__instancecheck__(other):
            return Zp(self.n * other.n)
        elif type(other) == int:
            return Zp(self.n * other)
        else:
            raise TypeError('multiplication a Zp with a non-Zp or non-integer')
What I have tried for addition:
def __add__(self, other):  # [a]+[b] = [a+b] e.g. a+b mod p
    if Zp.__instancecheck__(other):
        return Zp(self.n + other.n)
    elif type(other) == int:
        return Zp(self.n + other)
When doing modular arithmetic on numbers, you can generally do the normal operation (adding, subtracting, multiplying), then take the answer mod your modulus.
For example,
3 * 3 mod 2 = 9 mod 2 = 1 mod 2
In order to implement the operations you're specifying, do the operation, then take the modulo. Take a look at Wikipedia's article for more information about how to do modular arithmetic: Modular Arithmetic - Wikipedia
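Following that principle and the pattern of your existing __mul__, the missing methods could look something like this (a sketch; isinstance(other, Zp) is the more usual spelling of Zp.__instancecheck__(other)):
class Zp:
    p = 2  # class attribute: the modulus

    def __init__(self, n):
        self.n = n % Zp.p

    def __repr__(self):
        return str(self.n) + ' mod ' + str(Zp.p)

    def __add__(self, other):   # [a] + [b] = [a + b] mod p
        if isinstance(other, Zp):
            return Zp(self.n + other.n)
        elif type(other) == int:
            return Zp(self.n + other)
        else:
            raise TypeError('adding a Zp with a non-Zp or non-integer')

    def __neg__(self):          # -[a] = [-a] mod p
        return Zp(-self.n)

    def __sub__(self, other):   # [a] - [b] = [a] + (-[b])
        return self + (-other)

print(Zp(5) + Zp(4))   # 1 mod 2
print(-Zp(5))          # 1 mod 2
print(Zp(5) - 4)       # 1 mod 2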

Creating Polynomial Class in Python

I am currently working on creating a Polynomial class that includes add, mul and eval methods. I'm currently stuck on the addition portion; if someone could give me some help on how to get that figured out, it would be greatly appreciated. Everything currently runs without errors, but when I do p3 = p1 + p2 and print p3, I get back the two lists joined together. Any feedback would be greatly appreciated.
class Polynomial(object):
    def __init__(self, *coeffs, num = 0):
        self.coeffs = list(coeffs)  # turned into a list
        assert (type(num) is type(1))
        self.num = num

    # Needs to be operator overload
    '''
    def __mul__(self, other):
    '''

    def __eval__(self, other, coeff, x):
        result = coeff[-1]
        for i in range(-2, -len(coeff)-1, -1):
            result = result * x + coeff[i]
        return result

    def __add__(self, other):
        assert type(other) is Polynomial
        num = self.coeffs + other.coeffs
        return Polynomial(num)

    def __sub__(self, other):
        assert type(other) is Polynomial
        num = self.coeffs - other.coeffs
        return Polynomial(num)

    def __represntation__(self):
        return "Polynomial" + str(self.coeffs)

    def __str__(self):
        rep = ""
        degree = len(self.coeffs) - 1
        rep += str(self.coeffs[0]) + "x^" + str(degree)
        for i in range(1, len(self.coeffs)-1):
            coeff = self.coeffs[i]
            if coeff < 0:
                rep += " - " + str(-coeff) + "x^" + str(degree - i)
            else:
                rep += " + " + str(coeff) + "x^" + str(degree - i)
        if self.coeffs[-1] < 0:
            rep += " - " + str(-self.coeffs[-1])
        else:
            rep += " + " + str(self.coeffs[-1])
        return rep
Adding two lists with + concatenates them instead of adding the elements.
def __add__(self, other):
    assert type(other) is Polynomial
    assert len(self.coeffs) == len(other.coeffs)
    new_coeffs = [item1 + item2 for (item1, item2) in zip(self.coeffs, other.coeffs)]
    return Polynomial(*new_coeffs)
The problem is here:
num = self.coeffs + other.coeffs
Adding one list to another list concatenates them. To add the corresponding elements to each other, you'd want to do
from itertools import zip_longest
...
num = [a + b for (a, b) in zip_longest(self.coeffs, other.coeffs, fillvalue=0)]
We use zip_longest() instead of the plain zip() because one polynomial may be longer than the other, and we don't want to drop its extra coefficients. Either function groups the corresponding elements together so that we can add them pairwise and build a list of the sums.
You would do something similar for subtraction.
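For instance, on bare coefficient lists of different lengths:
from itertools import zip_longest

p1 = [1, 2, 3]
p2 = [4, 5]
print([a + b for a, b in zip_longest(p1, p2, fillvalue=0)])   # [5, 7, 3]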
You should reverse the order of coefficients passed to your constructor so that indexes in the self.coeffs list correspond to the exponents. This would simplify the rest of your code and allow you to use zip_longest for additions and subtractions.
When you get to other operations, however, I think you will realize that your internal structure would be easier to manage as a dictionary. A dictionary is more forgiving of missing entries, so you avoid having to allocate space for new indexes.
class Polynomial(object):
    def __init__(self, *coeffs):
        self.coeffs = {exp: c for exp, c in enumerate(coeffs[::-1])}

    def __add__(self, other):
        assert type(other) is Polynomial
        result = Polynomial(0)
        result.coeffs = {**self.coeffs}
        for exp, c in other.coeffs.items():
            result.coeffs[exp] = result.coeffs.get(exp, 0) + c
        return result

    def __sub__(self, other):
        assert type(other) is Polynomial
        result = Polynomial(0)
        result.coeffs = {**self.coeffs}
        for exp, c in other.coeffs.items():
            result.coeffs[exp] = result.coeffs.get(exp, 0) - c
        return result

    def __mul__(self, other):
        assert type(other) is Polynomial
        result = Polynomial(0)
        for exp1, c1 in self.coeffs.items():
            for exp2, c2 in other.coeffs.items():
                result.coeffs[exp1+exp2] = result.coeffs.get(exp1+exp2, 0) + c1*c2
        return result

    def __representation__(self):
        return "Polynomial" + str(self.coeffs)

    def __str__(self):
        result = [""] + [f"{c}x^{i}" for i, c in sorted(self.coeffs.items()) if c] + [""]
        result = "+".join(reversed(result))
        result = result.replace("+1x", "+x")
        result = result.replace("-1x", "-x")
        result = result.replace("x^0", "")
        result = result.replace("x^1+", "x+")
        result = result.replace("+-", "-")
        result = result.strip("+")
        result = result.replace("+", " + ")
        result = result[:1] + result[1:].replace("-", " - ")
        return result.strip()
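A quick check of the dictionary-based class above (coefficients are still passed highest degree first):
p1 = Polynomial(1, 2, 3)   # x^2 + 2x + 3
p2 = Polynomial(4, 5)      # 4x + 5
print(p1 + p2)             # x^2 + 6x + 8
print(p1 * p2)             # 4x^3 + 13x^2 + 22x + 15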

Division of complex numbers in Python 3 class

I am having trouble creating a __div__ method in a Python class for complex numbers which should divide two complex numbers.
Here is my code:
class Complex(object):
    def __init__(self, real = 0, imag = 0):
        self.real = real
        self.imag = imag

    def __str__(self):
        if self.imag > 0:
            return str(self.real) + "+" + str(self.imag) + "i"
        elif self.imag < 0:
            return str(self.real) + str(self.imag) + "i"

    def __div__(self, other):
        x = self.real * other.real + self.imag * other.imag
        y = self.imag * other.real - self.real * other.imag
        z = other.real**2 + other.imag**2
        real = x / z
        imag = y / z
        return Complex(real, imag)

no = Complex(2,-8)
no2 = Complex(3,7)
print(no/no2)
Unfortunately, my approach doesn't work. Any suggestions?
__div__ doesn't exist anymore in Python 3. It's been replaced by __truediv__ for / and __floordiv__ for //
Have a look at
https://docs.python.org/3/reference/datamodel.html
It's __truediv__, not __div__. __div__ was the name for the old Python 2 "floordiv for integer, truediv for non-integer" division.
While you're fixing things, you should probably add an else case in __str__.
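So in the class from the question it is enough to rename __div__ to __truediv__ and leave the body as it is. A quick check against Python's built-in complex type (shown only for comparison):
print(Complex(2, -8) / Complex(3, 7))   # roughly -0.862-0.655i
print((2 - 8j) / (3 + 7j))              # roughly (-0.862-0.655j)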
You need to define a __truediv__(self, other) method that performs the division, plus a helper __opposite__(self) that flips the sign of the imaginary part before multiplying. Calling an ordinary method to divide (e.g. no.div(no1)) is not a nice interface; building on @johnO's solution and overriding __truediv__ lets the OP keep writing no/no2 to divide two complex numbers.
See the code below.
class Complex(object):
    def __init__(self, real = 0, imag = 0):
        self.real = real
        self.imag = imag

    def __str__(self):
        if self.imag > 0:
            return str(self.real) + "+" + str(self.imag) + "i"
        elif self.imag < 0:
            return str(self.real) + str(self.imag) + "i"

    def __opposite__(self):
        # flip the sign of the imaginary part (i.e. take the conjugate)
        self.imag = self.imag * -1

    def __truediv__(self, other):
        other.__opposite__()
        x = self.real * other.real - self.imag * other.imag
        y = self.imag * other.real + self.real * other.imag
        z = other.real**2 + other.imag**2
        self.new_real = x / z
        self.new_imag = y / z
        # note: returns a formatted string rather than a Complex instance
        if self.new_imag > 0:
            result = "{} + {}i".format(self.new_real, self.new_imag)
        else:
            result = "{} {}i".format(self.new_real, self.new_imag)
        return result
no = Complex(4,5)
no2 = Complex(2,6)
print(no/no2)
Output:
0.95 -0.35i

Overloading Operators with MATLAB

I'm currently writing code to perform Gaussian elimination in MATLAB and then write out the code needed to generate a LaTeX file showing all the steps. A lot of the time when I do Gaussian elimination the answers start turning into fractions. So I thought that, as a nice learning exercise for classes in MATLAB, I would write a Fraction class. But I have no clue how to overload operators, and frankly MathWorks' documentation wasn't helpful.
classdef Fraction
    properties
        numer
        denom
    end
    methods
        function a = Fraction(numer,denom)
            a.denom = denom;
            a.numer = numer;
        end
        function r = mtimes(a,b)
            r = Fraction(a.numer*b.numer,a.denom*b.demon);
        end
        function r = plus(a,b)
            c = a.numer*b.denom+a.denom*b.numer;
            d = a.denom*b.denom;
            r = Fraction(c,d);
        function r = minus(a,b)
            c = a.numer*b.denom-a.denom*b.numer;
            d = a.denom*b.denom;
            r = Fraction(c,d);
        end
        function r = mrdivide(a,b)
            r = Fraction(a.numer*b.denom,a.denom*b.numer);
        end
        function b = reduceFrac(a)
            x = a.numer;
            y = b.denom;
            while y ~= 0
                x = y;
                y = mod(x,y);
            end
            b = Fraction(a.numer/x, a.denom/x)
        end
    end
end
The plus operator works but the other three do not. Does anyone have any ideas? Also, how do I call my reduceFrac method?
Fraction.reduceFrac(Fraction(2.4))
I thought that the code above would work, but it didn't. Below is the Python version of what I am trying to achieve.
Fraction.py
class Fraction(object):
    """Fraction class

    Attributes:
        numer: the numerator of the fraction.
        denom: the denominator of the fraction.
    """

    def __init__(self, numer, denom):
        """Initializes the Fraction class

        Sets the initial numer and denom for the
        fraction class.

        Args:
            numer: Top number of the Fraction
            denom: Bottom number of the Fraction
        Returns:
            None
        Raises:
            None
        """
        self.numer = numer
        self.denom = denom

    def __str__(self):
        """function call along with the print command

        Args:
            None
        Returns:
            String: numer / denom.
        Raises:
            None
        """
        return str(self.numer) + '/' + str(self.denom)

    def get_numer(self):
        return self.numer

    def set_numer(self, numer):
        self.numer = numer

    def get_denom(self):
        return self.denom

    def set_denom(self, denom):
        self.denom = denom

    def __add__(self, other):
        numer = self.numer*other.denom+other.numer*self.denom
        denom = self.denom*other.denom
        return Fraction.reduceFrac(Fraction(numer,denom))

    def __div__(self, other):
        numer = self.numer*other.denom
        denom = self.denom*other.numer
        return Fraction.reduceFrac(Fraction(numer,denom))

    def __sub__(self, other):
        numer = self.numer*other.denom-other.numer*self.denom
        denom = self.denom*other.denom
        return Fraction.reduceFrac(Fraction(numer,denom))

    def __mul__(self, other):
        numer = self.numer*other.numer
        denom = self.denom*other.denom
        return Fraction.reduceFrac(Fraction(numer,denom))

    def reduceFrac(self):
        x = self.numer
        y = self.denom
        while y != 0:
            (x, y) = (y, x % y)
        return Fraction(self.numer/x, self.denom/x)

if __name__ == "__main__":
    v = Fraction(4,3)
    g = Fraction(7,8)
    r = Fraction(4,8)
    a = v + g
    print a
    s = v - g
    print s
    d = v / g
    print d
    m = v * g
    print m
    f = Fraction.reduceFrac(r)
    print f
Your plus function is missing an end.
