fft division for fast polynomial division - python

I'm trying to implement fast polynomial division using the Fast Fourier Transform (FFT).
Here is what I have got so far:
from numpy.fft import fft, ifft

def fft_div(C1, C2):
    # reverse so that the most significant coefficients end up right-most
    C1 = C1[::-1]
    C2 = C2[::-1]
    d = len(C1) + len(C2) - 1
    c1 = fft(list(C1) + [0] * (d - len(C1)))
    c2 = fft(list(C2) + [0] * (d - len(C2)))
    res = list(ifft(c1 - c2)[:d].real)
    # Reorder back to left-most and round to integer
    return [int(round(x)) for x in res[::-1]]
This works well for polynomials of the same length, but if the lengths differ the result is wrong (I benchmark against RosettaCode's extended_synthetic_division() function):
# Most significant coefficient is left
N = [1, -11, 0, -22, 1]
D = [1, -3, 0, 1, 2]
# OK case, same length for both polynomials
fft_div(N, D)
>> [0, 0, 0, 0, 0, -8, 0, -23, -1]
extended_synthetic_division(N, D)
>> ([1], [-8, 0, -23, -1])
# NOT OK case, D is longer than N (also happens if shorter)
D = [1, -3, 0, 1, 2, 20]
fft_div(N, D)
>> [0, 0, 0, 0, -1, 4, -11, -1, -24, -19]
extended_synthetic_division(N, D)
>> ([], [1, -11, 0, -22, 1])
What is weird is that the result seems very close, but still a bit off. What did I do wrong? In other words: how do I generalize fast polynomial division (using FFT) to vectors of different sizes?
Bonus points if you can also tell me how to compute the division quotient (currently I only have the remainder).

Here's a direct implementation of a fast polynomial division algorithm found in these lecture notes.
The division is based on the fast/FFT multiplication of dividend with the divisor's reciprocal. My implementation below strictly follows the algorithm proven to have O(n*log(n)) time complexity (for polynomials with degrees of the same order of magnitude), but it's written with emphasis on readability, not efficiency.
from math import ceil, log
from numpy.fft import fft, ifft

def poly_deg(p):
    return len(p) - 1

def poly_scale(p, n):
    """Multiply polynomial ``p(x)`` by ``x^n``.

    If ``n`` is negative, ``p(x)`` is divided by ``x^(-n)`` and the remainder
    is discarded (truncated division).
    """
    if n >= 0:
        return list(p) + [0] * n
    else:
        return list(p)[:n]
def poly_scalar_mul(a, p):
    """Multiply polynomial ``p(x)`` with scalar (constant) ``a``."""
    return [a*pi for pi in p]

def poly_extend(p, d):
    """Extend list ``p`` representing a polynomial ``p(x)`` to
    match polynomials of degree ``d-1``.
    """
    return [0] * (d-len(p)) + list(p)

def poly_norm(p):
    """Normalize the polynomial ``p(x)`` to have a non-zero most significant
    coefficient.
    """
    for i, a in enumerate(p):
        if a != 0:
            return p[i:]
    return []

def poly_add(u, v):
    """Add polynomials ``u(x)`` and ``v(x)``."""
    d = max(len(u), len(v))
    return [a+b for a, b in zip(poly_extend(u, d), poly_extend(v, d))]

def poly_sub(u, v):
    """Subtract polynomials ``u(x)`` and ``v(x)``."""
    d = max(len(u), len(v))
    return poly_norm([a-b for a, b in zip(poly_extend(u, d), poly_extend(v, d))])

def poly_mul(u, v):
    """Multiply polynomials ``u(x)`` and ``v(x)`` with FFT."""
    if not u or not v:
        return []
    d = poly_deg(u) + poly_deg(v) + 1
    U = fft(poly_extend(u, d)[::-1])
    V = fft(poly_extend(v, d)[::-1])
    res = list(ifft(U*V).real)
    return [int(round(x)) for x in res[::-1]]
def poly_recip(p):
    """Calculate the reciprocal of polynomial ``p(x)`` with degree ``k-1``,
    defined as: ``x^(2k-2) / p(x)``, where ``k`` is a power of 2.
    """
    k = poly_deg(p) + 1
    assert k > 0 and p[0] != 0 and 2**round(log(k, 2)) == k
    if k == 1:
        return [1 / p[0]]
    q = poly_recip(p[:k//2])
    r = poly_sub(poly_scale(poly_scalar_mul(2, q), 3*k//2 - 2),
                 poly_mul(poly_mul(q, q), p))
    return poly_scale(r, -k+2)
def poly_divmod(u, v):
    """Fast polynomial division ``u(x)`` / ``v(x)`` of polynomials with degrees
    m and n. Time complexity is ``O(n*log(n))`` if ``m`` is of the same order
    as ``n``.
    """
    if not u or not v:
        return []
    m = poly_deg(u)
    n = poly_deg(v)

    # ensure deg(v) is one less than some power of 2
    # by extending v -> ve, u -> ue (mult by x^nd)
    nd = int(2**ceil(log(n+1, 2))) - 1 - n
    ue = poly_scale(u, nd)
    ve = poly_scale(v, nd)
    me = m + nd
    ne = n + nd

    s = poly_recip(ve)
    q = poly_scale(poly_mul(ue, s), -2*ne)

    # handle the case when m > 2n
    if me > 2*ne:
        # t = x^2n - s*v
        t = poly_sub(poly_scale([1], 2*ne), poly_mul(s, ve))
        q2, r2 = poly_divmod(poly_scale(poly_mul(ue, t), -2*ne), ve)
        q = poly_add(q, q2)

    # remainder, r = u - v*q
    r = poly_sub(u, poly_mul(v, q))
    return q, r
The poly_divmod(u, v) function returns a (quotient, remainder) tuple for polynomials u and v (like Python's standard divmod for numbers).
For example:
>>> print(poly_divmod([1, 0, -1], [1, -1]))
([1, 1], [])
>>> print(poly_divmod([3, -5, 10, 8], [1, 2, -3]))
([3, -11], [41, -25])
>>> print(poly_divmod([1, -11, 0, -22, 1], [1, -3, 0, 1, 2]))
([1], [-8, 0, -23, -1])
>>> print(poly_divmod([1, -11, 0, -22, 1], [1, -3, 0, 1, 2, 20]))
([], [1, -11, 0, -22, 1])
That is:
(x^2 - 1) / (x - 1) == x + 1
(3x^3 - 5x^2 + 10x + 8) / (x^2 + 2x - 3) == 3x - 11, with remainder 41x - 25
etc. (The last two examples are yours.)
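As a quick sanity check (not part of the fast algorithm above), the same divisions can be compared against NumPy's built-in long division of polynomials, which also expects the most significant coefficient first:
import numpy as np

# numpy.polydiv does ordinary long division on coefficient lists and
# returns (quotient, remainder) as float arrays.
q, r = np.polydiv([1, -11, 0, -22, 1], [1, -3, 0, 1, 2])
print(q)  # expect quotient [1.]
print(r)  # expect remainder [-8., 0., -23., -1.]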

Related

numpy qr factorization problems

I'm trying to extract the rotation matrix from an affine transform matrix (via QR decomposition), but it gives me wrong rotations. After applying the rotation, the vectors should have zero angle between them. I have no idea what is going wrong.
Here is the code that is currently giving the unexpected output:
import numpy as np
import math

np.set_printoptions(suppress=True)

def compute_affine(ins, out):  # matrices 3x4
    # https://stackoverflow.com/questions/8873462/how-to-perform-coordinates-affine-transformation-using-python
    # finding transformation
    l = len(ins)
    entry = lambda r, d: np.linalg.det(np.delete(np.vstack([r, ins.T, np.ones(l)]), d, axis=0))
    M = np.array([[(-1) ** i * entry(R, i) for R in out.T] for i in range(l + 1)])
    print(M)
    A, t = np.hsplit(M[1:].T / (-M[0])[:, None], [l - 1])
    t = np.transpose(t)[0]

    # output transformation
    print("Affine transformation matrix:\n", A)
    print("Affine transformation translation vector:\n", t)

    # unittests
    print("TESTING:")
    for p, P in zip(np.array(ins), np.array(out)):
        image_p = np.dot(A, p) + t
        result = "[OK]" if np.allclose(image_p, P) else "[ERROR]"
        print(p, " mapped to: ", image_p, " ; expected: ", P, result)

    return A, t

def dot_product_angle(v1, v2):
    if np.linalg.norm(v1) == 0 or np.linalg.norm(v2) == 0:
        print("Zero magnitude vector!")
    else:
        vector_dot_product = np.dot(v1, v2)
        arccos = np.arccos(vector_dot_product / (np.linalg.norm(v1) * np.linalg.norm(v2)))
        angle = np.degrees(arccos)
        return angle
    return 0

if __name__ == "__main__":
    import numpy as np
    src = np.array([
        [1, 0, 0],
        [0, 0, 1],
        [1, 0, 1],
        [1, 1, 1],
    ])
    uv = np.array([
        [0.1, 0],
        [0, -0.1],
        [0.1, -0.1],
        [1, 1]
    ])
    angle = 45
    rot_matrix = np.array([[math.cos(angle), -math.sin(angle), 0],
                           [math.sin(angle), math.cos(angle), 0],
                           [0, 0, 1]])
    dst = np.dot(src, rot_matrix)

    # compute affine matrices
    src_A, src_t = compute_affine(src, uv)
    dst_A, dst_t = compute_affine(dst, uv)

    src_Q, src_R = np.linalg.qr(np.vstack([src_A, np.cross(src_A[0], src_A[1])]))
    dst_Q, dst_R = np.linalg.qr(np.vstack([dst_A, np.cross(dst_A[0], dst_A[1])]))

    vec1 = np.dot(src_Q[:-1], src[0])
    vec2 = np.dot(dst_Q[:-1], dst[0])
    if not dot_product_angle(vec1, vec2) == 0:
        raise Exception("Angle is invalid should be zero")

Polynomial which satisfies integral and two points

Consider two points (x_0, f_0) and (x_1, f_1)
let p(x) be the degree two polynomial for which
p(x_0) = f_0
p(x_1) = f_1
and the integral of p(x) from -1 to 1 is equal to 0
Write a function which accepts two arguments
1. a length 2 NumPy vector 'x' of floating point values, with 'x[i]' containing the value of x_i,
2. a length 2 NumPy vector 'f' of floating point values, with 'f[i]' containing the value of f_i,
and which returns
a length 3 NumPy vector of floating point values containing the power series coefficients, in order from the highest order term to the constant term, for p(x)
I'm not sure where to start. My initial thought would be to have a differential equation P(1)=P(-1) with initial values p(x_0) = f_0 and p(x_1) = f_1, but I'm also having issues with the implementation.
Using sympy, Python's symbolic math library, the problem can be formulated as follows:
from sympy import symbols, Eq, solve, integrate

def give_coeff(x, f):
    a, b, c, X = symbols('a, b, c, X')
    F = a * X * X + b * X + c  # we have a second order polynomial
    sol = solve([Eq(integrate(F, (X, -1, 1)), 0),  # the integral should be zero (2/3*a + 2*c)
                 Eq(F.subs(X, x[0]), f[0]),        # filling in x[0] should give f[0]
                 Eq(F.subs(X, x[1]), f[1])],       # filling in x[1] should give f[1]
                (a, b, c))                         # solve for a, b and c
    return sol[a].evalf(), sol[b].evalf(), sol[c].evalf()

import numpy as np
coeff = give_coeff(np.array([1, 2]), np.array([3, 4]))
print(coeff)
The code can even be expanded to polynomials of any degree:
from sympy import Eq, solve, symbols, integrate

def give_coeff(x, f):
    assert len(x) == len(f), "x and f need to have the same length"
    degree = len(x)
    X = symbols('X')
    a = [symbols(f'a_{i}') for i in range(degree + 1)]
    F = 0
    for ai in a[::-1]:
        F = F * X + ai
    sol = solve([Eq(integrate(F, (X, -1, 1)), 0)] +
                [Eq(F.subs(X, xi), fi) for xi, fi in zip(x, f)],
                (*a,))
    # print(sol)
    # print(F.subs(sol).expand())
    return [sol[ai].evalf() for ai in a[::-1]]

import numpy as np
coeff = give_coeff(np.array([1, 2]), np.array([3, 4]))
print(coeff)
print(give_coeff(np.array([1, 2, 3, 4, 5]), np.array([3, 4, 6, 9, 1])))
PS: To solve the second degree equation using only numpy, np.linalg.solve can be used to solve the linear system of 3 equations in 3 unknowns. The equations need to be "hand calculated", which is more error prone and more laborious to extend to higher degrees.
import numpy as np

def np_give_coeff(x, f):
    # general equation: F = a*X**2 + b*X + c
    # 3 equations:
    #   integral(F, (X, -1, 1)) == 0, i.e. (2/3*a + 2*c) == 0
    #   a*x[0]**2 + b*x[0] + c == f[0]
    #   a*x[1]**2 + b*x[1] + c == f[1]
    A = np.array([[2/3, 0, 2],
                  [x[0]**2, x[0], 1],
                  [x[1]**2, x[1], 1]])
    B = np.array([0, f[0], f[1]])
    return np.linalg.solve(A, B)

coeff = np_give_coeff(np.array([1, 2]), np.array([3, 4]))
print(coeff)
You can solve this generically, taking advantage of the fact that the integral of a*x^2 + b*x + c from -1 to 1 equals 2/3*a + 2*c, and adding that as a constraint. Then you have 3 equations for 3 unknowns (a, b, c).
There are other interesting tricks, it is a neat question. Try playing around with writing your formula in terms of a(x-b)(x-c); then you have 3bc + 1 = 0. Also, any solution for points (x0,y0), (x1,y1) has a similar solution for (k*x0, k*y0), (k*x1, k*y1).
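As a quick check of the factored-form idea (my own illustration using sympy), the zero-integral constraint on a*(x - b)*(x - c) does reduce to 3*b*c + 1 = 0:
from sympy import symbols, integrate, factor

a, b, c, x = symbols('a b c x')
p = a * (x - b) * (x - c)
constraint = integrate(p, (x, -1, 1))
# factors as 2*a*(3*b*c + 1)/3, which vanishes (for a != 0) exactly when 3*b*c + 1 == 0
print(factor(constraint))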

I keep getting this error: line 57 List index out of range

import pprint

def mult_matrix(M, N):
    """Multiply square matrices of same dimension M and N"""
    # Converts N into a list of tuples of columns
    tuple_N = zip(*N)
    # Nested list comprehension to calculate matrix multiplication
    return [[sum(el_m * el_n for el_m, el_n in zip(row_m, col_n)) for col_n in tuple_N] for row_m in M]

def pivot_matrix(M):
    """Returns the pivoting matrix for M, used in Doolittle's method."""
    m = len(M)
    # Create an identity matrix, with floating point values
    id_mat = [[float(i == j) for i in range(m)] for j in range(m)]
    # Rearrange the identity matrix such that the largest element of
    # each column of M is placed on the diagonal of M
    for j in range(m):
        row = max(range(j, m), key=lambda i: abs(M[i][j]))
        if j != row:
            # Swap the rows
            id_mat[j], id_mat[row] = id_mat[row], id_mat[j]
    return id_mat

def lu_decomposition(A):
    """Performs an LU Decomposition of A (which must be square)
    into PA = LU. The function returns P, L and U."""
    n = len(A)
    # Create zero matrices for L and U
    L = [[0.0] * n for i in range(0, n)]
    U = [[0.0] * n for i in range(0, n)]
    # Create the pivot matrix P and the multiplied matrix PA
    P = pivot_matrix(A)
    PA = mult_matrix(P, A)
    # Perform the LU Decomposition
    for j in range(n):
        # All diagonal entries of L are set to unity
        L[j][j] = 1.0
        # LaTeX: u_{ij} = a_{ij} - \sum_{k=1}^{i-1} u_{kj} l_{ik}
        for i in range(j+1):
            s1 = sum(U[k][j] * L[i][k] for k in range(i))
            U[i][j] = PA[i][j] - s1
        # LaTeX: l_{ij} = \frac{1}{u_{jj}} (a_{ij} - \sum_{k=1}^{j-1} u_{kj} l_{ik})
        for i in range(j, n):
            s2 = sum(U[k][j] * L[i][k] for k in range(j))
            L[i][j] = (PA[i][j] - s2) / U[j][j]
    return (P, L, U)

A = [[7, 3, -1, 2], [3, 8, 1, -4], [-1, 1, 4, -1], [2, -4, -1, 6]]
P, L, U = lu_decomposition(A)
print("A:")
pprint.pprint(A)
print("P:")
pprint.pprint(P)
print("L:")
pprint.pprint(L)
print("U:")
pprint.pprint(U)
I believe the error is in the following for loop:
for i in range(j+1):
    s1 = sum(U[k][j] * L[i][k] for k in range(i))
    U[i][j] = PA[i][j] - s1
As the outer loop runs (for j in range(n):), when j == n the inner loop goes to n+1, raising the exception. Hope it helps.
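Independently of the indexing question, SciPy's built-in LU factorization can serve as a reference for checking the final result; note that scipy.linalg.lu uses the convention A = P @ L @ U rather than PA = LU (a small cross-check sketch, not from the original post):
import numpy as np
from scipy.linalg import lu

A = np.array([[7, 3, -1, 2], [3, 8, 1, -4], [-1, 1, 4, -1], [2, -4, -1, 6]], dtype=float)
P, L, U = lu(A)                    # SciPy's convention: A = P @ L @ U
print(np.allclose(A, P @ L @ U))   # True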

How might I check whether 2 polynomials are congruent modulo (h(x), n)?

I am currently trying to write my own implementation of the AKS algorithm. The pseudocode for this (taken directly from the paper 'PRIMES is in P') may be seen here.
The part of this that I am struggling with is the code within the if statement on line 5. This requires us to check whether
(x+a)^n = x^n + a ( mod x^r - 1, n )
Does anyone know how I might do this (in python)? I believe that this congruence is equivalent to saying that there exist polynomials q(x) and r(x) such that
f(x) = g(x) + (x^r - 1) * q(x) + n * r(x)
though I'm not certain of this.
I have attempted to replicate this if statement using python and the sympy package with the following code
if(sym.div(sym.div(mod_zero, x**r - 1)[1], n)[1] == 0):
    print("Congruent")
Your interpretation f(x) = g(x) + (x^r - 1) * q(x) + n * r(x) is not incorrect, if g is understood to be zero, and q and r have integer coefficients. But it's really two steps: taking the remainder of polynomial division by (x^r - 1), and then applying mod n to the coefficients.
In SymPy terms, the comparison is
trunc(rem((x + a)**n -(x**n + a), x**r - 1), n) == 0
where rem finds the polynomial remainder, and trunc takes the coefficients mod n. Examples:
x = poly("x")
n = 35
r = 29
a = 7
trunc(rem((x + a)**n - (x**n + a), x**r - 1), n)
outputs Poly(14*x**25 + 7*x**10 - 7*x**5 + 14*x - 14, x, domain='ZZ')
while, replacing 35 by 31, we get Poly(0, x, domain='ZZ'), which passes the == 0 test.
Speedup
One way to optimize is to also apply trunc before rem, to make the coefficients smaller prior to division.
trunc(rem(trunc((x + a)**n - (x**n + a), n), x**r - 1), n)
This helps a bit. But the more substantial speedup can be achieved by using low-level routines from the "galoistools" module. They operate with coefficients as lists, like this: [1, a] is x + a.
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_lshift, gf_sub, gf_add_ground, gf_pow, gf_rem

n = 35
r = 29
a = 7
f1 = gf_pow([1, a], n, n, ZZ)                        # (x + a)**n
f2 = gf_add_ground(gf_lshift([1], n, ZZ), a, n, ZZ)  # x**n + a
g = gf_add_ground(gf_lshift([1], r, ZZ), -1, n, ZZ)  # x**r - 1
print(gf_rem(gf_sub(f1, f2, n, ZZ), g, n, ZZ))
prints [14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 28, 0, 0, 0, 14, 21] which agrees (modulo 35) with the earlier result.
The zero polynomial is [] in this representation, so the test could be as simple as
if gf_rem(gf_sub(f1, f2, n, ZZ), g, n, ZZ):
    print("Composite")  # [] is falsy, other lists are truthy
The galoistools code is less elegant, but is an order of magnitude faster.
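For use inside an AKS-style loop over several values of a, the low-level check can be wrapped in a small helper along these lines (a sketch; the function name and structure are my own, built only from the gf_* calls shown above):
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_lshift, gf_sub, gf_add_ground, gf_pow, gf_rem

def aks_congruence_holds(a, n, r):
    """Check whether (x + a)^n == x^n + a  (mod x^r - 1, n)."""
    f1 = gf_pow([1, a % n], n, n, ZZ)                         # (x + a)^n, coefficients mod n
    f2 = gf_add_ground(gf_lshift([1], n, ZZ), a % n, n, ZZ)   # x^n + a, coefficients mod n
    g = gf_add_ground(gf_lshift([1], r, ZZ), -1, n, ZZ)       # x^r - 1, coefficients mod n
    return not gf_rem(gf_sub(f1, f2, n, ZZ), g, n, ZZ)        # empty remainder means congruent

# e.g. aks_congruence_holds(7, 31, 29) should be True, aks_congruence_holds(7, 35, 29) False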

Interpolate polynomial over a finite field

I want to use Python to interpolate a polynomial on points from a finite field and get a polynomial with coefficients in that field.
Currently I'm trying to use SymPy, specifically interpolate (from sympy.polys.polyfuncs), but I don't know how to force the interpolation to happen in a specific GF. If that's not possible, can this be done with another module?
Edit: I'm interested in a Python implementation/library.
SymPy's interpolating_poly does not support polynomials over finite fields. But there are enough details under the hood of SymPy to put together a class for finite fields, and find the coefficients of Lagrange polynomial in a brutally direct fashion.
As usual, the elements of the finite field GF(p^n) are represented by polynomials of degree less than n, with coefficients in GF(p). Multiplication is done modulo a reducing polynomial of degree n, which is selected at the time of field construction. Inversion is done with the extended Euclidean algorithm.
The polynomials are represented by lists of coefficients, highest degrees first. For example, the elements of GF(3^2) are:
[], [1], [2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]
The empty list represents 0.
Class GF, finite fields
Implements arithmetic as methods add, sub, mul, inv (multiplicative inverse). For convenience of testing the interpolation, it includes eval_poly, which evaluates a given polynomial with coefficients in GF(p^n) at a point of GF(p^n).
Note that the constructor is used as GF(3, 2), not as GF(9): the prime and the exponent are supplied separately.
import itertools
from functools import reduce
from sympy import symbols, Dummy, Poly, S, pprint
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import (gf_irreducible_p, gf_add,
    gf_sub, gf_mul, gf_rem, gf_gcdex)
from sympy.ntheory.primetest import isprime

class GF():
    def __init__(self, p, n=1):
        p, n = int(p), int(n)
        if not isprime(p):
            raise ValueError("p must be a prime number, not %s" % p)
        if n <= 0:
            raise ValueError("n must be a positive integer, not %s" % n)
        self.p = p
        self.n = n
        if n == 1:
            self.reducing = [1, 0]
        else:
            for c in itertools.product(range(p), repeat=n):
                poly = (1, *c)
                if gf_irreducible_p(poly, p, ZZ):
                    self.reducing = poly
                    break

    def add(self, x, y):
        return gf_add(x, y, self.p, ZZ)

    def sub(self, x, y):
        return gf_sub(x, y, self.p, ZZ)

    def mul(self, x, y):
        return gf_rem(gf_mul(x, y, self.p, ZZ), self.reducing, self.p, ZZ)

    def inv(self, x):
        s, t, h = gf_gcdex(x, self.reducing, self.p, ZZ)
        return s

    def eval_poly(self, poly, point):
        val = []
        for c in poly:
            val = self.mul(val, point)
            val = self.add(val, c)
        return val
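For a quick feel of how the class behaves, here is GF(3^2) in action (my own small illustration, not part of the original answer):
K = GF(3, 2)               # the 9-element field GF(3^2)
x = [1, 2]                 # the element a + 2
y = [2, 0]                 # the element 2*a
print(K.add(x, y))         # sum of the two elements
print(K.mul(x, y))         # product, reduced modulo K.reducing
print(K.mul(x, K.inv(x)))  # multiplying by the inverse gives [1]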
Class PolyRing, polynomials over a field
This one is simpler: it implements addition, subtraction, and multiplication of polynomials, referring to the ground field for operations on coefficients. There are a lot of list reversals [::-1] because of SymPy's convention of listing monomials starting with the highest powers.
class PolyRing():
    def __init__(self, field):
        self.K = field

    def add(self, p, q):
        s = [self.K.add(x, y) for x, y in
             itertools.zip_longest(p[::-1], q[::-1], fillvalue=[])]
        return s[::-1]

    def sub(self, p, q):
        s = [self.K.sub(x, y) for x, y in
             itertools.zip_longest(p[::-1], q[::-1], fillvalue=[])]
        return s[::-1]

    def mul(self, p, q):
        if len(p) < len(q):
            p, q = q, p
        s = [[]]
        for j, c in enumerate(q):
            s = self.add(s, [self.K.mul(b, c) for b in p] +
                         [[]] * (len(q) - j - 1))
        return s
Construction of interpolating polynomial.
The Lagrange polynomial is constructed for given x-values in list X and corresponding y-values in array Y. It is a linear combination of basis polynomials, one for each element of X. Each basis polynomial is obtained by multiplying (x-x_k) polynomials, represented as [[1], K.sub([], x_k)]. The denominator is a scalar, so it's even easier to compute.
def interp_poly(X, Y, K):
    R = PolyRing(K)
    poly = [[]]
    for j, y in enumerate(Y):
        Xe = X[:j] + X[j+1:]
        numer = reduce(lambda p, q: R.mul(p, q), ([[1], K.sub([], x)] for x in Xe))
        denom = reduce(lambda x, y: K.mul(x, y), (K.sub(X[j], x) for x in Xe))
        poly = R.add(poly, R.mul(numer, [K.mul(y, K.inv(denom))]))
    return poly
Example of usage:
K = GF(2, 4)
X = [[], [1], [1, 0, 1]] # 0, 1, a^2 + 1
Y = [[1, 0], [1, 0, 0], [1, 0, 0, 0]] # a, a^2, a^3
intpoly = interp_poly(X, Y, K)
pprint(intpoly)
pprint([K.eval_poly(intpoly, x) for x in X]) # same as Y
The pretty print is just to avoid some type-related decorations on the output. The polynomial is shown as [[1], [1, 1, 1], [1, 0]]. To help readability, I added a function to turn this into a more familiar form, with a symbol a being a generator of the finite field, and x being the variable in the polynomial.
def readable(poly, a, x):
    return Poly(sum((sum((c*a**j for j, c in enumerate(coef[::-1])), S.Zero) * x**k
                     for k, coef in enumerate(poly[::-1])), S.Zero), x)
So we can do
a, x = symbols('a x')
print(readable(intpoly, a, x))
and get
Poly(x**2 + (a**2 + a + 1)*x + a, x, domain='ZZ[a]')
This algebraic object is not a polynomial over our field; it is just for the sake of readable output.
Sage
As an alternative, or just another safety check, one can use the lagrange_polynomial from Sage for the same data.
field = GF(16, 'a')
a = field.gen()
R = PolynomialRing(field, "x")
points = [(0, a), (1, a^2), (a^2+1, a^3)]
R.lagrange_polynomial(points)
Output: x^2 + (a^2 + a + 1)*x + a
I'm the author of the galois Python library. Polynomial interpolation can be performed with the lagrange_poly() function. Here's a simple example.
In [1]: import galois
In [2]: galois.__version__
Out[2]: '0.0.32'
In [3]: GF = galois.GF(3**5)
In [4]: x = GF.Random(10); x
Out[4]: GF([ 33, 58, 59, 21, 141, 133, 207, 182, 125, 162], order=3^5)
In [5]: y = GF.Random(10); y
Out[5]: GF([ 34, 239, 120, 170, 31, 165, 180, 79, 215, 215], order=3^5)
In [6]: f = galois.lagrange_poly(x, y); f
Out[6]: Poly(165x^9 + 96x^8 + 9x^7 + 111x^6 + 40x^5 + 208x^4 + 55x^3 + 17x^2 + 118x + 203, GF(3^5))
In [7]: f(x)
Out[7]: GF([ 34, 239, 120, 170, 31, 165, 180, 79, 215, 215], order=3^5)
The finite field element display may be changed to either the polynomial or power representation.
In [8]: GF.display("poly"); f(x)
Out[8]:
GF([ α^3 + 2α + 1, 2α^4 + 2α^3 + 2α^2 + α + 2,
α^4 + α^3 + α^2 + α, 2α^4 + 2α + 2,
α^3 + α + 1, 2α^4 + α,
2α^4 + 2α^2, 2α^3 + 2α^2 + 2α + 1,
2α^4 + α^3 + 2α^2 + 2α + 2, 2α^4 + α^3 + 2α^2 + 2α + 2], order=3^5)
In [9]: GF.display("power"); f(x)
Out[9]:
GF([α^198, α^162, α^116, α^100, α^214, α^137, α^169, α^95, α^175, α^175],
order=3^5)
