author    Ryan Rueger <git@rueg.re>    2025-03-01 20:25:41 +0100
committer Ryan Rueger <git@rueg.re>    2025-03-01 22:11:11 +0100
commit    d40de259097c5e8d8fd35539560ca7c3d47523e7 (patch)
tree      18e0f94350a2329060c2a19b56b0e3e2fdae56f1 /theta_lib/utilities
Initial Commit
Co-Authored-By: Damien Robert <Damien.Olivier.Robert+git@gmail.com>
Co-Authored-By: Frederik Vercauteren <frederik.vercauteren@gmail.com>
Co-Authored-By: Jonathan Komada Eriksen <jonathan.eriksen97@gmail.com>
Co-Authored-By: Pierrick Dartois <pierrickdartois@icloud.com>
Co-Authored-By: Riccardo Invernizzi <nidadoni@gmail.com>
Co-Authored-By: Ryan Rueger <git@rueg.re>
Co-Authored-By: Benjamin Wesolowski <benjamin@pasch.umpa.ens-lyon.fr>
Co-Authored-By: Arthur Herlédan Le Merdy <ahlm@riseup.net>
Co-Authored-By: Boris Fouotsa <tako.fouotsa@epfl.ch>
Diffstat (limited to 'theta_lib/utilities')
-rw-r--r--  theta_lib/utilities/discrete_log.py    273
-rw-r--r--  theta_lib/utilities/fast_sqrt.py        55
-rw-r--r--  theta_lib/utilities/order.py           117
-rw-r--r--  theta_lib/utilities/strategy.py        229
-rw-r--r--  theta_lib/utilities/supersingular.py   413
5 files changed, 1087 insertions, 0 deletions
diff --git a/theta_lib/utilities/discrete_log.py b/theta_lib/utilities/discrete_log.py
new file mode 100644
index 0000000..dff04dc
--- /dev/null
+++ b/theta_lib/utilities/discrete_log.py
@@ -0,0 +1,273 @@
+# ===================================== #
+# Fast DLP solving using Weil pairing #
+# ===================================== #
+
+"""
+This code has been taken from:
+https://github.com/FESTA-PKE/FESTA-SageMath
+
+Copyright (c) 2023 Andrea Basso, Luciano Maino and Giacomo Pope.
+"""
+
+
+
+# Sage imports
+from sage.all import (
+ ZZ
+)
+
+# import pari for fast dlog
+import cypari2
+
+# Make instance of Pari
+pari = cypari2.Pari()
+
+def discrete_log_pari(a, base, order):
+ """
+ Wrapper around pari discrete log. Works like a.log(b),
+ but allows us to use the optional argument order. This
+ is important as we skip Pari attempting to factor the
+ full order of Fp^k, which is slow.
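+
+    Illustrative sketch, solving a discrete logarithm for an element of
+    2-power order (the toy prime and the theta_lib import path are
+    assumptions, used only for demonstration)::
+
+        sage: from theta_lib.utilities.discrete_log import discrete_log_pari
+        sage: p = 3 * 2**18 - 1                    # p + 1 = 3 * 2^18
+        sage: F.<z> = GF(p**2)
+        sage: g = F.multiplicative_generator()**((p**2 - 1) // 2**18)
+        sage: discrete_log_pari(g**31337, g, 2**18)
+        31337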
+ """
+ x = pari.fflog(a, base, order)
+ return ZZ(x)
+
+def ell_discrete_log_pari(E,P,Base,order):
+ """
+ Wrapper around pari elllog function to compute discrete
+ logarithms in elliptic curves over finite fields.
+ Works like P.log(Base).
+ """
+ x = pari.elllog(E,P,Base,order)
+ return ZZ(x)
+
+def weil_pairing_pari(P, Q, D, check=False):
+ """
+ Wrapper around Pari's implementation of the Weil pairing
+ Allows the check of whether P,Q are in E[D] to be optional
+ """
+ if check:
+ nP, nQ = D*P, D*Q
+ if nP.is_zero() or nQ.is_zero():
+ raise ValueError("points must both be n-torsion")
+
+ return pari.ellweilpairing(P.curve(), P, Q, D)
+
+def tate_pairing_pari(P, Q, D):
+ """
+ Wrapper around Pari's implementation of the Tate pairing
+ NOTE: this is not the reduced Tate pairing, so to make this
+ match with SageMath you need
+
+ P.tate_pairing(Q, D, k) == pari.elltatepairing(E, P, Q, D)**((p^k - 1) / D)
+ """
+ E = P.curve()
+ return pari.elltatepairing(E, P, Q, D)
+
+def _precompute_baby_steps(base, step, e):
+ """
+ Helper function to compute the baby steps for
+ pohlig_hellman_base and windowed_pohlig_hellman.
+ """
+ baby_steps = [base]
+ for _ in range(e):
+ base = base**step
+ baby_steps.append(base)
+ return baby_steps
+
+def pohlig_hellman_base(a, base, e):
+ """
+ Solve the discrete log for a = base^x for
+ elements base,a of order 2^e using the
+ Pohlig-Hellman algorithm.
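+
+    Illustrative sketch (toy parameters and import path are assumptions,
+    used only for demonstration)::
+
+        sage: from theta_lib.utilities.discrete_log import pohlig_hellman_base
+        sage: p = 3 * 2**18 - 1
+        sage: F.<z> = GF(p**2)
+        sage: g = F.multiplicative_generator()**((p**2 - 1) // 2**18)
+        sage: pohlig_hellman_base(g**31337, g, 18)
+        31337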
+ """
+ baby_steps = _precompute_baby_steps(base, 2, e)
+
+ dlog = 0
+ exp = 2**(e-1)
+
+ # Solve the discrete log mod 2, only 2 choices
+ # for each digit!
+ for i in range(e):
+ if a**exp != 1:
+ a /= baby_steps[i]
+ dlog += (2**i)
+
+ if a == 1:
+ break
+
+ exp //= 2
+
+ return dlog
+
+def windowed_pohlig_hellman(a, base, e, window):
+ """
+ Solve the discrete log for a = base^x for
+ elements base,a of order 2^e using the
+ windowed Pohlig-Hellman algorithm following
+ https://ia.cr/2016/963.
+
+ Algorithm runs recursively, computing in windows
+ l^wi for window=[w1, w2, w3, ...].
+
+ Runs the base case when window = []
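+
+    Illustrative sketch with two window sizes (toy parameters and import
+    path are assumptions, used only for demonstration)::
+
+        sage: from theta_lib.utilities.discrete_log import windowed_pohlig_hellman
+        sage: p = 3 * 2**18 - 1
+        sage: F.<z> = GF(p**2)
+        sage: g = F.multiplicative_generator()**((p**2 - 1) // 2**18)
+        sage: windowed_pohlig_hellman(g**31337, g, 18, [6, 3])
+        31337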
+ """
+ # Base case when all windows have been used
+ if not window:
+ return pohlig_hellman_base(a, base, e)
+
+ # Collect the next window
+ w, window = window[0], window[1:]
+ step = 2**w
+
+ # When the window is not a divisor of e, we compute
+ # e mod w and solve for both the lower e - e mod w
+ # bits and then the e mod w bits at the end.
+ e_div_w, e_rem = divmod(e, w)
+ e_prime = e - e_rem
+
+    # First force elements to have order 2^(e - e_rem)
+ a_prime = a**(2**e_rem)
+ base_prime = base**(2**e_rem)
+
+    # Compute base_prime^(2^(w*i)) for i in (0, ..., e//w)
+ baby_steps = _precompute_baby_steps(base_prime, step, e_div_w)
+
+ # Initialise some pieces
+ dlog = 0
+ if e_prime:
+ exp = 2**(e_prime - w)
+
+ # Work in subgroup of size 2^w
+ s = base_prime**(exp)
+
+ # Windowed Pohlig-Hellman to recover dlog as
+ # alpha = sum l^(i*w) * alpha_i
+ for i in range(e_div_w):
+ # Solve the dlog in 2^w
+ ri = a_prime**exp
+ alpha_i = windowed_pohlig_hellman(ri, s, w, window)
+
+ # Update a value and dlog computation
+ a_prime /= baby_steps[i]**(alpha_i)
+ dlog += alpha_i * step**i
+
+ if a_prime == 1:
+ break
+
+ exp //= step
+
+ # TODO:
+ # I don't know if this is a nice way to do
+ # this last step... Works well enough but could
+ # be improved I imagine...
+ exp = 2**e_prime
+ if e_rem:
+ base_last = base**exp
+ a_last = a / base**dlog
+ dlog_last = pohlig_hellman_base(a_last, base_last, e_rem)
+ dlog += exp * dlog_last
+
+ return dlog
+
+def BiDLP(R, P, Q, D, ePQ=None):
+ """
+ Given a basis P,Q of E[D] finds
+ a,b such that R = [a]P + [b]Q.
+
+ Uses the fact that
+ e([a]P + [b]Q, [c]P + [d]Q) = e(P,Q)^(ad-bc)
+
+ Optional: include the pairing e(P,Q) which can be precomputed
+ which is helpful when running multiple BiDLP problems with P,Q
+ as input. This happens, for example, during compression.
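+
+    Illustrative sketch (the toy supersingular curve and the import paths
+    are assumptions, used only for demonstration)::
+
+        sage: from theta_lib.utilities.discrete_log import BiDLP
+        sage: from theta_lib.utilities.supersingular import torsion_basis
+        sage: p = 3 * 2**18 - 1                       # p ≡ 3 (mod 4)
+        sage: x = polygen(GF(p))
+        sage: F.<i> = GF(p**2, modulus=x**2 + 1)
+        sage: E = EllipticCurve(F, [0, 0, 0, 1, 0])   # y^2 = x^3 + x is supersingular
+        sage: P, Q = torsion_basis(E, 2**18)
+        sage: BiDLP(17*P + 42*Q, P, Q, 2**18)
+        (17, 42)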
+ """
+ # e(P,Q)
+ if ePQ:
+ pair_PQ = ePQ
+ else:
+ pair_PQ = weil_pairing_pari(P, Q, D)
+
+ # Write R = aP + bQ for unknown a,b
+ # e(R, Q) = e(P, Q)^a
+ pair_a = weil_pairing_pari(R, Q, D)
+
+ # e(R,-P) = e(P, Q)^b
+ pair_b = weil_pairing_pari(R, -P, D)
+
+ # Now solve the dlog in Fq
+ a = discrete_log_pari(pair_a, pair_PQ, D)
+ b = discrete_log_pari(pair_b, pair_PQ, D)
+
+ return a, b
+
+def BiDLP_power_two(R, P, Q, e, window, ePQ=None):
+ r"""
+ Same as the above, but uses optimisations using that
+ D = 2^e.
+
+ First, rather than use the Weil pairing, we can use
+ the Tate pairing which is approx 2x faster.
+
+ Secondly, rather than solve the discrete log naively,
+ we use an optimised windowed Pohlig-Hellman.
+
+ NOTE: this could be optimised further, following the
+ SIKE key-compression algorithms and for the Tate pairing
+ we could reuse Miller loops to save even more time, but
+ that seemed a little overkill for a SageMath PoC
+
+ Finally, as the Tate pairing produces elements in \mu_n
+ we also have fast inversion from conjugation, but SageMath
+ has slow conjugation, so this doesn't help for now.
+ """
+ p = R.curve().base_ring().characteristic()
+ D = 2**e
+ exp = (p**2 - 1) // D
+
+ # e(P,Q)
+ if ePQ:
+ pair_PQ = ePQ
+ else:
+ pair_PQ = tate_pairing_pari(P, Q, D)**exp
+
+ # Write R = aP + bQ for unknown a,b
+ # e(R, Q) = e(P, Q)^a
+ pair_a = tate_pairing_pari(Q, -R, D)**exp
+
+ # e(R,-P) = e(P, Q)^b
+ pair_b = tate_pairing_pari(P, R, D)**exp
+
+ # Now solve the dlog in Fq
+ a = windowed_pohlig_hellman(pair_a, pair_PQ, e, window)
+ b = windowed_pohlig_hellman(pair_b, pair_PQ, e, window)
+
+ return a, b
+
+def DLP_power_two(R, P, Q, e, window, ePQ=None, first=True):
+ r"""
+ This is the same as BiDLP but it only returns either a or b
+ depending on whether first is true or false.
+ This is used in compression, where we only send 3 of the 4
+ scalars from BiDLP
+ """
+ p = R.curve().base_ring().characteristic()
+ D = 2**e
+ exp = (p**2 - 1) // D
+
+ # e(P,Q)
+ if ePQ:
+ pair_PQ = ePQ
+ else:
+ pair_PQ = tate_pairing_pari(P, Q, D)**exp
+
+ if first:
+ pair_a = tate_pairing_pari(Q, -R, D)**exp
+ x = windowed_pohlig_hellman(pair_a, pair_PQ, e, window)
+
+ else:
+ pair_b = tate_pairing_pari(P, R, D)**exp
+ x = windowed_pohlig_hellman(pair_b, pair_PQ, e, window)
+
+ return x
+
diff --git a/theta_lib/utilities/fast_sqrt.py b/theta_lib/utilities/fast_sqrt.py
new file mode 100644
index 0000000..0609fe5
--- /dev/null
+++ b/theta_lib/utilities/fast_sqrt.py
@@ -0,0 +1,55 @@
+
+# ============================================ #
+# Fast square root and quadratic roots #
+# ============================================ #
+
+"""
+Most of this code has been taken from:
+https://github.com/FESTA-PKE/FESTA-SageMath
+
+Copyright (c) 2023 Andrea Basso, Luciano Maino and Giacomo Pope.
+
+Functions with another Copyright mention are not from the above authors.
+"""
+
+def sqrt_Fp2(a):
+ """
+ Efficiently computes the sqrt
+ of an element in Fp2 using that
+ we always have a prime p such that
+ p ≡ 3 mod 4.
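+
+    Illustrative sketch (the toy prime and the modulus x^2 + 1, giving
+    i = sqrt(-1), and the import path are assumptions)::
+
+        sage: from theta_lib.utilities.fast_sqrt import sqrt_Fp2
+        sage: p = 3 * 2**18 - 1
+        sage: x = polygen(GF(p))
+        sage: F.<i> = GF(p**2, modulus=x**2 + 1)
+        sage: a = (3 + 5*i)**2
+        sage: sqrt_Fp2(a) in (3 + 5*i, -(3 + 5*i))
+        True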
+ """
+ Fp2 = a.parent()
+ p = Fp2.characteristic()
+ i = Fp2.gen() # i = √-1
+
+ a1 = a ** ((p - 3) // 4)
+ x0 = a1 * a
+ alpha = a1 * x0
+
+ if alpha == -1:
+ x = i * x0
+ else:
+ b = (1 + alpha) ** ((p - 1) // 2)
+ x = b * x0
+
+ return x
+
+def n_sqrt(a, n):
+    """
+    Computes an n-fold iterated square root of a in Fp2,
+    i.e. an element x such that x^(2^n) = a.
+    """
+    for _ in range(n):
+        a = sqrt_Fp2(a)
+    return a
+
+def sqrt_Fp(a):
+ """
+ Efficiently computes the sqrt
+ of an element in Fp using that
+ we always have a prime p such that
+ p ≡ 3 mod 4.
+
+ Copyright (c) Pierrick Dartois 2025.
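+
+    Illustrative sketch (toy prime and import path are assumptions)::
+
+        sage: from theta_lib.utilities.fast_sqrt import sqrt_Fp
+        sage: p = 3 * 2**18 - 1
+        sage: a = GF(p)(1234)**2
+        sage: sqrt_Fp(a)**2 == a
+        True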
+ """
+ Fp = a.parent()
+ p = Fp.characteristic()
+
+ return a**((p+1)//4)
diff --git a/theta_lib/utilities/order.py b/theta_lib/utilities/order.py
new file mode 100644
index 0000000..223f568
--- /dev/null
+++ b/theta_lib/utilities/order.py
@@ -0,0 +1,117 @@
+# ================================================== #
+# Code to check whether a group element has order D #
+# ================================================== #
+
+"""
+This code has been taken from:
+https://github.com/FESTA-PKE/FESTA-SageMath
+
+Copyright (c) 2023 Andrea Basso, Luciano Maino and Giacomo Pope.
+"""
+
+from sage.all import(
+ ZZ,
+ prod,
+ cached_function
+)
+
+def batch_cofactor_mul_generic(G_list, pis, group_action, lower, upper):
+ """
+    Input: A list of elements `G_list`, such that
+           G sits at index `lower` and the remaining
+           entries of the sublist G_list[lower:upper]
+           are placeholders
+           A list `pis` of primes whose product is D
+           The `group_action` of the group
+           Indices lower and upper
+    Output: None
+
+    NOTE: G_list is filled in place
+ """
+
+ # check that indices are valid
+ if lower > upper:
+        raise ValueError("Wrong input to batch_cofactor_mul_generic()")
+
+ # last recursion step does not need any further splitting
+ if upper - lower == 1:
+ return
+
+ # Split list in two parts,
+ # multiply to get new start points for the two sublists,
+ # and call the function recursively for both sublists.
+ mid = lower + (upper - lower + 1) // 2
+ cl, cu = 1, 1
+ for i in range(lower, mid):
+ cu = cu * pis[i]
+ for i in range(mid, upper):
+ cl = cl * pis[i]
+ # cl = prod(pis[lower:mid])
+ # cu = prod(pis[mid:upper])
+
+ G_list[mid] = group_action(G_list[lower], cu)
+ G_list[lower] = group_action(G_list[lower], cl)
+
+ batch_cofactor_mul_generic(G_list, pis, group_action, lower, mid)
+ batch_cofactor_mul_generic(G_list, pis, group_action, mid, upper)
+
+
+@cached_function
+def has_order_constants(D):
+ """
+ Helper function, finds constants to
+ help with has_order_D
+ """
+ D = ZZ(D)
+ pis = [p for p, _ in D.factor()]
+ D_radical = prod(pis)
+ Dtop = D // D_radical
+ return Dtop, pis
+
+
+def has_order_D(G, D, multiplicative=False):
+ """
+ Given an element G in a group, checks if the
+ element has order exactly D. This is much faster
+ than determining its order, and is enough for
+ many checks we need when computing the torsion
+ basis.
+
+    We allow both additive and multiplicative groups,
+    so this works both for points on a curve and for
+    elements of Fp^k, e.g. when checking the
+    multiplicative order of a Weil pairing output.
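+
+    Illustrative sketch (toy prime and import path are assumptions)::
+
+        sage: from theta_lib.utilities.order import has_order_D
+        sage: p = 3 * 2**18 - 1
+        sage: g = GF(p).multiplicative_generator()
+        sage: has_order_D(g, p - 1, multiplicative=True)
+        True
+        sage: has_order_D(g**2, p - 1, multiplicative=True)
+        False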
+ """
+ # For the case when we work with elements of Fp^k
+ if multiplicative:
+ group_action = lambda a, k: a**k
+ is_identity = lambda a: a == 1
+ identity = 1
+ # For the case when we work with elements of E / Fp^k
+ else:
+ group_action = lambda a, k: k * a
+ is_identity = lambda a: a.is_zero()
+ identity = G.curve()(0)
+
+ if is_identity(G):
+ return False
+
+ D_top, pis = has_order_constants(D)
+
+ # If G is the identity after clearing the top
+ # factors, we can abort early
+ Gtop = group_action(G, D_top)
+ if is_identity(Gtop):
+ return False
+
+ G_list = [identity for _ in range(len(pis))]
+ G_list[0] = Gtop
+
+ # Lastly we have to determine whether removing any prime
+ # factors of the order gives the identity of the group
+ if len(pis) > 1:
+ batch_cofactor_mul_generic(G_list, pis, group_action, 0, len(pis))
+        if any(is_identity(H) for H in G_list):
+            return False
+
+    return True
\ No newline at end of file
diff --git a/theta_lib/utilities/strategy.py b/theta_lib/utilities/strategy.py
new file mode 100644
index 0000000..550ef09
--- /dev/null
+++ b/theta_lib/utilities/strategy.py
@@ -0,0 +1,229 @@
+# ============================================================================ #
+# Compute optimised strategy for 2-isogeny chains (in dimensions 2 and 4) #
+# ============================================================================ #
+
+"""
+The function optimised_strategy has been taken from:
+https://github.com/FESTA-PKE/FESTA-SageMath
+
+Copyright (c) 2023 Andrea Basso, Luciano Maino and Giacomo Pope.
+
+Other functions are original work.
+"""
+
+from sage.all import log, cached_function
+import logging
+logger = logging.getLogger(__name__)
+#logger.setLevel("DEBUG")
+
+@cached_function
+def optimised_strategy(n, mul_c=1):
+ """
+ Algorithm 60: https://sike.org/files/SIDH-spec.pdf
+ Shown to be appropriate for (l,l)-chains in
+ https://ia.cr/2023/508
+
+ Note: the costs we consider are:
+ eval_c: the cost of one isogeny evaluation
+ mul_c: the cost of one element doubling
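+
+    Illustrative sketch (a small depth and the import path are
+    assumptions, used only for demonstration)::
+
+        sage: from theta_lib.utilities.strategy import optimised_strategy
+        sage: optimised_strategy(4)
+        [2, 1, 1]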
+ """
+
+ eval_c = 1.000
+ mul_c = mul_c
+
+ S = {1:[]}
+ C = {1:0 }
+ for i in range(2, n+1):
+ b, cost = min(((b, C[i-b] + C[b] + b*mul_c + (i-b)*eval_c) for b in range(1,i)), key=lambda t: t[1])
+ S[i] = [b] + S[i-b] + S[b]
+ C[i] = cost
+
+ return S[n]
+
+@cached_function
+def optimised_strategy_with_first_eval(n,mul_c=1,first_eval_c=1):
+ r"""
+    Adapted from optimised_strategy when the first isogeny evaluation is more costly.
+    This is well suited to gluing computations. Computes optimal strategies with constraint
+ at the beginning. This takes into account the fact that doublings on the codomain of
+ the first isogeny are impossible (because of zero dual theta constants).
+
+ CAUTION: When splittings are involved, do not use this function. Use
+ optimised_strategy_with_first_eval_and_splitting instead.
+
+ INPUT:
+ - n: number of leaves of the strategy (length of the isogeny).
+ - mul_c: relative cost of one doubling compared to one generic 2-isogeny evaluation.
+ - first_eval_c: relative cost of an evaluation of the first 2-isogeny (gluing)
+ compared to one generic 2-isogeny evaluation.
+
+ OUTPUT:
+ - S_left[n]: an optimal strategy of depth n with constraint at the beginning
+ represented as a sequence [s_0,...,s_{t-2}], where there is an index i for every
+ internal node of the strategy, where indices are ordered depth-first left-first
+    (the order in which the strategy is traversed) and s_i is the number of leaves to the right
+ of internal node i (see https://sike.org/files/SIDH-spec.pdf, pp. 16-17).
+ """
+
+ S_left, _, _, _ = optimised_strategies_with_first_eval(n,mul_c,first_eval_c)
+
+ return S_left[n]
+
+@cached_function
+def optimised_strategies_with_first_eval(n,mul_c=1,first_eval_c=1):
+ r"""
+    Adapted from optimised_strategy when the first isogeny evaluation is more costly.
+    This is well suited to gluing computations. Computes optimal strategies with constraint
+ at the beginning. This takes into account the fact that doublings on the codomain of
+ the first isogeny are impossible (because of zero dual theta constants).
+
+ CAUTION: When splittings are involved, do not use this function. Use
+ optimised_strategy_with_first_eval_and_splitting instead.
+
+ INPUT:
+ - n: number of leaves of the strategy (length of the isogeny).
+ - mul_c: relative cost of one doubling compared to one generic 2-isogeny evaluation.
+ - first_eval_c: relative cost of an evaluation of the first 2-isogeny (gluing)
+ compared to one generic 2-isogeny evaluation.
+
+ OUTPUT:
+ - S_left: Optimal strategies "on the left"/with constraint at the beginning i.e. meeting the
+ first left edge that do not contain any left edge on the line y=sqrt(3)*(x-1).
+    - S_right: Optimal strategies "on the right" i.e. not meeting the first left edge (no constraint).
+    - C_left, C_right: the costs of the strategies in S_left and S_right respectively.
+ """
+
+ # print(f"Strategy eval: n={n}, mul_c={mul_c}, first_eval_c={first_eval_c}")
+
+ eval_c = 1.000
+ first_eval_c = first_eval_c
+ mul_c = mul_c
+
+ S_left = {1:[], 2:[1]} # Optimal strategies "on the left" i.e. meeting the first left edge
+ S_right = {1:[]} # Optimal strategies "on the right" i.e. not meeting the first left edge
+ C_left = {1:0, 2:mul_c+first_eval_c } # Cost of strategies on the left
+ C_right = {1:0 } # Cost of strategies on the right
+ for i in range(2, n+1):
+ # Optimisation on the right
+ b, cost = min(((b, C_right[i-b] + C_right[b] + b*mul_c + (i-b)*eval_c) for b in range(1,i)), key=lambda t: t[1])
+ S_right[i] = [b] + S_right[i-b] + S_right[b]
+ C_right[i] = cost
+
+ for i in range(3,n+1):
+ # Optimisation on the left (b<i-1 to avoid doublings on the codomain of the first isogeny)
+ b, cost = min(((b, C_left[i-b] + C_right[b] + b*mul_c + (i-1-b)*eval_c+first_eval_c) for b in range(1,i-1)), key=lambda t: t[1])
+ S_left[i] = [b] + S_left[i-b] + S_right[b]
+ C_left[i] = cost
+
+ return S_left, S_right, C_left, C_right
+
+@cached_function
+def optimised_strategy_with_first_eval_and_splitting(n,m,mul_c=1,first_eval_c=1):
+    r"""
+    Adapted from optimised_strategy_with_first_eval when the chain is
+    split m steps before the end, so that there are constraints both at
+    the beginning (gluing) and at the end (splitting). Returns an optimal
+    strategy of depth n under these constraints.
+    """
+ eval_c = 1.000
+ first_eval_c = first_eval_c
+ mul_c = mul_c
+
+ S_left, S_middle, C_left, C_middle = optimised_strategies_with_first_eval(n-m,mul_c,first_eval_c)
+
+ ## Optimization of the right part (with constraint at the end only)
+
+    # Dictionary of dictionaries of translated strategies "on the right".
+ # trans_S_right[d][i] is an optimal strategy of depth i
+ # without left edge on the line y=sqrt(3)*(x-(i-1-d))
+ trans_S_right={}
+ trans_C_right={}
+
+ for d in range(1,m+1):
+ trans_S_right[d]={1:[]}
+ trans_C_right[d]={1:0}
+ if d==1:
+ for i in range(3,n-m+d):
+ b, cost = min(((b, C_middle[i-b] + trans_C_right[1][b] + b*mul_c + (i-b)*eval_c) for b in [1]+list(range(3,i))), key=lambda t: t[1])
+ trans_S_right[1][i] = [b] + S_middle[i-b] + trans_S_right[1][b]
+ trans_C_right[1][i] = cost
+ else:
+ for i in range(2,n-m+d):
+ if i!=d+1:
+ b = 1
+ cost = trans_C_right[d-b][i-b] + C_middle[b] + b*mul_c + (i-b)*eval_c
+ for k in range(2,min(i,d)):
+ cost_k = trans_C_right[d-k][i-k] + C_middle[k] + k*mul_c + (i-k)*eval_c
+ if cost_k<cost:
+ b = k
+ cost = cost_k
+ # k=d
+ if i>d:
+ cost_k = C_middle[i-d] + C_middle[d] + d*mul_c + (i-d)*eval_c
+ if cost_k<cost:
+ b = d
+ cost = cost_k
+ for k in range(d+2,i):
+ #print(d,i,k)
+ cost_k = C_middle[i-k] + trans_C_right[d][k] + k*mul_c + (i-k)*eval_c
+ if cost_k<cost:
+ b = k
+ cost = cost_k
+ if b<d:
+ trans_S_right[d][i] = [b] + trans_S_right[d-b][i-b] + S_middle[b]
+ trans_C_right[d][i] = cost
+ else:
+ trans_S_right[d][i] = [b] + S_middle[i-b] + trans_S_right[d][b]
+ trans_C_right[d][i] = cost
+
+ ## Optimization on the left (last part) taking into account the constraints at the beginning and at the end
+ for i in range(n-m+1,n+1):
+ d = i-(n-m)
+ b = 1
+ cost = C_left[i-b] + trans_C_right[d][b] + b*mul_c + (i-1-b)*eval_c + first_eval_c
+ for k in range(2,i):
+ if k!=d+1 and k!=n-1:
+ cost_k = C_left[i-k] + trans_C_right[d][k] + k*mul_c + (i-1-k)*eval_c + first_eval_c
+ if cost_k<cost:
+ b = k
+ cost = cost_k
+
+ S_left[i] = [b] + S_left[i-b] + trans_S_right[d][b]
+ C_left[i] = cost
+
+ return S_left[n]
+
+@cached_function
+def precompute_strategy_with_first_eval(e,m,M=1,S=0.8,I=100):
+ r"""
+ INPUT:
+ - e: isogeny chain length.
+ - m: length of the chain in dimension 2 before gluing in dimension 4.
+ - M: multiplication cost.
+ - S: squaring cost.
+ - I: inversion cost.
+
+ OUTPUT: Optimal strategy to compute an isogeny chain without splitting of
+ length e with m steps in dimension 2 before gluing in dimension 4.
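+
+    Illustrative sketch (toy lengths and import path are assumptions;
+    a strategy for a chain of n = e - m leaves has n - 1 entries)::
+
+        sage: from theta_lib.utilities.strategy import precompute_strategy_with_first_eval
+        sage: len(precompute_strategy_with_first_eval(10, 2))
+        7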
+ """
+ n = e - m
+ eval_c = 4*(16*M+16*S)
+ mul_c = 4*(32*M+32*S)+(48*M+I)/log(n*1.0)
+ first_eval_c = 4*(2*I+(244+6*m)*M+(56+8*m)*S)
+
+ return optimised_strategy_with_first_eval(n, mul_c = mul_c/eval_c, first_eval_c = first_eval_c/eval_c)
+
+@cached_function
+def precompute_strategy_with_first_eval_and_splitting(e,m,M=1,S=0.8,I=100):
+ r"""
+ INPUT:
+ - e: isogeny chain length.
+ - m: length of the chain in dimension 2 before gluing in dimension 4.
+ - M: multiplication cost.
+ - S: squaring cost.
+ - I: inversion cost.
+
+ OUTPUT: Optimal strategy to compute an isogeny chain of length e
+ with m steps in dimension 2 before gluing in dimension 4 and
+ with splitting m steps before the end.
+ """
+ logger.debug(f"Strategy eval split: e={e}, m={m}")
+ n = e - m
+ eval_c = 4*(16*M+16*S)
+ mul_c = 4*(32*M+32*S)+(48*M+I)/log(n*1.0)
+ first_eval_c = 4*(2*I+(244+6*m)*M+(56+8*m)*S)
+
+ return optimised_strategy_with_first_eval_and_splitting(n, m, mul_c = mul_c/eval_c, first_eval_c = first_eval_c/eval_c)
diff --git a/theta_lib/utilities/supersingular.py b/theta_lib/utilities/supersingular.py
new file mode 100644
index 0000000..e760c1b
--- /dev/null
+++ b/theta_lib/utilities/supersingular.py
@@ -0,0 +1,413 @@
+"""
+Helper functions for the supersingular elliptic curve computations in FESTA
+"""
+
+# =========================================== #
+# Compute points of order D and Torsion Bases #
+# =========================================== #
+
+"""
+Most of this code has been taken from:
+https://github.com/FESTA-PKE/FESTA-SageMath
+
+Copyright (c) 2023 Andrea Basso, Luciano Maino and Giacomo Pope.
+
+Functions with another Copyright mention are not from the above authors.
+"""
+
+# Sage Imports
+from sage.all import ZZ, GF, Integers
+
+# Local imports
+from .order import has_order_D
+from .discrete_log import weil_pairing_pari, ell_discrete_log_pari, discrete_log_pari
+from .fast_sqrt import sqrt_Fp2, sqrt_Fp
+
+
+
+
+def random_point(E):
+ """
+    Returns a random point on the elliptic curve E,
+    assumed to be in Montgomery form over a base
+    field of characteristic p ≡ 3 mod 4.
+ """
+ A = E.a_invariants()[1]
+ if E.a_invariants() != (0,A,0,1,0):
+        raise ValueError("Function `random_point` assumes the curve E is in the Montgomery model")
+
+ # Try 10000 times then give up, just protection
+ # for infinite loops
+ F = E.base_ring()
+ for _ in range(10000):
+ x = F.random_element()
+ y2 = x*(x**2 + A*x + 1)
+ if y2.is_square():
+ y = sqrt_Fp2(y2)
+ return E(x, y)
+
+ raise ValueError("Generated 10000 points, something is probably going wrong somewhere.")
+
+def generate_point(E, x_start=0):
+ """
+    Generate points on a curve E with x-coordinates
+    of the form i + x for x in Fp, where i is the
+    generator of Fp^2 with i^2 = -1.
+ """
+ F = E.base_ring()
+ one = F.one()
+
+ if x_start:
+ x = x_start + one
+ else:
+ x = F.gen() + one
+
+ A = E.a_invariants()[1]
+ if E.a_invariants() != (0,A,0,1,0):
+ raise ValueError("Function `generate_point` assumes the curve E is in the Montgomery model")
+
+ # Try 10000 times then give up, just protection
+ # for infinite loops
+ for _ in range(10000):
+ y2 = x*(x**2 + A*x + 1)
+ if y2.is_square():
+ y = sqrt_Fp2(y2)
+ yield E(x, y)
+ x += one
+
+ raise ValueError("Generated 10000 points, something is probably going wrong somewhere.")
+
+def generate_Fp_point(E):
+ """
+    Generates Fp-rational points on the elliptic
+    curve E, assumed to be in Montgomery form over
+    a base field of characteristic p ≡ 3 mod 4,
+    with coefficients defined over Fp.
+
+ Copyright (c) Pierrick Dartois 2025.
+ """
+
+ A = E.a_invariants()[1]
+ if E.a_invariants() != (0,A,0,1,0):
+ raise ValueError("Function `generate_Fp_point` assumes the curve E is in the Montgomery model")
+ if A[1] != 0:
+ raise ValueError("The curve E should be Fp-rational")
+
+ p=E.base_field().characteristic()
+ Fp=GF(p,proof=False)
+ one=Fp(1)
+ x=one
+
+ # Try 10000 times then give up, just protection
+ # for infinite loops
+ for _ in range(10000):
+ y2 = x*(x**2 + A*x + 1)
+ if y2.is_square():
+ y = sqrt_Fp(y2)
+ yield E(x, y)
+ x += one
+
+ raise ValueError("Generated 10000 points, something is probably going wrong somewhere.")
+
+def generate_point_order_D(E, D, x_start=0):
+ """
+ Input: An elliptic curve E / Fp2
+           An integer D dividing (p + 1)
+    Output: Yields points P of order D.
+ """
+ p = E.base().characteristic()
+ n = (p + 1) // D
+
+ Ps = generate_point(E, x_start=x_start)
+ for G in Ps:
+ P = n * G
+
+ # Case when we randomly picked
+ # a point in the n-torsion
+ if P.is_zero():
+ continue
+
+ # Check that P has order exactly D
+ if has_order_D(P, D):
+ P._order = ZZ(D)
+ yield P
+
+ raise ValueError(f"Never found a point P of order D.")
+
+def generate_Fp_point_order_D(E, D):
+ """
+ Input: An elliptic curve E / Fp2
+           An integer D dividing (p + 1)
+    Output: Yields points P of order D.
+
+ Copyright (c) Pierrick Dartois 2025.
+ """
+ p = E.base().characteristic()
+ n = (p + 1) // D
+ if D%2==0:
+ n=n//2
+
+ Ps = generate_Fp_point(E)
+ for G in Ps:
+ P = n * G
+
+ # Case when we randomly picked
+ # a point in the n-torsion
+ if P.is_zero():
+ continue
+
+ # Check that P has order exactly D
+ if has_order_D(P, D):
+ P._order = ZZ(D)
+ yield P
+
+ raise ValueError(f"Never found a point P of order D.")
+
+def compute_point_order_D(E, D, x_start=0):
+ """
+ Wrapper function around a generator which returns the first
+ point of order D
+ """
+    return next(generate_point_order_D(E, D, x_start=x_start))
+
+def compute_Fp_point_order_D(E, D):
+ """
+ Wrapper function around a generator which returns the first
+ point of order D
+
+ Copyright (c) Pierrick Dartois 2025.
+ """
+    return next(generate_Fp_point_order_D(E, D))
+
+def compute_linearly_independent_point_with_pairing(E, P, D, x_start=0):
+ """
+ Input: An elliptic curve E / Fp2
+ A point P ∈ E[D]
+           An integer D dividing (p + 1)
+ Output: A point Q such that E[D] = <P, Q>
+ The Weil pairing e(P,Q)
+ """
+ Qs = generate_point_order_D(E, D, x_start=x_start)
+ for Q in Qs:
+ # Make sure the point is linearly independent
+ pair = weil_pairing_pari(P, Q, D)
+ if has_order_D(pair, D, multiplicative=True):
+ Q._order = ZZ(D)
+ return Q, pair
+ raise ValueError("Never found a point Q linearly independent to P")
+
+def compute_linearly_independent_point(E, P, D, x_start=0):
+ """
+ Wrapper function around `compute_linearly_independent_point_with_pairing`
+ which only returns a linearly independent point
+ """
+ Q, _ = compute_linearly_independent_point_with_pairing(E, P, D, x_start=x_start)
+ return Q
+
+def torsion_basis_with_pairing(E, D):
+ """
+    Generate a basis of E(Fp^2)[D] of a supersingular curve
+
+ While computing E[D] = <P, Q> we naturally compute the
+ Weil pairing e(P,Q), which we also return as in some cases
+ the Weil pairing is then used when solving the BiDLP
+ """
+ p = E.base().characteristic()
+
+ # Ensure D divides the curve's order
+ if (p + 1) % D != 0:
+ print(f"{ZZ(D).factor() = }")
+ print(f"{ZZ(p+1).factor() = }")
+        raise ValueError("D must divide p + 1")
+
+ P = compute_point_order_D(E, D)
+ Q, ePQ = compute_linearly_independent_point_with_pairing(E, P, D, x_start=P[0])
+
+ return P, Q, ePQ
+
+def torsion_basis(E, D):
+ """
+ Wrapper function around torsion_basis_with_pairing which only
+ returns the torsion basis <P,Q> = E[D]
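+
+    Illustrative sketch (the toy supersingular curve and the import path
+    are assumptions, used only for demonstration)::
+
+        sage: from theta_lib.utilities.supersingular import torsion_basis
+        sage: p = 3 * 2**18 - 1                       # p ≡ 3 (mod 4)
+        sage: x = polygen(GF(p))
+        sage: F.<i> = GF(p**2, modulus=x**2 + 1)
+        sage: E = EllipticCurve(F, [0, 0, 0, 1, 0])   # y^2 = x^3 + x is supersingular
+        sage: P, Q = torsion_basis(E, 2**18)
+        sage: P.order() == Q.order() == 2**18
+        True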
+ """
+ P, Q, _ = torsion_basis_with_pairing(E, D)
+ return P, Q
+
+def torsion_basis_2f_frobenius(E,f):
+ """
+ Returns a basis (P, Q) of E[2^f] with pi_p(P)=P and pi_p(Q)=-Q,
+ pi_p being the p-th Frobenius endomorphism of E.
+
+ Copyright (c) Pierrick Dartois 2025.
+ """
+
+ P = compute_Fp_point_order_D(E, 2**f)
+
+ i = E.base_field().gen()
+ p = E.base_field().characteristic()
+
+ R = compute_linearly_independent_point(E, P, 2**f)
+ piR = E(R[0]**p,R[1]**p)
+ piR_p_R = piR + R
+
+ two_lamb = ell_discrete_log_pari(E,piR_p_R,P,2**f)
+ lamb = two_lamb//2
+ Q = R-lamb*P
+
+ piQ = E(Q[0]**p,Q[1]**p)
+
+ assert piQ == -Q
+
+ return P, Q
+
+def torsion_basis_to_Fp_rational_point(E,P,Q,D):
+ """
+ Returns an Fp-rational point R=[lamb]P+[mu]Q
+ of D-torsion given a D-torsion basis (P,Q) of E.
+
+ Copyright (c) Pierrick Dartois 2025.
+ """
+
+ p=E.base_field().characteristic()
+ piP = E(P[0]**p,P[1]**p)
+ piQ = E(Q[0]**p,Q[1]**p)
+
+ e_PQ = weil_pairing_pari(P,Q,D)
+ e_PpiP = weil_pairing_pari(P, piP, D)
+ e_QpiP = weil_pairing_pari(Q, piP, D)
+ e_PpiQ = weil_pairing_pari(P, piQ, D)
+ e_QpiQ = weil_pairing_pari(Q, piQ, D)
+
+ # pi(P)=[a]P+[b]Q, pi(Q)=[c]P+[d]Q
+ a = -discrete_log_pari(e_QpiP,e_PQ,D)
+ b = discrete_log_pari(e_PpiP,e_PQ,D)
+ c = -discrete_log_pari(e_QpiQ,e_PQ,D)
+ d = discrete_log_pari(e_PpiQ,e_PQ,D)
+
+ ZD = Integers(D)
+
+ if a%D==1:
+ mu = ZD(b/d)
+ R=P+mu*Q
+ elif c==0:
+ # c=0 and a!=1 => d=1 so pi(Q)=Q
+ R=Q
+ else:
+ # c!=0 and a!=1
+ mu = ZD((1-a)/c)
+ R=P+mu*Q
+
+ piR = E(R[0]**p,R[1]**p)
+
+ assert piR==R
+
+ return R
+
+# =========================================== #
+# Entangled torsion basis for fast E[2^k] #
+# =========================================== #
+
+def precompute_elligator_tables(F):
+ """
+    Precomputes tables of quadratic non-residues and
+    quadratic residues in Fp2. Used to compute entangled
+ torsion bases following https://ia.cr/2017/1143
+ """
+ u = 2*F.gen()
+
+ T1 = dict()
+ T2 = dict()
+ # TODO: estimate how large r should be
+ for r in range(1, 30):
+ v = 1 / (1 + u*r**2)
+ if v.is_square():
+ T2[r] = v
+ else:
+ T1[r] = v
+ return T1, T2
+
+def entangled_torsion_basis(E, elligator_tables, cofactor):
+ """
+ Optimised algorithm following https://ia.cr/2017/1143
+ which modifies the elligator method of hashing to points
+ to find points P,Q of order k*2^b. Clearing the cofactor
+ gives the torsion basis without checking the order or
+ computing a Weil pairing.
+
+ To do this, we need tables TQNR, TQR of pairs of values
+ (r, v) where r is an integer and v = 1/(1 + ur^2) where
+ v is either a quadratic non-residue or quadratic residue
+ in Fp2 and u = 2i = 2*sqrt(-1).
+ """
+ F = E.base_ring()
+ p = F.characteristic()
+ p_sqrt = (p+1)//4
+
+ i = F.gen()
+ u = 2 * i
+ u0 = 1 + i
+
+ TQNR, TQR = elligator_tables
+
+ # Pick the look up table depending on whether
+ # A = a + ib is a QR or NQR
+ A = E.a_invariants()[1]
+ if (0,A,0,1,0) != E.a_invariants():
+ raise ValueError("The elliptic curve E must be in Montgomery form")
+ if A.is_square():
+ T = TQNR
+ else:
+ T = TQR
+
+ # Look through the table to find point with
+ # rational (x,y)
+ y = None
+ for r, v in T.items():
+ x = -A * v
+
+ t = x * (x**2 + A*x + 1)
+
+ # Break when we find rational y: t = y^2
+ c, d = t.list()
+ z = c**2 + d**2
+ s = z**p_sqrt
+ if s**2 == z:
+ y = sqrt_Fp2(t)
+ break
+
+ if y is None:
+ raise ValueError("Never found a y-coordinate, increase the lookup table size")
+
+ z = (c + s) // 2
+ alpha = z**p_sqrt
+ beta = d / (2*alpha)
+
+ if alpha**2 == z:
+ y = F([alpha, beta])
+ else:
+ y = -F([beta, alpha])
+
+ S1 = E([x, y])
+ S2 = E([u*r**2*x, u0*r*y])
+
+ return cofactor*S1, cofactor*S2
+
+# =============================================== #
+# Ensure Basis <P,Q> of E[2^k] has (0,0) under Q #
+# =============================================== #
+
+def fix_torsion_basis_renes(P, Q, k):
+ """
+    Set the torsion basis P,Q such that
+    2^(k-1)Q = (0,0) to ensure that (0,0)
+    never generates the kernel of a 2-isogeny.
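+
+    Illustrative sketch (the toy supersingular curve and the import path
+    are assumptions, used only for demonstration)::
+
+        sage: from theta_lib.utilities.supersingular import torsion_basis, fix_torsion_basis_renes
+        sage: p = 3 * 2**18 - 1
+        sage: x = polygen(GF(p))
+        sage: F.<i> = GF(p**2, modulus=x**2 + 1)
+        sage: E = EllipticCurve(F, [0, 0, 0, 1, 0])
+        sage: P, Q = torsion_basis(E, 2**18)
+        sage: P, Q = fix_torsion_basis_renes(P, Q, 18)
+        sage: (2**17 * Q)[0] == 0
+        True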
+ """
+ cofactor = 2**(k-1)
+
+ R = cofactor*P
+ if R[0] == 0:
+ return Q, P
+ R = cofactor*Q
+ if R[0] == 0:
+ return P, Q
+ return P, P + Q