/* qpms/lll.c — Lenstra–Lenstra–Lovász (LLL) lattice basis reduction. */
1 #include <cblas.h>
2 #include <string.h>
3 #include <math.h>
4 #include <qpms_error.h>
/// Position of the Gram-Schmidt coefficient mu(k, j), j < k, inside the
/// packed lower-triangular array: row k begins at offset k*(k-1)/2.
static inline size_t mu_index(size_t k, size_t j) {
  const size_t row_start = k * (k - 1) / 2;
  return row_start + j;
}
/// Gram-Schmidt orthogonalisation.
/** Does not return the actual orthogonal basis (as it is not needed
 * for the LLL algorithm as such) but rather only
 * the mu(i,j) coefficients and squared norms of the orthogonal vectors.
 */
static void gram_schmidt(
    double *mu, ///< Array of \f[ \mu_{k,j} = \frac{\vect{v}_i \cdot\vect{v}_i^*}{|\vect v_j^*|^2}\f] of length mu_index(bsize, 0),
    double *vstar_sqnorm, ///< Array of \f$ \abs{\vect v_i^*}^2 \f$ of length bsize.
    const double *v, ///< Vectors to orthogonalise, size [bsize][ndim],
    const size_t bsize, ///< Size of the basis ( = dimensionality of the lattice)
    const size_t ndim ///< Dimensionality of the space.
    )
{
  double vs[bsize][ndim]; // the orthogonalised vectors v*_i
  for (size_t i = 0; i < bsize; ++i) {
    // Start from the original vector…
    memcpy(vs[i], v + i*ndim, ndim * sizeof(double));
    // …accumulate its projection onto the span of the previous v*_j…
    double proj[ndim];
    memset(proj, 0, sizeof(proj));
    for (size_t j = 0; j < i; ++j) {
      const double numerator = cblas_ddot(ndim, v + i*ndim, 1, vs[j], 1);
      mu[mu_index(i, j)] = numerator / vstar_sqnorm[j];
      cblas_daxpy(ndim, mu[mu_index(i, j)], vs[j], 1, proj, 1);
    }
    // …and subtract it, leaving the orthogonal component.
    cblas_daxpy(ndim, -1, proj, 1, vs[i], 1);
    vstar_sqnorm[i] = cblas_ddot(ndim, vs[i], 1, vs[i], 1);
  }
}
/// Squares a double. (The stray semicolon after the original definition
/// was a file-scope empty declaration — a constraint violation in C99/C11.)
static inline double fsq(double x) { return x * x; }
// A naïve implementation of Lenstra-Lenstra-Lovász algorithm.
/** Reduces in place the basis \a b (bsize row vectors of dimension \a ndim,
 * row-major) with Lovász parameter \a delta (typically 0.75).
 * Returns 0 on success; aborts via QPMS_ENSURE on invalid dimensions.
 */
int qpms_reduce_lattice_basis(double *b, const size_t bsize, const size_t ndim,
    double delta)
{
  QPMS_ENSURE(bsize <= ndim,
      "The basis must have less elements (%zd) than the space dimension (%zd).",
      bsize, ndim);
  // Packed triangular array of Gram-Schmidt coefficients mu(i,j), j < i.
  // The "+ 1" avoids a zero-length VLA (undefined behaviour) when bsize == 1.
  double mu[mu_index(bsize, 0) + 1];
  double bstar_sqnorm[bsize];
  gram_schmidt(mu, bstar_sqnorm, b, bsize, ndim);
  size_t k = 1;
  while (k < bsize) {
    // Step 1 of LLL, achieve |mu(k, k-1)| <= 0.5.
    // (k > 0 guard: k may reach 0 after a swap below; mu(0, k-1) would
    // wrap around in size_t arithmetic and read far out of bounds.)
    if (k > 0 && fabs(mu[mu_index(k, k-1)]) > 0.5) {
      double r = round(mu[mu_index(k, k-1)]);
      // "normalize" b(k), replacing it with b(k) - r b(k-1)
      cblas_daxpy(ndim, -r, b+(k-1)*ndim, 1, b+k*ndim, 1);
      // Update mu to correspond to the new b(k): only row k changes, and
      // only its defined entries j < k. (The original looped j < bsize,
      // which reads and writes out-of-range packed indices, corrupting
      // other rows: e.g. mu_index(k,k) == mu_index(k+1,0).)
      for (size_t j = 0; j + 1 < k; ++j)
        mu[mu_index(k, j)] -= r * mu[mu_index(k-1, j)];
      mu[mu_index(k, k-1)] -= r; // mu(k-1, k-1) == 1 implicitly
    }
    // Step 2: test the Lovász condition.
    if (k > 0 && // Case 1
        bstar_sqnorm[k] < (delta - fsq(mu[mu_index(k, k-1)])) * bstar_sqnorm[k-1]) {
      // swap b(k) and b(k-1)
      cblas_dswap(ndim, &b[k*ndim], 1, &b[(k-1)*ndim], 1);
      double B_k = bstar_sqnorm[k];
      double mu_kkm1_old = mu[mu_index(k, k-1)];
      double C = B_k + fsq(mu_kkm1_old) * bstar_sqnorm[k-1];
      mu[mu_index(k, k-1)] *= bstar_sqnorm[k-1] / C;
      bstar_sqnorm[k] *= bstar_sqnorm[k-1] / C;
      bstar_sqnorm[k-1] = C;
      for (size_t j = k+1; j < bsize; ++j) {
        double m = mu[mu_index(j, k-1)];
        // Standard swap update (Cohen, Alg. 2.6.3):
        //   mu(j,k-1) := mu(k,k-1)_new * mu(j,k-1)_old + mu(j,k)_old * B_k / C
        // (the original had m*m, i.e. mu(j,k-1)^2, in place of the first
        // product — a typo that breaks the invariants after a swap)
        mu[mu_index(j, k-1)] = mu[mu_index(k, k-1)] * m
            + mu[mu_index(j, k)] * B_k / C;
        mu[mu_index(j, k)] = m - mu[mu_index(j, k)] * mu_kkm1_old;
      }
      // rows k-1 and k exchange their coefficients w.r.t. the earlier vectors
      for (size_t j = 0; j + 1 < k; ++j) {
        double m = mu[mu_index(k-1, j)];
        mu[mu_index(k-1, j)] = mu[mu_index(k, j)];
        mu[mu_index(k, j)] = m;
      }
      --k;
    } else { // Case 2: size-reduce b(k) against all earlier basis vectors.
      size_t l = k;
      while (l > 0) {
        --l;
        // (The original wrote fabs(mu > 0.5) — fabs of the *comparison*,
        // so coefficients below -0.5 were never reduced.)
        if (fabs(mu[mu_index(k, l)]) > 0.5) {
          double r = round(mu[mu_index(k, l)]);
          cblas_daxpy(ndim, -r, b+l*ndim, 1, b+k*ndim, 1);
          for (size_t j = 0; j < l; ++j)
            mu[mu_index(k, j)] -= r * mu[mu_index(l, j)];
          mu[mu_index(k, l)] -= r;
          l = k; // restart the scan after a reduction
        }
      }
      ++k;
    }
  }
  return 0;
}