55 #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) 57 #define GMP_LIMB_MAX (~ (mp_limb_t) 0) 58 #define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) 60 #define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) 61 #define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) 63 #define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) 64 #define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) 66 #define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) 67 #define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) 69 #define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) 70 #define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) 72 #define gmp_assert_nocarry(x) do { \ 77 #define gmp_clz(count, x) do { \ 78 mp_limb_t __clz_x = (x); \ 81 (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ 84 for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ 89 #define gmp_ctz(count, x) do { \ 90 mp_limb_t __ctz_x = (x); \ 91 unsigned __ctz_c = 0; \ 92 gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ 93 (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ 96 #define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ 100 (sh) = (ah) + (bh) + (__x < (al)); \ 104 #define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ 108 (sh) = (ah) - (bh) - ((al) < (bl)); \ 112 #define gmp_umul_ppmm(w1, w0, u, v) \ 114 mp_limb_t __x0, __x1, __x2, __x3; \ 115 unsigned __ul, __vl, __uh, __vh; \ 116 mp_limb_t __u = (u), __v = (v); \ 118 __ul = __u & GMP_LLIMB_MASK; \ 119 __uh = __u >> (GMP_LIMB_BITS / 2); \ 120 __vl = __v & GMP_LLIMB_MASK; \ 121 __vh = __v >> (GMP_LIMB_BITS / 2); \ 123 __x0 = (mp_limb_t) __ul * __vl; \ 124 __x1 = (mp_limb_t) __ul * __vh; \ 125 __x2 = (mp_limb_t) __uh * __vl; \ 126 __x3 = (mp_limb_t) __uh * __vh; \ 128 __x1 += __x0 >> (GMP_LIMB_BITS / 2); \ 131 __x3 += GMP_HLIMB_BIT; \ 133 (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ 134 (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ 137 #define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ 139 mp_limb_t _qh, _ql, _r, _mask; \ 140 gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ 
141 gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ 142 _r = (nl) - _qh * (d); \ 143 _mask = -(mp_limb_t) (_r > _ql); \ 156 #define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ 158 mp_limb_t _q0, _t1, _t0, _mask; \ 159 gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ 160 gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ 163 (r1) = (n1) - (d1) * (q); \ 164 gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ 165 gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ 166 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ 170 _mask = - (mp_limb_t) ((r1) >= _q0); \ 172 gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ 175 if ((r1) > (d1) || (r0) >= (d0)) \ 178 gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ 184 #define MP_LIMB_T_SWAP(x, y) \ 186 mp_limb_t __mp_limb_t_swap__tmp = (x); \ 188 (y) = __mp_limb_t_swap__tmp; \ 190 #define MP_SIZE_T_SWAP(x, y) \ 192 mp_size_t __mp_size_t_swap__tmp = (x); \ 194 (y) = __mp_size_t_swap__tmp; \ 196 #define MP_BITCNT_T_SWAP(x,y) \ 198 mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ 200 (y) = __mp_bitcnt_t_swap__tmp; \ 202 #define MP_PTR_SWAP(x, y) \ 204 mp_ptr __mp_ptr_swap__tmp = (x); \ 206 (y) = __mp_ptr_swap__tmp; \ 208 #define MP_SRCPTR_SWAP(x, y) \ 210 mp_srcptr __mp_srcptr_swap__tmp = (x); \ 212 (y) = __mp_srcptr_swap__tmp; \ 215 #define MPN_PTR_SWAP(xp,xs, yp,ys) \ 217 MP_PTR_SWAP (xp, yp); \ 218 MP_SIZE_T_SWAP (xs, ys); \ 220 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ 222 MP_SRCPTR_SWAP (xp, yp); \ 223 MP_SIZE_T_SWAP (xs, ys); \ 226 #define MPZ_PTR_SWAP(x, y) \ 228 mpz_ptr __mpz_ptr_swap__tmp = (x); \ 230 (y) = __mpz_ptr_swap__tmp; \ 232 #define MPZ_SRCPTR_SWAP(x, y) \ 234 mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ 236 (y) = __mpz_srcptr_swap__tmp; \ 239 const int mp_bits_per_limb = GMP_LIMB_BITS;
/* Print MSG to stderr and terminate the process.  Used for fatal,
   unrecoverable conditions (out of memory, divide by zero).  */
static void
gmp_die (const char *msg)
{
  fprintf (stderr, "%s\n", msg);
  abort ();
}
/* Default allocation hook: malloc SIZE bytes (SIZE > 0) or die.
   Never returns NULL.  */
static void *
gmp_default_alloc (size_t size)
{
  void *p;

  assert (size > 0);

  p = malloc (size);
  if (!p)
    gmp_die("gmp_default_alloc: Virtual memory exhausted.");

  return p;
}
/* Default reallocation hook: resize OLD to NEW_SIZE bytes or die.
   OLD_SIZE is unused; it exists to match the reallocation signature
   accepted by mp_set_memory_functions.  */
static void *
gmp_default_realloc (void *old, size_t old_size, size_t new_size)
{
  void * p;

  p = realloc (old, new_size);

  if (!p)
    /* Message typo fixed: "realoc" -> "realloc".  */
    gmp_die("gmp_default_realloc: Virtual memory exhausted.");

  return p;
}
/* Default free hook.  SIZE is unused; it exists to match the
   free-function signature accepted by mp_set_memory_functions.  */
static void
gmp_default_free (void *p, size_t size)
{
  free (p);
}
283 static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
284 static void * (*gmp_reallocate_func) (
void *, size_t, size_t) = gmp_default_realloc;
285 static void (*gmp_free_func) (
void *, size_t) = gmp_default_free;
288 mp_get_memory_functions (
void *(**alloc_func) (
size_t),
289 void *(**realloc_func) (
void *,
size_t,
size_t),
290 void (**free_func) (
void *,
size_t))
293 *alloc_func = gmp_allocate_func;
296 *realloc_func = gmp_reallocate_func;
299 *free_func = gmp_free_func;
303 mp_set_memory_functions (
void *(*alloc_func) (
size_t),
304 void *(*realloc_func) (
void *,
size_t,
size_t),
305 void (*free_func) (
void *,
size_t))
308 alloc_func = gmp_default_alloc;
310 realloc_func = gmp_default_realloc;
312 free_func = gmp_default_free;
314 gmp_allocate_func = alloc_func;
315 gmp_reallocate_func = realloc_func;
316 gmp_free_func = free_func;
319 #define gmp_xalloc(size) ((*gmp_allocate_func)((size))) 320 #define gmp_free(p) ((*gmp_free_func) ((p), 0)) 323 gmp_xalloc_limbs (mp_size_t size)
325 return gmp_xalloc (size *
sizeof (mp_limb_t));
329 gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
332 return (*gmp_reallocate_func) (old, 0, size *
sizeof (mp_limb_t));
339 mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
342 for (i = 0; i < n; i++)
347 mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
354 mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
359 return ap[n] > bp[n] ? 1 : -1;
365 mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
368 return an < bn ? -1 : 1;
370 return mpn_cmp (ap, bp, an);
374 mpn_normalized_size (mp_srcptr xp, mp_size_t n)
376 for (; n > 0 && xp[n-1] == 0; n--)
381 #define mpn_zero_p(xp, n) (mpn_normalized_size ((xp), (n)) == 0) 384 mpn_zero (mp_ptr rp, mp_size_t n)
388 for (i = 0; i < n; i++)
393 mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
401 mp_limb_t r = ap[i] + b;
412 mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
417 for (i = 0, cy = 0; i < n; i++)
420 a = ap[i]; b = bp[i];
431 mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
437 cy = mpn_add_n (rp, ap, bp, bn);
439 cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
444 mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
455 mp_limb_t cy = a < b;;
465 mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
470 for (i = 0, cy = 0; i < n; i++)
473 a = ap[i]; b = bp[i];
483 mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
489 cy = mpn_sub_n (rp, ap, bp, bn);
491 cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
496 mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
498 mp_limb_t ul, cl, hpl, lpl;
506 gmp_umul_ppmm (hpl, lpl, ul, vl);
509 cl = (lpl < cl) + hpl;
519 mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
521 mp_limb_t ul, cl, hpl, lpl, rl;
529 gmp_umul_ppmm (hpl, lpl, ul, vl);
532 cl = (lpl < cl) + hpl;
545 mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
547 mp_limb_t ul, cl, hpl, lpl, rl;
555 gmp_umul_ppmm (hpl, lpl, ul, vl);
558 cl = (lpl < cl) + hpl;
571 mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
580 rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
581 rp += 1, vp += 1, vn -= 1;
588 rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
589 rp += 1, vp += 1, vn -= 1;
595 mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
597 mpn_mul (rp, ap, n, bp, n);
601 mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
603 mpn_mul (rp, ap, n, ap, n);
607 mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n,
unsigned int cnt)
609 mp_limb_t high_limb, low_limb;
616 assert (cnt < GMP_LIMB_BITS);
621 tnc = GMP_LIMB_BITS - cnt;
623 retval = low_limb >> tnc;
624 high_limb = (low_limb << cnt);
626 for (i = n; --i != 0;)
629 *--rp = high_limb | (low_limb >> tnc);
630 high_limb = (low_limb << cnt);
638 mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n,
unsigned int cnt)
640 mp_limb_t high_limb, low_limb;
647 assert (cnt < GMP_LIMB_BITS);
649 tnc = GMP_LIMB_BITS - cnt;
651 retval = (high_limb << tnc);
652 low_limb = high_limb >> cnt;
654 for (i = n; --i != 0;)
657 *rp++ = low_limb | (high_limb << tnc);
658 low_limb = high_limb >> cnt;
666 mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
671 assert (ux == 0 || ux == GMP_LIMB_MAX);
672 assert (0 <= i && i <= un );
678 return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
682 return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
686 mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
689 i = bit / GMP_LIMB_BITS;
691 return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
696 mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
699 i = bit / GMP_LIMB_BITS;
701 return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
702 i, ptr, i, GMP_LIMB_MAX);
708 mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
717 assert (u1 >= GMP_LIMB_HIGHBIT);
719 ul = u1 & GMP_LLIMB_MASK;
720 uh = u1 >> (GMP_LIMB_BITS / 2);
723 r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;
725 p = (mp_limb_t) qh * ul;
741 p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
742 ql = (p >> (GMP_LIMB_BITS / 2)) + 1;
745 r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;
747 if (r >= (p << (GMP_LIMB_BITS / 2)))
752 m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
774 gmp_umul_ppmm (th, tl, u0, m);
779 m -= ((r > u1) | ((r == u1) & (tl > u0)));
804 inv->d1 = d << shift;
805 inv->di = mpn_invert_limb (inv->d1);
810 mp_limb_t d1, mp_limb_t d0)
819 d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
824 inv->di = mpn_invert_3by2 (d1, d0);
829 mp_srcptr dp, mp_size_t dn)
834 mpn_div_qr_1_invert (inv, dp[0]);
836 mpn_div_qr_2_invert (inv, dp[1], dp[0]);
849 d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
850 d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
854 inv->di = mpn_invert_3by2 (d1, d0);
861 mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
870 tp = gmp_xalloc_limbs (nn);
871 r = mpn_lshift (tp, np, nn, inv->shift);
883 gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
890 return r >> inv->shift;
894 mpn_div_qr_1 (mp_ptr qp, mp_srcptr np, mp_size_t nn, mp_limb_t d)
899 if ((d & (d-1)) == 0)
901 mp_limb_t r = np[0] & (d-1);
905 mpn_copyi (qp, np, nn);
910 mpn_rshift (qp, np, nn, shift);
918 mpn_div_qr_1_invert (&inv, d);
919 return mpn_div_qr_1_preinv (qp, np, nn, &inv);
924 mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
929 mp_limb_t d1, d0, di, r1, r0;
940 tp = gmp_xalloc_limbs (nn);
941 r1 = mpn_lshift (tp, np, nn, shift);
954 gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);
963 assert ((r0 << (GMP_LIMB_BITS - shift)) == 0);
964 r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
976 mpn_div_qr_2 (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
977 mp_limb_t d1, mp_limb_t d0)
982 mpn_div_qr_2_invert (&inv, d1, d0);
983 mpn_div_qr_2_preinv (qp, rp, np, nn, &inv);
988 mpn_div_qr_pi1 (mp_ptr qp,
989 mp_ptr np, mp_size_t nn, mp_limb_t n1,
990 mp_srcptr dp, mp_size_t dn,
1005 assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
1015 mp_limb_t n0 = np[dn-1+i];
1017 if (n1 == d1 && n0 == d0)
1020 mpn_submul_1 (np+i, dp, dn, q);
1025 gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);
1027 cy = mpn_submul_1 (np + i, dp, dn-2, q);
1037 n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
1051 mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
1052 mp_srcptr dp, mp_size_t dn,
1059 np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
1061 mpn_div_qr_2_preinv (qp, np, np, nn, inv);
1067 assert (inv->d1 == dp[dn-1]);
1068 assert (inv->d0 == dp[dn-2]);
1069 assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);
1073 nh = mpn_lshift (np, np, nn, shift);
1077 mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);
1080 gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
1085 mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
1093 mpn_div_qr_invert (&inv, dp, dn);
1094 if (dn > 2 && inv.shift > 0)
1096 tp = gmp_xalloc_limbs (dn);
1097 gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
1100 mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
1108 mpn_base_power_of_two_p (
unsigned b)
1139 m = GMP_LIMB_MAX / b;
1140 for (exp = 1, p = b; p <= m; exp++)
1148 mpn_limb_size_in_base_2 (mp_limb_t u)
1154 return GMP_LIMB_BITS - shift;
1158 mpn_get_str_bits (
unsigned char *sp,
unsigned bits, mp_srcptr up, mp_size_t un)
1165 sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
1168 mask = (1U << bits) - 1;
1170 for (i = 0, j = sn, shift = 0; j-- > 0;)
1172 unsigned char digit = up[i] >> shift;
1176 if (shift >= GMP_LIMB_BITS && ++i < un)
1178 shift -= GMP_LIMB_BITS;
1179 digit |= up[i] << (bits - shift);
1181 sp[j] = digit & mask;
1189 mpn_limb_get_str (
unsigned char *sp, mp_limb_t w,
1193 for (i = 0; w > 0; i++)
1197 h = w >> (GMP_LIMB_BITS - binv->shift);
1198 l = w << binv->shift;
1200 gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
1201 assert ( (r << (GMP_LIMB_BITS - binv->shift)) == 0);
1210 mpn_get_str_other (
unsigned char *sp,
1212 mp_ptr up, mp_size_t un)
1218 mpn_div_qr_1_invert (&binv, base);
1225 mpn_div_qr_1_invert (&bbinv, info->bb);
1231 w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
1232 un -= (up[un-1] == 0);
1233 done = mpn_limb_get_str (sp + sn, w, &binv);
1235 for (sn += done; done < info->exp; done++)
1240 sn += mpn_limb_get_str (sp + sn, up[0], &binv);
1243 for (i = 0; 2*i + 1 < sn; i++)
1245 unsigned char t = sp[i];
1246 sp[i] = sp[sn - i - 1];
1254 mpn_get_str (
unsigned char *sp,
int base, mp_ptr up, mp_size_t un)
1259 assert (up[un-1] > 0);
1261 bits = mpn_base_power_of_two_p (base);
1263 return mpn_get_str_bits (sp, bits, up, un);
1268 mpn_get_base_info (&info, base);
1269 return mpn_get_str_other (sp, base, &info, up, un);
1274 mpn_set_str_bits (mp_ptr rp,
const unsigned char *sp,
size_t sn,
1281 for (j = sn, rn = 0, shift = 0; j-- > 0; )
1290 rp[rn-1] |= (mp_limb_t) sp[j] << shift;
1292 if (shift >= GMP_LIMB_BITS)
1294 shift -= GMP_LIMB_BITS;
1296 rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
1300 rn = mpn_normalized_size (rp, rn);
1305 mpn_set_str_other (mp_ptr rp,
const unsigned char *sp,
size_t sn,
1313 k = 1 + (sn - 1) % info->exp;
1318 w = w * b + sp[j++];
1322 for (rn = (w > 0); j < sn;)
1327 for (k = 1; k < info->exp; k++)
1328 w = w * b + sp[j++];
1330 cy = mpn_mul_1 (rp, rp, rn, info->bb);
1331 cy += mpn_add_1 (rp, rp, rn, w);
1341 mpn_set_str (mp_ptr rp,
const unsigned char *sp,
size_t sn,
int base)
1348 bits = mpn_base_power_of_two_p (base);
1350 return mpn_set_str_bits (rp, sp, sn, bits);
1355 mpn_get_base_info (&info, base);
1356 return mpn_set_str_other (rp, sp, sn, base, &info);
1367 r->_mp_d = gmp_xalloc_limbs (1);
1373 mpz_init2 (mpz_t r, mp_bitcnt_t bits)
1377 bits -= (bits != 0);
1378 rn = 1 + bits / GMP_LIMB_BITS;
1382 r->_mp_d = gmp_xalloc_limbs (rn);
1388 gmp_free (r->_mp_d);
1392 mpz_realloc (mpz_t r, mp_size_t size)
1394 size = GMP_MAX (size, 1);
1396 r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
1397 r->_mp_alloc = size;
1399 if (GMP_ABS (r->_mp_size) > size)
1406 #define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ 1407 ? mpz_realloc(z,n) \ 1412 mpz_set_si (mpz_t r,
signed long int x)
1419 r->_mp_d[0] = GMP_NEG_CAST (
unsigned long int, x);
1424 mpz_set_ui (mpz_t r,
unsigned long int x)
1436 mpz_set (mpz_t r,
const mpz_t x)
1444 n = GMP_ABS (x->_mp_size);
1445 rp = MPZ_REALLOC (r, n);
1447 mpn_copyi (rp, x->_mp_d, n);
1448 r->_mp_size = x->_mp_size;
1453 mpz_init_set_si (mpz_t r,
signed long int x)
1460 mpz_init_set_ui (mpz_t r,
unsigned long int x)
1467 mpz_init_set (mpz_t r,
const mpz_t x)
1474 mpz_fits_slong_p (
const mpz_t u)
1476 mp_size_t us = u->_mp_size;
1481 return u->_mp_d[0] < GMP_LIMB_HIGHBIT;
1483 return u->_mp_d[0] <= GMP_LIMB_HIGHBIT;
1489 mpz_fits_ulong_p (
const mpz_t u)
1491 mp_size_t us = u->_mp_size;
1493 return (us == (us > 0));
1497 mpz_get_si (
const mpz_t u)
1499 mp_size_t us = u->_mp_size;
1502 return (
long) (u->_mp_d[0] & ~GMP_LIMB_HIGHBIT);
1504 return (
long) (- u->_mp_d[0] | GMP_LIMB_HIGHBIT);
1510 mpz_get_ui (
const mpz_t u)
1512 return u->_mp_size == 0 ? 0 : u->_mp_d[0];
1516 mpz_size (
const mpz_t u)
1518 return GMP_ABS (u->_mp_size);
1522 mpz_getlimbn (
const mpz_t u, mp_size_t n)
1524 if (n >= 0 && n < GMP_ABS (u->_mp_size))
1531 mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
1533 mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
1537 mpz_limbs_read (mpz_srcptr x)
1543 mpz_limbs_modify (mpz_t x, mp_size_t n)
1546 return MPZ_REALLOC (x, n);
1550 mpz_limbs_write (mpz_t x, mp_size_t n)
1552 return mpz_limbs_modify (x, n);
1556 mpz_limbs_finish (mpz_t x, mp_size_t xs)
1559 xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
1560 x->_mp_size = xs < 0 ? -xn : xn;
1564 mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
1567 x->_mp_d = (mp_ptr) xp;
1568 mpz_limbs_finish (x, xs);
1575 mpz_set_d (mpz_t r,
double x)
1586 if (x != x || x == x * 0.5)
1601 B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1603 for (rn = 1; x >= B; rn++)
1606 rp = MPZ_REALLOC (r, rn);
1622 r->_mp_size = sign ? - rn : rn;
1626 mpz_init_set_d (mpz_t r,
double x)
1633 mpz_get_d (
const mpz_t u)
1637 double B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1639 un = GMP_ABS (u->_mp_size);
1646 x = B*x + u->_mp_d[--un];
1648 if (u->_mp_size < 0)
1655 mpz_cmpabs_d (
const mpz_t x,
double d)
1668 B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1672 for (i = 1; i < xn; i++)
1679 for (i = xn; i-- > 0;)
1696 mpz_cmp_d (
const mpz_t x,
double d)
1698 if (x->_mp_size < 0)
1703 return -mpz_cmpabs_d (x, d);
1710 return mpz_cmpabs_d (x, d);
1717 mpz_sgn (
const mpz_t u)
1719 mp_size_t usize = u->_mp_size;
1721 return (usize > 0) - (usize < 0);
1725 mpz_cmp_si (
const mpz_t u,
long v)
1727 mp_size_t usize = u->_mp_size;
1732 return mpz_cmp_ui (u, v);
1733 else if (usize >= 0)
1737 mp_limb_t ul = u->_mp_d[0];
1738 if ((mp_limb_t)GMP_NEG_CAST (
unsigned long int, v) < ul)
1741 return (mp_limb_t)GMP_NEG_CAST (
unsigned long int, v) > ul;
1746 mpz_cmp_ui (
const mpz_t u,
unsigned long v)
1748 mp_size_t usize = u->_mp_size;
1756 mp_limb_t ul = (usize > 0) ? u->_mp_d[0] : 0;
1757 return (ul > v) - (ul < v);
1762 mpz_cmp (
const mpz_t a,
const mpz_t b)
1764 mp_size_t asize = a->_mp_size;
1765 mp_size_t bsize = b->_mp_size;
1768 return (asize < bsize) ? -1 : 1;
1769 else if (asize >= 0)
1770 return mpn_cmp (a->_mp_d, b->_mp_d, asize);
1772 return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
1776 mpz_cmpabs_ui (
const mpz_t u,
unsigned long v)
1778 mp_size_t un = GMP_ABS (u->_mp_size);
1784 ul = (un == 1) ? u->_mp_d[0] : 0;
1786 return (ul > v) - (ul < v);
1790 mpz_cmpabs (
const mpz_t u,
const mpz_t v)
1792 return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
1793 v->_mp_d, GMP_ABS (v->_mp_size));
1797 mpz_abs (mpz_t r,
const mpz_t u)
1802 r->_mp_size = GMP_ABS (r->_mp_size);
1806 mpz_neg (mpz_t r,
const mpz_t u)
1811 r->_mp_size = -r->_mp_size;
1815 mpz_swap (mpz_t u, mpz_t v)
1817 MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
1818 MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
1819 MP_PTR_SWAP (u->_mp_d, v->_mp_d);
1827 mpz_abs_add_ui (mpz_t r,
const mpz_t a,
unsigned long b)
1833 an = GMP_ABS (a->_mp_size);
1840 rp = MPZ_REALLOC (r, an + 1);
1842 cy = mpn_add_1 (rp, a->_mp_d, an, b);
1852 mpz_abs_sub_ui (mpz_t r,
const mpz_t a,
unsigned long b)
1854 mp_size_t an = GMP_ABS (a->_mp_size);
1855 mp_ptr rp = MPZ_REALLOC (r, an);
1862 else if (an == 1 && a->_mp_d[0] < b)
1864 rp[0] = b - a->_mp_d[0];
1869 gmp_assert_nocarry (mpn_sub_1 (rp, a->_mp_d, an, b));
1870 return mpn_normalized_size (rp, an);
1875 mpz_add_ui (mpz_t r,
const mpz_t a,
unsigned long b)
1877 if (a->_mp_size >= 0)
1878 r->_mp_size = mpz_abs_add_ui (r, a, b);
1880 r->_mp_size = -mpz_abs_sub_ui (r, a, b);
1884 mpz_sub_ui (mpz_t r,
const mpz_t a,
unsigned long b)
1886 if (a->_mp_size < 0)
1887 r->_mp_size = -mpz_abs_add_ui (r, a, b);
1889 r->_mp_size = mpz_abs_sub_ui (r, a, b);
1893 mpz_ui_sub (mpz_t r,
unsigned long a,
const mpz_t b)
1895 if (b->_mp_size < 0)
1896 r->_mp_size = mpz_abs_add_ui (r, b, a);
1898 r->_mp_size = -mpz_abs_sub_ui (r, b, a);
1902 mpz_abs_add (mpz_t r,
const mpz_t a,
const mpz_t b)
1904 mp_size_t an = GMP_ABS (a->_mp_size);
1905 mp_size_t bn = GMP_ABS (b->_mp_size);
1911 MPZ_SRCPTR_SWAP (a, b);
1912 MP_SIZE_T_SWAP (an, bn);
1915 rp = MPZ_REALLOC (r, an + 1);
1916 cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);
1924 mpz_abs_sub (mpz_t r,
const mpz_t a,
const mpz_t b)
1926 mp_size_t an = GMP_ABS (a->_mp_size);
1927 mp_size_t bn = GMP_ABS (b->_mp_size);
1931 cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
1934 rp = MPZ_REALLOC (r, an);
1935 gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
1936 return mpn_normalized_size (rp, an);
1940 rp = MPZ_REALLOC (r, bn);
1941 gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
1942 return -mpn_normalized_size (rp, bn);
1949 mpz_add (mpz_t r,
const mpz_t a,
const mpz_t b)
1953 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1954 rn = mpz_abs_add (r, a, b);
1956 rn = mpz_abs_sub (r, a, b);
1958 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
1962 mpz_sub (mpz_t r,
const mpz_t a,
const mpz_t b)
1966 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1967 rn = mpz_abs_sub (r, a, b);
1969 rn = mpz_abs_add (r, a, b);
1971 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
1977 mpz_mul_si (mpz_t r,
const mpz_t u,
long int v)
1981 mpz_mul_ui (r, u, GMP_NEG_CAST (
unsigned long int, v));
1985 mpz_mul_ui (r, u, (
unsigned long int) v);
1989 mpz_mul_ui (mpz_t r,
const mpz_t u,
unsigned long int v)
1997 if (us == 0 || v == 0)
2005 tp = MPZ_REALLOC (r, un + 1);
2006 cy = mpn_mul_1 (tp, u->_mp_d, un, v);
2010 r->_mp_size = (us < 0) ? - un : un;
2014 mpz_mul (mpz_t r,
const mpz_t u,
const mpz_t v)
2017 mp_size_t un, vn, rn;
2024 if (un == 0 || vn == 0)
2030 sign = (un ^ vn) < 0;
2035 mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);
2039 mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
2041 mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);
2044 rn -= tp[rn-1] == 0;
2046 t->_mp_size = sign ? - rn : rn;
2052 mpz_mul_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t bits)
2059 un = GMP_ABS (u->_mp_size);
2066 limbs = bits / GMP_LIMB_BITS;
2067 shift = bits % GMP_LIMB_BITS;
2069 rn = un + limbs + (shift > 0);
2070 rp = MPZ_REALLOC (r, rn);
2073 mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
2078 mpn_copyd (rp + limbs, u->_mp_d, un);
2083 r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
2087 mpz_addmul_ui (mpz_t r,
const mpz_t u,
unsigned long int v)
2091 mpz_mul_ui (t, u, v);
2097 mpz_submul_ui (mpz_t r,
const mpz_t u,
unsigned long int v)
2101 mpz_mul_ui (t, u, v);
2107 mpz_addmul (mpz_t r,
const mpz_t u,
const mpz_t v)
2117 mpz_submul (mpz_t r,
const mpz_t u,
const mpz_t v)
2128 enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
2132 mpz_div_qr (mpz_t q, mpz_t r,
2133 const mpz_t n,
const mpz_t d,
enum mpz_div_round_mode mode)
2135 mp_size_t ns, ds, nn, dn, qs;
2140 gmp_die(
"mpz_div_qr: Divide by zero.");
2158 if (mode == GMP_DIV_CEIL && qs >= 0)
2166 else if (mode == GMP_DIV_FLOOR && qs < 0)
2190 mpz_init_set (tr, n);
2197 mpz_init2 (tq, qn * GMP_LIMB_BITS);
2203 mpn_div_qr (qp, np, nn, d->_mp_d, dn);
2207 qn -= (qp[qn-1] == 0);
2209 tq->_mp_size = qs < 0 ? -qn : qn;
2211 rn = mpn_normalized_size (np, dn);
2212 tr->_mp_size = ns < 0 ? - rn : rn;
2214 if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
2217 mpz_sub_ui (tq, tq, 1);
2219 mpz_add (tr, tr, d);
2221 else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
2224 mpz_add_ui (tq, tq, 1);
2226 mpz_sub (tr, tr, d);
2244 mpz_cdiv_qr (mpz_t q, mpz_t r,
const mpz_t n,
const mpz_t d)
2246 mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
2250 mpz_fdiv_qr (mpz_t q, mpz_t r,
const mpz_t n,
const mpz_t d)
2252 mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
2256 mpz_tdiv_qr (mpz_t q, mpz_t r,
const mpz_t n,
const mpz_t d)
2258 mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
2262 mpz_cdiv_q (mpz_t q,
const mpz_t n,
const mpz_t d)
2264 mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
2268 mpz_fdiv_q (mpz_t q,
const mpz_t n,
const mpz_t d)
2270 mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
2274 mpz_tdiv_q (mpz_t q,
const mpz_t n,
const mpz_t d)
2276 mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
2280 mpz_cdiv_r (mpz_t r,
const mpz_t n,
const mpz_t d)
2282 mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
2286 mpz_fdiv_r (mpz_t r,
const mpz_t n,
const mpz_t d)
2288 mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
2292 mpz_tdiv_r (mpz_t r,
const mpz_t n,
const mpz_t d)
2294 mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
2298 mpz_mod (mpz_t r,
const mpz_t n,
const mpz_t d)
2300 mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
2304 mpz_div_q_2exp (mpz_t q,
const mpz_t u, mp_bitcnt_t bit_index,
2305 enum mpz_div_round_mode mode)
2318 limb_cnt = bit_index / GMP_LIMB_BITS;
2319 qn = GMP_ABS (un) - limb_cnt;
2320 bit_index %= GMP_LIMB_BITS;
2322 if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR))
2326 || !mpn_zero_p (u->_mp_d, limb_cnt)
2327 || (u->_mp_d[limb_cnt]
2328 & (((mp_limb_t) 1 << bit_index) - 1)));
2337 qp = MPZ_REALLOC (q, qn);
2341 mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
2342 qn -= qp[qn - 1] == 0;
2346 mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
2353 mpz_add_ui (q, q, 1);
2359 mpz_div_r_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t bit_index,
2360 enum mpz_div_round_mode mode)
2362 mp_size_t us, un, rn;
2367 if (us == 0 || bit_index == 0)
2372 rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
2375 rp = MPZ_REALLOC (r, rn);
2378 mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);
2384 if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR))
2390 for (cy = 1, i = 0; i < un; i++)
2392 mp_limb_t s = ~u->_mp_d[i] + cy;
2397 for (; i < rn - 1; i++)
2398 rp[i] = GMP_LIMB_MAX;
2407 mpn_copyi (rp, u->_mp_d, un);
2415 mpn_copyi (rp, u->_mp_d, rn - 1);
2417 rp[rn-1] = u->_mp_d[rn-1] & mask;
2419 if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR))
2424 for (i = 0; i < rn && rp[i] == 0; i++)
2441 rn = mpn_normalized_size (rp, rn);
2442 r->_mp_size = us < 0 ? -rn : rn;
2446 mpz_cdiv_q_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t cnt)
2448 mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
2452 mpz_fdiv_q_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t cnt)
2454 mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
2458 mpz_tdiv_q_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t cnt)
2460 mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
2464 mpz_cdiv_r_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t cnt)
2466 mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
2470 mpz_fdiv_r_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t cnt)
2472 mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
2476 mpz_tdiv_r_2exp (mpz_t r,
const mpz_t u, mp_bitcnt_t cnt)
2478 mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
2482 mpz_divexact (mpz_t q,
const mpz_t n,
const mpz_t d)
2484 gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
2488 mpz_divisible_p (
const mpz_t n,
const mpz_t d)
2490 return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
2494 mpz_congruent_p (
const mpz_t a,
const mpz_t b,
const mpz_t m)
2500 if (mpz_sgn (m) == 0)
2501 return (mpz_cmp (a, b) == 0);
2505 res = mpz_divisible_p (t, m);
2511 static unsigned long 2512 mpz_div_qr_ui (mpz_t q, mpz_t r,
2513 const mpz_t n,
unsigned long d,
enum mpz_div_round_mode mode)
2532 qp = MPZ_REALLOC (q, qn);
2536 rl = mpn_div_qr_1 (qp, n->_mp_d, qn, d);
2540 rs = (ns < 0) ? -rs : rs;
2542 if (rl > 0 && ( (mode == GMP_DIV_FLOOR && ns < 0)
2543 || (mode == GMP_DIV_CEIL && ns >= 0)))
2546 gmp_assert_nocarry (mpn_add_1 (qp, qp, qn, 1));
2558 qn -= (qp[qn-1] == 0);
2559 assert (qn == 0 || qp[qn-1] > 0);
2561 q->_mp_size = (ns < 0) ? - qn : qn;
2568 mpz_cdiv_qr_ui (mpz_t q, mpz_t r,
const mpz_t n,
unsigned long d)
2570 return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
2574 mpz_fdiv_qr_ui (mpz_t q, mpz_t r,
const mpz_t n,
unsigned long d)
2576 return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
2580 mpz_tdiv_qr_ui (mpz_t q, mpz_t r,
const mpz_t n,
unsigned long d)
2582 return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
2586 mpz_cdiv_q_ui (mpz_t q,
const mpz_t n,
unsigned long d)
2588 return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
2592 mpz_fdiv_q_ui (mpz_t q,
const mpz_t n,
unsigned long d)
2594 return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
2598 mpz_tdiv_q_ui (mpz_t q,
const mpz_t n,
unsigned long d)
2600 return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
2604 mpz_cdiv_r_ui (mpz_t r,
const mpz_t n,
unsigned long d)
2606 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
2609 mpz_fdiv_r_ui (mpz_t r,
const mpz_t n,
unsigned long d)
2611 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
2614 mpz_tdiv_r_ui (mpz_t r,
const mpz_t n,
unsigned long d)
2616 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
2620 mpz_cdiv_ui (
const mpz_t n,
unsigned long d)
2622 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
2626 mpz_fdiv_ui (
const mpz_t n,
unsigned long d)
2628 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
2632 mpz_tdiv_ui (
const mpz_t n,
unsigned long d)
2634 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
2638 mpz_mod_ui (mpz_t r,
const mpz_t n,
unsigned long d)
2640 return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
2644 mpz_divexact_ui (mpz_t q,
const mpz_t n,
unsigned long d)
2646 gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
2650 mpz_divisible_ui_p (
const mpz_t n,
unsigned long d)
2652 return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
2658 mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
2662 assert ( (u | v) > 0);
2669 gmp_ctz (shift, u | v);
2675 MP_LIMB_T_SWAP (u, v);
2677 while ( (v & 1) == 0)
2687 while ( (u & 1) == 0);
2694 while ( (v & 1) == 0);
2701 mpz_gcd_ui (mpz_t g,
const mpz_t u,
unsigned long v)
2712 un = GMP_ABS (u->_mp_size);
2714 v = mpn_gcd_11 (mpn_div_qr_1 (NULL, u->_mp_d, un, v), v);
2724 mpz_make_odd (mpz_t r)
2728 assert (r->_mp_size > 0);
2730 shift = mpn_common_scan (r->_mp_d[0], 0, r->_mp_d, 0, 0);
2731 mpz_tdiv_q_2exp (r, r, shift);
2737 mpz_gcd (mpz_t g,
const mpz_t u,
const mpz_t v)
2740 mp_bitcnt_t uz, vz, gz;
2742 if (u->_mp_size == 0)
2747 if (v->_mp_size == 0)
2757 uz = mpz_make_odd (tu);
2759 vz = mpz_make_odd (tv);
2760 gz = GMP_MIN (uz, vz);
2762 if (tu->_mp_size < tv->_mp_size)
2765 mpz_tdiv_r (tu, tu, tv);
2766 if (tu->_mp_size == 0)
2776 c = mpz_cmp (tu, tv);
2785 if (tv->_mp_size == 1)
2787 mp_limb_t vl = tv->_mp_d[0];
2788 mp_limb_t ul = mpz_tdiv_ui (tu, vl);
2789 mpz_set_ui (g, mpn_gcd_11 (ul, vl));
2792 mpz_sub (tu, tu, tv);
2796 mpz_mul_2exp (g, g, gz);
2800 mpz_gcdext (mpz_t g, mpz_t s, mpz_t t,
const mpz_t u,
const mpz_t v)
2802 mpz_t tu, tv, s0, s1, t0, t1;
2803 mp_bitcnt_t uz, vz, gz;
2806 if (u->_mp_size == 0)
2809 signed long sign = mpz_sgn (v);
2814 mpz_set_si (t, sign);
2818 if (v->_mp_size == 0)
2821 signed long sign = mpz_sgn (u);
2824 mpz_set_si (s, sign);
2838 uz = mpz_make_odd (tu);
2840 vz = mpz_make_odd (tv);
2841 gz = GMP_MIN (uz, vz);
2847 if (tu->_mp_size < tv->_mp_size)
2850 MPZ_SRCPTR_SWAP (u, v);
2851 MPZ_PTR_SWAP (s, t);
2852 MP_BITCNT_T_SWAP (uz, vz);
2878 mpz_setbit (t0, uz);
2879 mpz_tdiv_qr (t1, tu, tu, tv);
2880 mpz_mul_2exp (t1, t1, uz);
2882 mpz_setbit (s1, vz);
2885 if (tu->_mp_size > 0)
2888 shift = mpz_make_odd (tu);
2889 mpz_mul_2exp (t0, t0, shift);
2890 mpz_mul_2exp (s0, s0, shift);
2896 c = mpz_cmp (tu, tv);
2907 mpz_sub (tv, tv, tu);
2908 mpz_add (t0, t0, t1);
2909 mpz_add (s0, s0, s1);
2911 shift = mpz_make_odd (tv);
2912 mpz_mul_2exp (t1, t1, shift);
2913 mpz_mul_2exp (s1, s1, shift);
2917 mpz_sub (tu, tu, tv);
2918 mpz_add (t1, t0, t1);
2919 mpz_add (s1, s0, s1);
2921 shift = mpz_make_odd (tu);
2922 mpz_mul_2exp (t0, t0, shift);
2923 mpz_mul_2exp (s0, s0, shift);
2932 mpz_mul_2exp (tv, tv, gz);
2938 mpz_divexact (s1, v, tv);
2940 mpz_divexact (t1, u, tv);
2946 if (mpz_odd_p (s0) || mpz_odd_p (t0))
2948 mpz_sub (s0, s0, s1);
2949 mpz_add (t0, t0, t1);
2951 mpz_divexact_ui (s0, s0, 2);
2952 mpz_divexact_ui (t0, t0, 2);
2956 mpz_add (s1, s0, s1);
2957 if (mpz_cmpabs (s0, s1) > 0)
2960 mpz_sub (t0, t0, t1);
2962 if (u->_mp_size < 0)
2964 if (v->_mp_size < 0)
/* mpz_lcm: least common multiple of u and v.  A zero operand yields 0;
   otherwise u is divided exactly by gcd(u, v) before multiplying.
   NOTE(review): fragmentary excerpt; gcd computation and the final
   multiply/abs lines are elided from this view.  */
2982 mpz_lcm (mpz_t r,
const mpz_t u,
const mpz_t v)
2986 if (u->_mp_size == 0 || v->_mp_size == 0)
2995 mpz_divexact (g, u, g);
/* mpz_lcm_ui: lcm of u with an unsigned long v.  Zero operands give 0;
   otherwise v is reduced by gcd(u, v) and multiplied back onto u.
   NOTE(review): fragmentary excerpt; the zero-result and abs lines are
   elided from this view.  */
3003 mpz_lcm_ui (mpz_t r,
const mpz_t u,
unsigned long v)
3005 if (v == 0 || u->_mp_size == 0)
/* NULL first arg: mpz_gcd_ui is used only for its returned limb gcd.  */
3011 v /= mpz_gcd_ui (NULL, u, v);
3012 mpz_mul_ui (r, u, v);
/* mpz_invert: modular inverse of u mod m via mpz_gcdext; invertible iff
   gcd(u, m) == 1.  The cofactor is then normalized into the canonical
   residue range by adding/subtracting m depending on m's sign.
   NOTE(review): fragmentary excerpt; temporaries' init/clear and the
   return statements are elided from this view.  */
3018 mpz_invert (mpz_t r,
const mpz_t u,
const mpz_t m)
/* u == 0 or |m| <= 1: no inverse exists.  */
3023 if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
3029 mpz_gcdext (g, tr, NULL, u, m);
3030 invertible = (mpz_cmp_ui (g, 1) == 0);
/* Shift a negative cofactor into the principal residue class.  */
3034 if (tr->_mp_size < 0)
3036 if (m->_mp_size >= 0)
3037 mpz_add (tr, tr, m);
3039 mpz_sub (tr, tr, m);
/* mpz_pow_ui: r = b^e by left-to-right binary exponentiation, scanning
   e from GMP_ULONG_HIGHBIT downward (square, conditionally multiply).
   NOTE(review): fragmentary excerpt; the bit loop, the conditional on
   each bit, and the final swap/clear are elided from this view.  */
3053 mpz_pow_ui (mpz_t r,
const mpz_t b,
unsigned long e)
/* Accumulator starts at 1 so the loop handles e == 0 correctly.  */
3057 mpz_init_set_ui (tr, 1);
3059 bit = GMP_ULONG_HIGHBIT;
3062 mpz_mul (tr, tr, tr);
3064 mpz_mul (tr, tr, b);
/* mpz_ui_pow_ui: r = blimb^e; wraps mpz_pow_ui with a temporary mpz
   base built from the unsigned long.
   NOTE(review): fragmentary excerpt; the temporary's declaration and
   mpz_clear are elided from this view.  */
3074 mpz_ui_pow_ui (mpz_t r,
unsigned long blimb,
unsigned long e)
3077 mpz_init_set_ui (b, blimb);
3078 mpz_pow_ui (r, b, e);
/* mpz_powm: r = b^e mod m.  Binary exponentiation over e's limbs with
   reduction by the precomputed divisor inverse (mpn_div_qr_invert /
   mpn_div_qr_preinv).  A negative exponent requires b invertible mod m.
   NOTE(review): fragmentary excerpt; loops, braces and several
   normalization steps are elided from this view.  */
3083 mpz_powm (mpz_t r,
const mpz_t b,
const mpz_t e,
const mpz_t m)
3093 en = GMP_ABS (e->_mp_size);
3094 mn = GMP_ABS (m->_mp_size);
/* Division by zero modulus is a fatal error.  */
3096 gmp_die (
"mpz_powm: Zero modulo.");
/* Precompute the reciprocal used by all later reductions.  */
3105 mpn_div_qr_invert (&minv, mp, mn);
/* Work on a shifted (normalized) copy of the modulus.  */
3114 tp = gmp_xalloc_limbs (mn);
3115 gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
/* e < 0: replace the base by its modular inverse.  */
3121 if (e->_mp_size < 0)
3123 if (!mpz_invert (base, b, m))
3124 gmp_die (
"mpz_powm: Negative exponent and non-invertible base.");
3131 bn = base->_mp_size;
/* Reduce the base below m before the main loop.  */
3134 mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
/* Negative base: fold into [0, m) by subtracting from m.  */
3141 if (b->_mp_size < 0)
3143 mp_ptr bp = MPZ_REALLOC (base, mn);
3144 gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
3147 base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
3149 mpz_init_set_ui (tr, 1);
/* Scan exponent limbs high-to-low, bits high-to-low within each.  */
3153 mp_limb_t w = e->_mp_d[en];
3156 bit = GMP_LIMB_HIGHBIT;
3159 mpz_mul (tr, tr, tr);
3161 mpz_mul (tr, tr, base);
/* Keep the accumulator reduced whenever it outgrows the modulus.  */
3162 if (tr->_mp_size > mn)
3164 mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3165 tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
/* Final reduction so the result is strictly below m.  */
3173 if (tr->_mp_size >= mn)
3176 mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3177 tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
/* mpz_powm_ui: r = b^elimb mod m; wraps mpz_powm with a temporary mpz
   exponent built from the unsigned long.
   NOTE(review): fragmentary excerpt; the temporary's declaration and
   mpz_clear are elided from this view.  */
3188 mpz_powm_ui (mpz_t r,
const mpz_t b,
unsigned long elimb,
const mpz_t m)
3191 mpz_init_set_ui (e, elimb);
3192 mpz_powm (r, b, e, m);
/* mpz_rootrem: x = trunc(y^(1/z)), r = y - x^z.  Rejects even roots of
   negative y and z == 0, then uses Newton iteration: an initial
   power-of-two guess of about sizeinbase(y,2)/z + 1 bits, refined until
   the iterate stops decreasing (loop guarded by mpz_cmpabs (t, u) < 0).
   NOTE(review): fragmentary excerpt; temporaries, the do-loop openings
   and the remainder computation are elided from this view.  */
3198 mpz_rootrem (mpz_t x, mpz_t r,
const mpz_t y,
unsigned long z)
3203 sgn = y->_mp_size < 0;
/* Even z with negative y: ~z has bit 0 set iff z is even.  */
3204 if ((~z & sgn) != 0)
3205 gmp_die (
"mpz_rootrem: Negative argument, with even root.");
3207 gmp_die (
"mpz_rootrem: Zeroth root.");
/* |y| <= 1: root is y itself, remainder 0 (handled in elided lines).  */
3209 if (mpz_cmpabs_ui (y, 1) <= 0) {
/* Bit size of the initial over-estimate.  */
3220 tb = mpz_sizeinbase (y, 2) / z + 1;
/* First refinement phase (z == 2 style averaging).  */
3228 mpz_tdiv_q (t, y, u);
3230 mpz_tdiv_q_2exp (t, t, 1);
3231 }
while (mpz_cmpabs (t, u) < 0);
/* General Newton step: t = ((z-1)*u + y/u^(z-1)) / z.  */
3241 mpz_pow_ui (t, u, z - 1);
3242 mpz_tdiv_q (t, y, t);
3243 mpz_mul_ui (v, u, z - 1);
3245 mpz_tdiv_q_ui (t, t, z);
3246 }
while (mpz_cmpabs (t, u) < 0);
/* Remainder: r = y - u^z (subtraction in elided lines).  */
3252 mpz_pow_ui (t, u, z);
/* mpz_root: x = trunc(y^(1/z)); returns nonzero iff the root is exact
   (remainder from mpz_rootrem is zero).
   NOTE(review): fragmentary excerpt; r's init/clear and the return are
   elided from this view.  */
3262 mpz_root (mpz_t x,
const mpz_t y,
unsigned long z)
3268 mpz_rootrem (x, r, y, z);
3269 res = r->_mp_size == 0;
/* mpz_sqrtrem: s = floor(sqrt(u)), r = u - s^2; thin wrapper over
   mpz_rootrem with z == 2.  */
3277 mpz_sqrtrem (mpz_t s, mpz_t r,
const mpz_t u)
3279 mpz_rootrem (s, r, u, 2);
/* mpz_sqrt: s = floor(sqrt(u)); remainder discarded (NULL r accepted
   by mpz_rootrem).  */
3283 mpz_sqrt (mpz_t s,
const mpz_t u)
3285 mpz_rootrem (s, NULL, u, 2);
/* mpz_perfect_square_p: nonzero iff u is a perfect square.  Negative u
   is never a square; zero is.  Positive u delegates to mpz_root with a
   NULL result operand.  */
3289 mpz_perfect_square_p (
const mpz_t u)
3291 if (u->_mp_size <= 0)
3292 return (u->_mp_size == 0);
3294 return mpz_root (NULL, u, 2);
/* mpn_perfect_square_p: mpn-level perfect-square test.  Requires a
   normalized operand (top limb nonzero); views the limb array as a
   read-only mpz via mpz_roinit_n and reuses mpz_root.
   NOTE(review): fragmentary excerpt; the mpz_t t declaration and the
   n > 0 assertion are elided from this view.  */
3298 mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
3303 assert (p [n-1] != 0);
3304 return mpz_root (NULL, mpz_roinit_n (t, p, n), 2);
/* mpn_sqrtrem: mpn-level square root with remainder.  Wraps
   mpz_rootrem over a read-only view of p; the root always occupies
   exactly (n+1)/2 limbs (asserted), the remainder size is returned.
   NOTE(review): fragmentary excerpt; temporaries' init/clear, zero
   padding of sp, and the return are elided from this view.  */
3308 mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
3314 assert (p [n-1] != 0);
3318 mpz_rootrem (s, r, mpz_roinit_n (u, p, n), 2);
3320 assert (s->_mp_size == (n+1)/2);
3321 mpn_copyd (sp, s->_mp_d, s->_mp_size);
/* rp may be NULL when the caller only wants the root (guard elided).  */
3325 mpn_copyd (rp, r->_mp_d, res);
/* mpz_fac_ui: x = n!.  Seeds x with n (or 1 when n == 0, via the
   n + (n == 0) trick) and multiplies down by --n.
   NOTE(review): fragmentary excerpt; the loop condition around the
   multiply is elided from this view.  */
3333 mpz_fac_ui (mpz_t x,
unsigned long n)
3335 mpz_set_ui (x, n + (n == 0));
3337 mpz_mul_ui (x, x, --n);
/* mpz_bin_uiui: r = binomial(n, k).  Starts from 0/1 via (k <= n),
   replaces k by min(k, n-k) for fewer iterations, accumulates the
   falling product of n and divides exactly by k!.
   NOTE(review): fragmentary excerpt; the k! computation and loop
   structure are elided from this view.  */
3341 mpz_bin_uiui (mpz_t r,
unsigned long n,
unsigned long k)
/* k > n: result is 0 and stays 0.  */
3345 mpz_set_ui (r, k <= n);
3348 k = (k <= n) ? n - k : 0;
3354 mpz_mul_ui (r, r, n--);
3356 mpz_divexact (r, r, t);
/* gmp_millerrabin: one Miller-Rabin round for n, with nm1 = n-1 =
   q * 2^k (q odd) precomputed by the caller; y holds the witness base.
   Returns nonzero (probably prime) when y^q == 1 or a repeated
   squaring hits n-1 before reaching 1.
   NOTE(review): fragmentary excerpt; the squaring loop header and
   return statements are elided from this view.  */
3363 gmp_millerrabin (
const mpz_t n,
const mpz_t nm1, mpz_t y,
3364 const mpz_t q, mp_bitcnt_t k)
3369 mpz_powm (y, y, q, n);
3371 if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
3376 mpz_powm_ui (y, y, 2, n);
3377 if (mpz_cmp (y, nm1) == 0)
/* y collapsed to <= 1 without passing through n-1: composite.  */
3382 if (mpz_cmp_ui (y, 1) <= 0)
/* GMP_PRIME_PRODUCT: product of odd primes < 30, used as a quick gcd
   sieve.  GMP_PRIME_MASK: bitmask over odd values < 64 marking primes.
   mpz_probab_prime_p: 2 for |n| == 2, table lookup for |n| < 64, gcd
   sieve, then `reps` Miller-Rabin rounds with bases j*j+j+41.
   NOTE(review): fragmentary excerpt; early returns, variable
   declarations and loop braces are elided from this view.  */
3389 #define GMP_PRIME_PRODUCT \ 3390 (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) 3393 #define GMP_PRIME_MASK 0xc96996dcUL 3396 mpz_probab_prime_p (
const mpz_t n,
int reps)
3408 return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;
3411 assert (n->_mp_size != 0);
/* Small odd |n|: the mask's bit (|n| >> 1) decides primality.  */
3413 if (mpz_cmpabs_ui (n, 64) < 0)
3414 return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;
/* Shares a factor with the small-prime product: composite.  */
3416 if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
/* Below 31^2 and sieve-clean: certainly prime.  */
3420 if (mpz_cmpabs_ui (n, 31*31) < 0)
/* Factor n-1 = q * 2^k with q odd for Miller-Rabin.  */
3433 nm1->_mp_size = mpz_abs_sub_ui (nm1, n, 1);
3434 k = mpz_scan1 (nm1, 0);
3435 mpz_tdiv_q_2exp (q, nm1, k);
/* `is_prime & (j < reps)`: branch-free loop guard over both flags.  */
3437 for (j = 0, is_prime = 1; is_prime & (j < reps); j++)
3439 mpz_set_ui (y, (
unsigned long) j*j+j+41);
/* Bases must stay below n-1; stop when they no longer fit.  */
3440 if (mpz_cmp (y, nm1) >= 0)
3447 is_prime = gmp_millerrabin (n, nm1, y, q, k);
/* mpz_tstbit: test bit bit_index of d, honoring two's-complement
   semantics for negative d.  For negatives the stored magnitude bit
   must be adjusted depending on whether any lower limb is nonzero.
   NOTE(review): fragmentary excerpt; dn/w/bit declarations and the
   sign-handling returns are elided from this view.  */
3482 mpz_tstbit (
const mpz_t d, mp_bitcnt_t bit_index)
3484 mp_size_t limb_index;
3493 limb_index = bit_index / GMP_LIMB_BITS;
/* Beyond the stored limbs: 0 for d >= 0, 1 for d < 0 (elided).  */
3494 if (limb_index >= dn)
3497 shift = bit_index % GMP_LIMB_BITS;
3498 w = d->_mp_d[limb_index];
3499 bit = (w >> shift) & 1;
/* Negative d: complement unless all bits below bit_index are zero.  */
3505 if (shift > 0 && (w << (GMP_LIMB_BITS - shift)) > 0)
3507 while (limb_index-- > 0)
3508 if (d->_mp_d[limb_index] > 0)
/* mpz_abs_add_bit: |d| += 2^bit_index, preserving d's sign.  Either
   extends d with a new top limb holding the bit (zero-filling the gap)
   or adds the bit into the existing limbs, growing on carry.
   NOTE(review): fragmentary excerpt; dp/bit/cy declarations, else
   branch and carry store are elided from this view.  */
3515 mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
3517 mp_size_t dn, limb_index;
3521 dn = GMP_ABS (d->_mp_size);
3523 limb_index = bit_index / GMP_LIMB_BITS;
3524 bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
/* Bit lies above the current top limb: grow and zero-fill.  */
3526 if (limb_index >= dn)
3531 dp = MPZ_REALLOC (d, limb_index + 1);
3533 dp[limb_index] = bit;
3534 for (i = dn; i < limb_index; i++)
3536 dn = limb_index + 1;
/* Bit lies inside: plain limb addition, may carry out.  */
3544 cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
3547 dp = MPZ_REALLOC (d, dn + 1);
/* Restore the original sign on the possibly-grown magnitude.  */
3552 d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
/* mpz_abs_sub_bit: |d| -= 2^bit_index, preserving d's sign.  Caller
   guarantees the bit is set (limb_index < dn asserted; mpn_sub_1 must
   not borrow).  Result is renormalized before the sign is restored.
   NOTE(review): fragmentary excerpt; dp/bit declarations are elided
   from this view.  */
3556 mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
3558 mp_size_t dn, limb_index;
3562 dn = GMP_ABS (d->_mp_size);
3565 limb_index = bit_index / GMP_LIMB_BITS;
3566 bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
3568 assert (limb_index < dn);
3570 gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
3571 dn - limb_index, bit));
3572 dn = mpn_normalized_size (dp, dn);
3573 d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
/* mpz_setbit: set bit bit_index of d (two's-complement view).  Only
   acts when the bit is currently clear; for negative d setting a bit
   of the value means clearing one in the stored magnitude.  */
3577 mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
3579 if (!mpz_tstbit (d, bit_index))
3581 if (d->_mp_size >= 0)
3582 mpz_abs_add_bit (d, bit_index);
3584 mpz_abs_sub_bit (d, bit_index);
/* mpz_clrbit: clear bit bit_index of d (two's-complement view); the
   mirror image of mpz_setbit, acting only when the bit is set.  */
3589 mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
3591 if (mpz_tstbit (d, bit_index))
3593 if (d->_mp_size >= 0)
3594 mpz_abs_sub_bit (d, bit_index);
3596 mpz_abs_add_bit (d, bit_index);
/* mpz_combit: toggle bit bit_index of d.  The XOR with (d < 0) folds
   both the set-on-negative and clear-on-positive cases into one test
   choosing between magnitude subtraction and addition.  */
3601 mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
3603 if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
3604 mpz_abs_sub_bit (d, bit_index);
3606 mpz_abs_add_bit (d, bit_index);
/* mpz_com: r = ~u, i.e. -(u + 1); implemented as negate-then-subtract.
   NOTE(review): fragmentary excerpt; the mpz_neg line is elided from
   this view.  */
3610 mpz_com (mpz_t r,
const mpz_t u)
3613 mpz_sub_ui (r, r, 1);
/* mpz_and: r = u & v with two's-complement semantics over the
   sign-magnitude representation.  uc/vc/rc are "complement carries":
   (limb ^ mask) + carry converts between magnitude and two's
   complement on the fly, so negatives need no temporary copies.
   NOTE(review): fragmentary excerpt; mask setup (ux/vx/rx), loop
   braces and carry propagation lines are elided from this view.  */
3617 mpz_and (mpz_t r,
const mpz_t u,
const mpz_t v)
3619 mp_size_t un, vn, rn, i;
3622 mp_limb_t ux, vx, rx;
3623 mp_limb_t uc, vc, rc;
3624 mp_limb_t ul, vl, rl;
3626 un = GMP_ABS (u->_mp_size);
3627 vn = GMP_ABS (v->_mp_size);
/* Ensure un >= vn so the second loop only extends u's limbs.  */
3630 MPZ_SRCPTR_SWAP (u, v);
3631 MP_SIZE_T_SWAP (un, vn);
3639 uc = u->_mp_size < 0;
3640 vc = v->_mp_size < 0;
/* rc accounts for the result's own complement conversion.  */
3650 rp = MPZ_REALLOC (r, rn + rc);
/* Common limbs: complement-convert, AND, convert back.  */
3658 ul = (up[i] ^ ux) + uc;
3661 vl = (vp[i] ^ vx) + vc;
3664 rl = ( (ul & vl) ^ rx) + rc;
/* Remaining u limbs AND against v's sign extension vx.  */
3673 ul = (up[i] ^ ux) + uc;
3676 rl = ( (ul & vx) ^ rx) + rc;
3683 rn = mpn_normalized_size (rp, rn);
3685 r->_mp_size = rx ? -rn : rn;
/* mpz_ior: r = u | v; same complement-carry scheme as mpz_and (see
   that function), with OR as the limb operation.
   NOTE(review): fragmentary excerpt; mask setup, loop braces and carry
   propagation lines are elided from this view.  */
3689 mpz_ior (mpz_t r,
const mpz_t u,
const mpz_t v)
3691 mp_size_t un, vn, rn, i;
3694 mp_limb_t ux, vx, rx;
3695 mp_limb_t uc, vc, rc;
3696 mp_limb_t ul, vl, rl;
3698 un = GMP_ABS (u->_mp_size);
3699 vn = GMP_ABS (v->_mp_size);
/* Ensure un >= vn so the tail loop only walks u's limbs.  */
3702 MPZ_SRCPTR_SWAP (u, v);
3703 MP_SIZE_T_SWAP (un, vn);
3711 uc = u->_mp_size < 0;
3712 vc = v->_mp_size < 0;
3723 rp = MPZ_REALLOC (r, rn + rc);
/* Common limbs: complement-convert, OR, convert back.  */
3731 ul = (up[i] ^ ux) + uc;
3734 vl = (vp[i] ^ vx) + vc;
3737 rl = ( (ul | vl) ^ rx) + rc;
/* Remaining u limbs OR against v's sign extension vx.  */
3746 ul = (up[i] ^ ux) + uc;
3749 rl = ( (ul | vx) ^ rx) + rc;
3756 rn = mpn_normalized_size (rp, rn);
3758 r->_mp_size = rx ? -rn : rn;
/* mpz_xor: r = u ^ v; same complement-carry scheme as mpz_and, with
   XOR as the limb operation.  Result size is un (the larger count).
   NOTE(review): fragmentary excerpt; mask setup, loop braces and carry
   propagation lines are elided from this view.  */
3762 mpz_xor (mpz_t r,
const mpz_t u,
const mpz_t v)
3764 mp_size_t un, vn, i;
3767 mp_limb_t ux, vx, rx;
3768 mp_limb_t uc, vc, rc;
3769 mp_limb_t ul, vl, rl;
3771 un = GMP_ABS (u->_mp_size);
3772 vn = GMP_ABS (v->_mp_size);
/* Ensure un >= vn so the tail loop only walks u's limbs.  */
3775 MPZ_SRCPTR_SWAP (u, v);
3776 MP_SIZE_T_SWAP (un, vn);
3784 uc = u->_mp_size < 0;
3785 vc = v->_mp_size < 0;
3792 rp = MPZ_REALLOC (r, un + rc);
/* Common limbs: XOR folds both masks into one `^ rx`.  */
3800 ul = (up[i] ^ ux) + uc;
3803 vl = (vp[i] ^ vx) + vc;
3806 rl = (ul ^ vl ^ rx) + rc;
/* Remaining u limbs XOR against v's sign extension.  */
3815 ul = (up[i] ^ ux) + uc;
3818 rl = (ul ^ ux) + rc;
3825 un = mpn_normalized_size (rp, un);
3827 r->_mp_size = rx ? -un : un;
/* gmp_popcount_limb: population count of one limb, 16 bits per
   iteration using the classic parallel bit-sum (pairs, nibbles, bytes,
   then the two byte sums).  Works for any limb width.
   NOTE(review): fragmentary excerpt; c's declaration, the `c += w`
   accumulation and return are elided from this view.  */
3831 gmp_popcount_limb (mp_limb_t x)
3836 for (c = 0; x > 0; x >>= 16)
3838 unsigned w = ((x >> 1) & 0x5555) + (x & 0x5555);
3839 w = ((w >> 2) & 0x3333) + (w & 0x3333);
3840 w = ((w >> 4) & 0x0f0f) + (w & 0x0f0f);
3841 w = (w >> 8) + (w & 0x00ff);
/* mpn_popcount: total set bits across n limbs; sums the per-limb
   counts from gmp_popcount_limb.
   NOTE(review): fragmentary excerpt; declarations and return are
   elided from this view.  */
3848 mpn_popcount (mp_srcptr p, mp_size_t n)
3853 for (c = 0, i = 0; i < n; i++)
3854 c += gmp_popcount_limb (p[i]);
/* mpz_popcount: set bits of u; negative u has infinitely many in the
   two's-complement view, signalled by the all-ones mp_bitcnt_t.
   NOTE(review): fragmentary excerpt; un's assignment and sign test are
   elided from this view.  */
3860 mpz_popcount (
const mpz_t u)
3867 return ~(mp_bitcnt_t) 0;
3869 return mpn_popcount (u->_mp_d, un);
/* mpz_hamdist: Hamming distance between u and v.  Finite only when
   signs match (otherwise all-ones mp_bitcnt_t); equal-sign negatives
   are converted on the fly with a shared complement mask `comp` and
   carries uc/vc, as in mpz_and.
   NOTE(review): fragmentary excerpt; pointer setup, carry propagation
   and the return are elided from this view.  */
3873 mpz_hamdist (
const mpz_t u,
const mpz_t v)
3875 mp_size_t un, vn, i;
3876 mp_limb_t uc, vc, ul, vl, comp;
/* Opposite signs: infinitely many differing bits.  */
3884 return ~(mp_bitcnt_t) 0;
/* comp = 0 or ~0; uc/vc seed the two's-complement +1 carries.  */
3886 comp = - (uc = vc = (un < 0));
/* Ensure up/un is the longer operand.  */
3898 MPN_SRCPTR_SWAP (up, un, vp, vn);
3900 for (i = 0, c = 0; i < vn; i++)
3902 ul = (up[i] ^ comp) + uc;
3905 vl = (vp[i] ^ comp) + vc;
3908 c += gmp_popcount_limb (ul ^ vl);
/* Tail limbs of the longer operand versus v's sign extension.  */
3914 ul = (up[i] ^ comp) + uc;
3917 c += gmp_popcount_limb (ul ^ comp);
/* mpz_scan1: index of the first 1 bit at or above starting_bit, in the
   two's-complement view of u.  Past the top limbs the answer is "none"
   for u >= 0 and starting_bit itself for u < 0 (sign extension is all
   ones).  Delegates the limb walk to mpn_common_scan.
   NOTE(review): fragmentary excerpt; limb fetch, negation adjustment
   branches and declarations are elided from this view.  */
3924 mpz_scan1 (
const mpz_t u, mp_bitcnt_t starting_bit)
3927 mp_size_t us, un, i;
3932 i = starting_bit / GMP_LIMB_BITS;
/* Start beyond stored limbs: decided purely by the sign.  */
3937 return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);
3943 if (starting_bit != 0)
/* For negative u, whether lower limbs are all zero shifts where the
   two's-complement "+1" lands in this limb.  */
3947 ux = mpn_zero_p (up, i);
3949 ux = - (mp_limb_t) (limb >= ux);
/* Mask off bits below starting_bit within the first limb.  */
3953 limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));
3956 return mpn_common_scan (limb, i, up, un, ux);
/* mpz_scan0: index of the first 0 bit at or above starting_bit in the
   two's-complement view of u -- the dual of mpz_scan1, scanning the
   complemented value via mpn_common_scan with ux flipped.
   NOTE(review): fragmentary excerpt; limb fetch/complement lines and
   declarations are elided from this view.  */
3960 mpz_scan0 (
const mpz_t u, mp_bitcnt_t starting_bit)
3963 mp_size_t us, un, i;
/* ux = ~0 when u >= 0 (sign extension of the complement).  */
3967 ux = - (mp_limb_t) (us >= 0);
3969 i = starting_bit / GMP_LIMB_BITS;
/* Start beyond stored limbs: 0-bit found immediately iff u >= 0.  */
3974 return (ux ? starting_bit : ~(mp_bitcnt_t) 0);
/* Borrow adjustment for negative u when lower limbs are all zero.  */
3980 limb -= mpn_zero_p (up, i);
3983 limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));
3985 return mpn_common_scan (limb, i, up, un, ux);
/* mpz_sizeinbase: digits needed to write |u| in `base` (2..36).  Exact
   closed forms for power-of-two bases from the bit count; other bases
   count repeated divisions by `base` on a scratch copy using the
   precomputed single-limb divisor inverse.
   NOTE(review): fragmentary excerpt; the u == 0 case, the division
   loop header, digit counting and free are elided from this view.  */
3992 mpz_sizeinbase (
const mpz_t u,
int base)
4002 assert (base <= 36);
4004 un = GMP_ABS (u->_mp_size);
4010 bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
/* base 4, 8, 16, 32: ceil(bits / log2(base)).  */
4016 return (bits + 1) / 2;
4018 return (bits + 2) / 3;
4020 return (bits + 3) / 4;
4022 return (bits + 4) / 5;
/* General base: destructive repeated division on a copy.  */
4027 tp = gmp_xalloc_limbs (un);
4028 mpn_copyi (tp, up, un);
4029 mpn_div_qr_1_invert (&bi, base);
4035 mpn_div_qr_1_preinv (tp, tp, un, &bi);
4036 un -= (tp[un-1] == 0);
/* mpz_get_str: format u in `base` into sp (allocating when sp is
   NULL).  Negative base selects uppercase digits; power-of-two bases
   use mpn_get_str_bits, others mpn_get_str_other on a scratch copy.
   Raw digit values are mapped through the digit table at the end.
   NOTE(review): fragmentary excerpt; base validation, the u == 0 case,
   the '-' sign emission and NUL termination are elided from this view.  */
4045 mpz_get_str (
char *sp,
int base,
const mpz_t u)
/* Lowercase table for base > 0 ...  */
4054 digits =
"0123456789abcdefghijklmnopqrstuvwxyz";
/* ... uppercase for negative base (per GMP convention).  */
4059 digits =
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
4066 sn = 1 + mpz_sizeinbase (u, base);
/* +1 for the terminating NUL.  */
4068 sp = gmp_xalloc (1 + sn);
4070 un = GMP_ABS (u->_mp_size);
4081 if (u->_mp_size < 0)
4084 bits = mpn_base_power_of_two_p (base);
4088 sn = i + mpn_get_str_bits ((
unsigned char *) sp + i, bits, u->_mp_d, un);
/* General base: destructive conversion needs a limb copy.  */
4094 mpn_get_base_info (&info, base);
4095 tp = gmp_xalloc_limbs (un);
4096 mpn_copyi (tp, u->_mp_d, un);
4098 sn = i + mpn_get_str_other ((
unsigned char *) sp + i, base, &info, tp, un);
/* Translate raw digit values into printable characters in place.  */
4103 sp[i] = digits[(
unsigned char) sp[i]];
/* mpz_set_str: parse sp in `base` (0 = auto-detect from 0x/0b/0
   prefixes) into r; returns 0 on success, -1 on bad input (returns
   elided here).  Digits are first collected as raw values into dp,
   then converted limb-wise.
   NOTE(review): fragmentary excerpt; prefix handling details, the
   digit-validation failure path and frees are elided from this view.  */
4110 mpz_set_str (mpz_t r,
const char *sp,
int base)
4113 mp_size_t rn, alloc;
4119 assert (base == 0 || (base >= 2 && base <= 36));
/* Skip leading whitespace; <ctype.h> wants unsigned char.  */
4121 while (isspace( (
unsigned char) *sp))
4124 sign = (*sp ==
'-');
/* base 0: "0x"/"0X" -> 16 ...  */
4132 if (*sp ==
'x' || *sp ==
'X')
/* ... "0b"/"0B" -> 2 (bare "0" -> 8 handled in elided lines).  */
4137 else if (*sp ==
'b' || *sp ==
'B')
/* Raw digit buffer; (sn == 0) avoids a zero-byte allocation.  */
4150 dp = gmp_xalloc (sn + (sn == 0));
4152 for (sn = 0; *sp; sp++)
/* Interior whitespace is skipped, matching GMP behavior.  */
4156 if (isspace ((
unsigned char) *sp))
4158 if (*sp >=
'0' && *sp <=
'9')
4160 else if (*sp >=
'a' && *sp <=
'z')
4161 digit = *sp -
'a' + 10;
4162 else if (*sp >=
'A' && *sp <=
'Z')
4163 digit = *sp -
'A' + 10;
4177 bits = mpn_base_power_of_two_p (base);
/* Power-of-two base: ceil(sn*bits / limb bits) limbs suffice.  */
4181 alloc = (sn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
4182 rp = MPZ_REALLOC (r, alloc);
4183 rn = mpn_set_str_bits (rp, dp, sn, bits);
/* General base: info.exp digits fit per limb.  */
4188 mpn_get_base_info (&info, base);
4189 alloc = (sn + info.exp - 1) / info.exp;
4190 rp = MPZ_REALLOC (r, alloc);
4191 rn = mpn_set_str_other (rp, dp, sn, base, &info);
4193 assert (rn <= alloc);
4196 r->_mp_size = sign ? - rn : rn;
/* mpz_init_set_str: initialize r then parse; returns mpz_set_str's
   status.
   NOTE(review): fragmentary excerpt; the mpz_init call is elided from
   this view.  */
4202 mpz_init_set_str (mpz_t r,
const char *sp,
int base)
4205 return mpz_set_str (r, sp, base);
/* mpz_out_str: write x in `base` to stream; returns bytes written
   (0 would indicate failure -- return elided here).  Formats through
   mpz_get_str, then hands the buffer to fwrite.
   NOTE(review): fragmentary excerpt; len computation and the free of
   str are elided from this view.  */
4209 mpz_out_str (FILE *stream,
int base,
const mpz_t x)
4214 str = mpz_get_str (NULL, base, x);
/* fwrite may write fewer bytes; its count becomes the result.  */
4216 len = fwrite (str, 1, len, stream);
/* gmp_detect_endian: runtime endianness probe -- inspects the first
   byte of a static int holding 2; used by mpz_import/mpz_export when
   the caller passes endian == 0.
   NOTE(review): fragmentary excerpt; the return expression is elided
   from this view.  */
4223 gmp_detect_endian (
void)
4225 static const int i = 2;
4226 const unsigned char *p = (
const unsigned char *) &i;
/* mpz_import: build r from `count` words of `size` bytes at src.
   order: +1 = most-significant word first, -1 = least first;
   endian: +1 big, -1 little, 0 = host order (probed at runtime).
   Bytes are accumulated little-endian into limbs; p and word_step are
   arranged so the inner loop always walks bytes least-significant
   first regardless of order/endian.  Nails are not supported.
   NOTE(review): fragmentary excerpt; declarations, branch bodies for
   the order/endian setup and the limb flush are elided from this view.  */
4232 mpz_import (mpz_t r,
size_t count,
int order,
size_t size,
int endian,
4233 size_t nails,
const void *src)
4235 const unsigned char *p;
4236 ptrdiff_t word_step;
4249 gmp_die (
"mpz_import: Nails not supported.");
4251 assert (order == 1 || order == -1);
4252 assert (endian >= -1 && endian <= 1);
4255 endian = gmp_detect_endian ();
4257 p = (
unsigned char *) src;
/* When word order and byte order disagree, stepping two words
   compensates for the inner loop's backwards byte walk.  */
4259 word_step = (order != endian) ? 2 * size : 0;
/* Most-significant-word-first input: start at the last word.  */
4265 p += size * (count - 1);
4266 word_step = - word_step;
/* Upper bound on limbs needed for count*size bytes.  */
4273 rn = (size * count +
sizeof(mp_limb_t) - 1) /
sizeof(mp_limb_t);
4274 rp = MPZ_REALLOC (r, rn);
4276 for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
4279 for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4281 limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
/* Limb full: store it and restart accumulation (store elided).  */
4282 if (bytes ==
sizeof(mp_limb_t))
4290 assert (i + (bytes > 0) == rn);
4294 i = mpn_normalized_size (rp, i);
4300 mpz_export (
void *r,
size_t *countp,
int order,
size_t size,
int endian,
4301 size_t nails,
const mpz_t u)
4307 gmp_die (
"mpz_import: Nails not supported.");
4309 assert (order == 1 || order == -1);
4310 assert (endian >= -1 && endian <= 1);
4311 assert (size > 0 || u->_mp_size == 0);
4319 ptrdiff_t word_step;
4330 limb = u->_mp_d[un-1];
4335 k++; limb >>= CHAR_BIT;
4336 }
while (limb != 0);
4338 count = (k + (un-1) *
sizeof (mp_limb_t) + size - 1) / size;
4341 r = gmp_xalloc (count * size);
4344 endian = gmp_detect_endian ();
4346 p = (
unsigned char *) r;
4348 word_step = (order != endian) ? 2 * size : 0;
4354 p += size * (count - 1);
4355 word_step = - word_step;
4362 for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
4365 for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4370 limb = u->_mp_d[i++];
4371 bytes =
sizeof (mp_limb_t);
4379 assert (k == count);