medium_value(PyLongObject *x)
{
assert(IS_MEDIUM_VALUE(x));
- return ((stwodigits)Py_SIZE(x)) * x->ob_digit[0];
+ return ((stwodigits)Py_SIZE(x)) * x->long_value.ob_digit[0];
}
/* True if ival is cached as a preallocated small int: in the half-open
   range [-_PY_NSMALLNEGINTS, _PY_NSMALLPOSINTS). ival is evaluated twice,
   so callers must pass a side-effect-free expression. */
#define IS_SMALL_INT(ival) (-_PY_NSMALLNEGINTS <= (ival) && (ival) < _PY_NSMALLPOSINTS)
Py_ssize_t j = Py_ABS(Py_SIZE(v));
Py_ssize_t i = j;
- while (i > 0 && v->ob_digit[i-1] == 0)
+ while (i > 0 && v->long_value.ob_digit[i-1] == 0)
--i;
if (i != j) {
Py_SET_SIZE(v, (Py_SIZE(v) < 0) ? -(i) : i);
Return NULL and set exception if we run out of memory. */
#define MAX_LONG_DIGITS \
- ((PY_SSIZE_T_MAX - offsetof(PyLongObject, ob_digit))/sizeof(digit))
+ ((PY_SSIZE_T_MAX - offsetof(PyLongObject, long_value.ob_digit))/sizeof(digit))
PyLongObject *
_PyLong_New(Py_ssize_t size)
sizeof(PyVarObject) instead of the offsetof, but this risks being
incorrect in the presence of padding between the PyVarObject header
and the digits. */
- result = PyObject_Malloc(offsetof(PyLongObject, ob_digit) +
+ result = PyObject_Malloc(offsetof(PyLongObject, long_value.ob_digit) +
ndigits*sizeof(digit));
if (!result) {
PyErr_NoMemory();
if (result != NULL) {
Py_SET_SIZE(result, Py_SIZE(src));
while (--i >= 0) {
- result->ob_digit[i] = src->ob_digit[i];
+ result->long_value.ob_digit[i] = src->long_value.ob_digit[i];
}
}
return (PyObject *)result;
Py_ssize_t sign = x < 0 ? -1: 1;
digit abs_x = x < 0 ? -x : x;
_PyObject_InitVar((PyVarObject*)v, &PyLong_Type, sign);
- v->ob_digit[0] = abs_x;
+ v->long_value.ob_digit[0] = abs_x;
return (PyObject*)v;
}
}
PyLongObject *v = _PyLong_New(ndigits);
if (v != NULL) {
- digit *p = v->ob_digit;
+ digit *p = v->long_value.ob_digit;
Py_SET_SIZE(v, ndigits * sign);
t = abs_ival;
while (t) {
// Since the primary use-case is iterating over ranges, which
// are typically positive, only do this optimization
// for positive integers (for now).
- ((PyLongObject *)old)->ob_digit[0] =
+ ((PyLongObject *)old)->long_value.ob_digit[0] =
Py_SAFE_DOWNCAST(value, Py_ssize_t, digit);
return 0;
}
/* Construct output value. */
v = _PyLong_New(ndigits);
if (v != NULL) {
- digit *p = v->ob_digit;
+ digit *p = v->long_value.ob_digit;
Py_SET_SIZE(v, ival < 0 ? -ndigits : ndigits);
t = abs_ival;
while (t) {
if (v == NULL) { \
return NULL; \
} \
- digit *p = v->ob_digit; \
+ digit *p = v->long_value.ob_digit; \
while ((ival)) { \
*p++ = (digit)((ival) & PyLong_MASK); \
(ival) >>= PyLong_SHIFT; \
frac = ldexp(frac, (expo-1) % PyLong_SHIFT + 1);
for (i = ndig; --i >= 0; ) {
digit bits = (digit)frac;
- v->ob_digit[i] = bits;
+ v->long_value.ob_digit[i] = bits;
frac = frac - (double)bits;
frac = ldexp(frac, PyLong_SHIFT);
}
switch (i) {
case -1:
- res = -(sdigit)v->ob_digit[0];
+ res = -(sdigit)v->long_value.ob_digit[0];
break;
case 0:
res = 0;
break;
case 1:
- res = v->ob_digit[0];
+ res = v->long_value.ob_digit[0];
break;
default:
sign = 1;
}
while (--i >= 0) {
prev = x;
- x = (x << PyLong_SHIFT) | v->ob_digit[i];
+ x = (x << PyLong_SHIFT) | v->long_value.ob_digit[i];
if ((x >> PyLong_SHIFT) != prev) {
*overflow = sign;
goto exit;
v = (PyLongObject *)vv;
i = Py_SIZE(v);
switch (i) {
- case -1: return -(sdigit)v->ob_digit[0];
+ case -1: return -(sdigit)v->long_value.ob_digit[0];
case 0: return 0;
- case 1: return v->ob_digit[0];
+ case 1: return v->long_value.ob_digit[0];
}
sign = 1;
x = 0;
}
while (--i >= 0) {
prev = x;
- x = (x << PyLong_SHIFT) | v->ob_digit[i];
+ x = (x << PyLong_SHIFT) | v->long_value.ob_digit[i];
if ((x >> PyLong_SHIFT) != prev)
goto overflow;
}
}
switch (i) {
case 0: return 0;
- case 1: return v->ob_digit[0];
+ case 1: return v->long_value.ob_digit[0];
}
while (--i >= 0) {
prev = x;
- x = (x << PyLong_SHIFT) | v->ob_digit[i];
+ x = (x << PyLong_SHIFT) | v->long_value.ob_digit[i];
if ((x >> PyLong_SHIFT) != prev) {
PyErr_SetString(PyExc_OverflowError,
"Python int too large to convert "
}
switch (i) {
case 0: return 0;
- case 1: return v->ob_digit[0];
+ case 1: return v->long_value.ob_digit[0];
}
while (--i >= 0) {
prev = x;
- x = (x << PyLong_SHIFT) | v->ob_digit[i];
+ x = (x << PyLong_SHIFT) | v->long_value.ob_digit[i];
if ((x >> PyLong_SHIFT) != prev) {
PyErr_SetString(PyExc_OverflowError,
"Python int too large to convert to C size_t");
i = Py_SIZE(v);
switch (i) {
case 0: return 0;
- case 1: return v->ob_digit[0];
+ case 1: return v->long_value.ob_digit[0];
}
sign = 1;
x = 0;
i = -i;
}
while (--i >= 0) {
- x = (x << PyLong_SHIFT) | v->ob_digit[i];
+ x = (x << PyLong_SHIFT) | v->long_value.ob_digit[i];
}
return x * sign;
}
assert(v != NULL);
assert(PyLong_Check(v));
ndigits = Py_ABS(Py_SIZE(v));
- assert(ndigits == 0 || v->ob_digit[ndigits - 1] != 0);
+ assert(ndigits == 0 || v->long_value.ob_digit[ndigits - 1] != 0);
if (ndigits > 0) {
- digit msd = v->ob_digit[ndigits - 1];
+ digit msd = v->long_value.ob_digit[ndigits - 1];
if ((size_t)(ndigits - 1) > SIZE_MAX / (size_t)PyLong_SHIFT)
goto Overflow;
result = (size_t)(ndigits - 1) * (size_t)PyLong_SHIFT;
size_t numsignificantbytes; /* number of bytes that matter */
Py_ssize_t ndigits; /* number of Python int digits */
PyLongObject* v; /* result */
- Py_ssize_t idigit = 0; /* next free index in v->ob_digit */
+ Py_ssize_t idigit = 0; /* next free index in v->long_value.ob_digit */
if (n == 0)
return PyLong_FromLong(0L);
if (accumbits >= PyLong_SHIFT) {
/* There's enough to fill a Python digit. */
assert(idigit < ndigits);
- v->ob_digit[idigit] = (digit)(accum & PyLong_MASK);
+ v->long_value.ob_digit[idigit] = (digit)(accum & PyLong_MASK);
++idigit;
accum >>= PyLong_SHIFT;
accumbits -= PyLong_SHIFT;
assert(accumbits < PyLong_SHIFT);
if (accumbits) {
assert(idigit < ndigits);
- v->ob_digit[idigit] = (digit)accum;
+ v->long_value.ob_digit[idigit] = (digit)accum;
++idigit;
}
}
unsigned char* bytes, size_t n,
int little_endian, int is_signed)
{
- Py_ssize_t i; /* index into v->ob_digit */
+ Py_ssize_t i; /* index into v->long_value.ob_digit */
Py_ssize_t ndigits; /* |v->ob_size| */
twodigits accum; /* sliding register */
unsigned int accumbits; /* # bits in accum */
It's crucial that every Python digit except for the MSD contribute
exactly PyLong_SHIFT bits to the total, so first assert that the int is
normalized. */
- assert(ndigits == 0 || v->ob_digit[ndigits - 1] != 0);
+ assert(ndigits == 0 || v->long_value.ob_digit[ndigits - 1] != 0);
j = 0;
accum = 0;
accumbits = 0;
carry = do_twos_comp ? 1 : 0;
for (i = 0; i < ndigits; ++i) {
- digit thisdigit = v->ob_digit[i];
+ digit thisdigit = v->long_value.ob_digit[i];
if (do_twos_comp) {
thisdigit = (thisdigit ^ PyLong_MASK) + carry;
carry = thisdigit >> PyLong_SHIFT;
/* Construct output value. */
v = _PyLong_New(ndigits);
if (v != NULL) {
- digit *p = v->ob_digit;
+ digit *p = v->long_value.ob_digit;
Py_SET_SIZE(v, ival < 0 ? -ndigits : ndigits);
t = abs_ival;
while (t) {
}
v = _PyLong_New(ndigits);
if (v != NULL) {
- digit *p = v->ob_digit;
+ digit *p = v->long_value.ob_digit;
Py_SET_SIZE(v, negative ? -ndigits : ndigits);
t = abs_ival;
while (t) {
res = 0;
switch(Py_SIZE(v)) {
case -1:
- bytes = -(sdigit)v->ob_digit[0];
+ bytes = -(sdigit)v->long_value.ob_digit[0];
break;
case 0:
bytes = 0;
break;
case 1:
- bytes = v->ob_digit[0];
+ bytes = v->long_value.ob_digit[0];
break;
default:
res = _PyLong_AsByteArray((PyLongObject *)v, (unsigned char *)&bytes,
v = (PyLongObject*)vv;
switch(Py_SIZE(v)) {
case 0: return 0;
- case 1: return v->ob_digit[0];
+ case 1: return v->long_value.ob_digit[0];
}
res = _PyLong_AsByteArray((PyLongObject *)vv, (unsigned char *)&bytes,
v = (PyLongObject *)vv;
switch(Py_SIZE(v)) {
case 0: return 0;
- case 1: return v->ob_digit[0];
+ case 1: return v->long_value.ob_digit[0];
}
i = Py_SIZE(v);
sign = 1;
i = -i;
}
while (--i >= 0) {
- x = (x << PyLong_SHIFT) | v->ob_digit[i];
+ x = (x << PyLong_SHIFT) | v->long_value.ob_digit[i];
}
return x * sign;
}
switch (i) {
case -1:
- res = -(sdigit)v->ob_digit[0];
+ res = -(sdigit)v->long_value.ob_digit[0];
break;
case 0:
res = 0;
break;
case 1:
- res = v->ob_digit[0];
+ res = v->long_value.ob_digit[0];
break;
default:
sign = 1;
}
while (--i >= 0) {
prev = x;
- x = (x << PyLong_SHIFT) + v->ob_digit[i];
+ x = (x << PyLong_SHIFT) + v->long_value.ob_digit[i];
if ((x >> PyLong_SHIFT) != prev) {
*overflow = sign;
goto exit;
z = _PyLong_New(size);
if (z == NULL)
return NULL;
- *prem = inplace_divrem1(z->ob_digit, a->ob_digit, size, n);
+ *prem = inplace_divrem1(z->long_value.ob_digit, a->long_value.ob_digit, size, n);
return long_normalize(z);
}
assert(n > 0 && n <= PyLong_MASK);
return (PyLongObject *)PyLong_FromLong(
- (long)inplace_rem1(a->ob_digit, size, n)
+ (long)inplace_rem1(a->long_value.ob_digit, size, n)
);
}
/* convert array of base _PyLong_BASE digits in pin to an array of
base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP,
Volume 2 (3rd edn), section 4.4, Method 1b). */
- pin = a->ob_digit;
- pout = scratch->ob_digit;
+ pin = a->long_value.ob_digit;
+ pout = scratch->long_value.ob_digit;
size = 0;
for (i = size_a; --i >= 0; ) {
digit hi = pin[i];
return -1;
}
size_a_in_bits = (size_a - 1) * PyLong_SHIFT +
- bit_length_digit(a->ob_digit[size_a - 1]);
+ bit_length_digit(a->long_value.ob_digit[size_a - 1]);
/* Allow 1 character for a '-' sign. */
sz = negative + (size_a_in_bits + (bits - 1)) / bits;
}
int accumbits = 0; /* # of bits in accum */ \
Py_ssize_t i; \
for (i = 0; i < size_a; ++i) { \
- accum |= (twodigits)a->ob_digit[i] << accumbits; \
+ accum |= (twodigits)a->long_value.ob_digit[i] << accumbits; \
accumbits += PyLong_SHIFT; \
assert(accumbits >= bits); \
do { \
*/
accum = 0;
bits_in_accum = 0;
- pdigit = z->ob_digit;
+ pdigit = z->long_value.ob_digit;
p = end;
while (--p >= start) {
int k;
bits_in_accum += bits_per_char;
if (bits_in_accum >= PyLong_SHIFT) {
*pdigit++ = (digit)(accum & PyLong_MASK);
- assert(pdigit - z->ob_digit <= n);
+ assert(pdigit - z->long_value.ob_digit <= n);
accum >>= PyLong_SHIFT;
bits_in_accum -= PyLong_SHIFT;
assert(bits_in_accum < PyLong_SHIFT);
if (bits_in_accum) {
assert(bits_in_accum <= PyLong_SHIFT);
*pdigit++ = (digit)accum;
- assert(pdigit - z->ob_digit <= n);
+ assert(pdigit - z->long_value.ob_digit <= n);
}
- while (pdigit - z->ob_digit < n)
+ while (pdigit - z->long_value.ob_digit < n)
*pdigit++ = 0;
*res = z;
return 0;
/* Create an int object that can contain the largest possible
* integer with this base and length. Note that there's no
- * need to initialize z->ob_digit -- no slot is read up before
+ * need to initialize z->long_value.ob_digit -- no slot is read up before
* being stored into.
*/
double fsize_z = (double)digits * log_base_BASE[base] + 1.0;
}
/* Multiply z by convmult, and add c. */
- pz = z->ob_digit;
+ pz = z->long_value.ob_digit;
pzstop = pz + Py_SIZE(z);
for (; pz < pzstop; ++pz) {
c += (twodigits)*pz * convmult;
*res = NULL;
return 0;
}
- memcpy(tmp->ob_digit,
- z->ob_digit,
+ memcpy(tmp->long_value.ob_digit,
+ z->long_value.ob_digit,
sizeof(digit) * size_z);
Py_SETREF(z, tmp);
- z->ob_digit[size_z] = (digit)c;
+ z->long_value.ob_digit[size_z] = (digit)c;
++size_z;
}
}
}
if (size_a < size_b ||
(size_a == size_b &&
- a->ob_digit[size_a-1] < b->ob_digit[size_b-1])) {
+ a->long_value.ob_digit[size_a-1] < b->long_value.ob_digit[size_b-1])) {
/* |a| < |b|. */
*prem = (PyLongObject *)long_long((PyObject *)a);
if (*prem == NULL) {
}
if (size_b == 1) {
digit rem = 0;
- z = divrem1(a, b->ob_digit[0], &rem);
+ z = divrem1(a, b->long_value.ob_digit[0], &rem);
if (z == NULL)
return -1;
*prem = (PyLongObject *) PyLong_FromLong((long)rem);
}
if (size_a < size_b ||
(size_a == size_b &&
- a->ob_digit[size_a-1] < b->ob_digit[size_b-1])) {
+ a->long_value.ob_digit[size_a-1] < b->long_value.ob_digit[size_b-1])) {
/* |a| < |b|. */
*prem = (PyLongObject *)long_long((PyObject *)a);
return -(*prem == NULL);
}
if (size_b == 1) {
- *prem = rem1(a, b->ob_digit[0]);
+ *prem = rem1(a, b->long_value.ob_digit[0]);
if (*prem == NULL)
return -1;
}
/* normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
shift v1 left by the same amount. Results go into w and v. */
- d = PyLong_SHIFT - bit_length_digit(w1->ob_digit[size_w-1]);
- carry = v_lshift(w->ob_digit, w1->ob_digit, size_w, d);
+ d = PyLong_SHIFT - bit_length_digit(w1->long_value.ob_digit[size_w-1]);
+ carry = v_lshift(w->long_value.ob_digit, w1->long_value.ob_digit, size_w, d);
assert(carry == 0);
- carry = v_lshift(v->ob_digit, v1->ob_digit, size_v, d);
- if (carry != 0 || v->ob_digit[size_v-1] >= w->ob_digit[size_w-1]) {
- v->ob_digit[size_v] = carry;
+ carry = v_lshift(v->long_value.ob_digit, v1->long_value.ob_digit, size_v, d);
+ if (carry != 0 || v->long_value.ob_digit[size_v-1] >= w->long_value.ob_digit[size_w-1]) {
+ v->long_value.ob_digit[size_v] = carry;
size_v++;
}
- /* Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
+ /* Now v->long_value.ob_digit[size_v-1] < w->long_value.ob_digit[size_w-1], so quotient has
at most (and usually exactly) k = size_v - size_w digits. */
k = size_v - size_w;
assert(k >= 0);
*prem = NULL;
return NULL;
}
- v0 = v->ob_digit;
- w0 = w->ob_digit;
+ v0 = v->long_value.ob_digit;
+ w0 = w->long_value.ob_digit;
wm1 = w0[size_w-1];
wm2 = w0[size_w-2];
- for (vk = v0+k, ak = a->ob_digit + k; vk-- > v0;) {
+ for (vk = v0+k, ak = a->long_value.ob_digit + k; vk-- > v0;) {
/* inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
single-digit quotient q, remainder in vk[0:size_w]. */
*e = 0;
return 0.0;
}
- a_bits = bit_length_digit(a->ob_digit[a_size-1]);
+ a_bits = bit_length_digit(a->long_value.ob_digit[a_size-1]);
/* The following is an overflow-free version of the check
"if ((a_size - 1) * PyLong_SHIFT + a_bits > PY_SSIZE_T_MAX) ..." */
if (a_size >= (PY_SSIZE_T_MAX - 1) / PyLong_SHIFT + 1 &&
shift_digits = (DBL_MANT_DIG + 2 - a_bits) / PyLong_SHIFT;
shift_bits = (DBL_MANT_DIG + 2 - a_bits) % PyLong_SHIFT;
x_size = shift_digits;
- rem = v_lshift(x_digits + x_size, a->ob_digit, a_size,
+ rem = v_lshift(x_digits + x_size, a->long_value.ob_digit, a_size,
(int)shift_bits);
x_size += a_size;
x_digits[x_size++] = rem;
else {
shift_digits = (a_bits - DBL_MANT_DIG - 2) / PyLong_SHIFT;
shift_bits = (a_bits - DBL_MANT_DIG - 2) % PyLong_SHIFT;
- rem = v_rshift(x_digits, a->ob_digit + shift_digits,
+ rem = v_rshift(x_digits, a->long_value.ob_digit + shift_digits,
a_size - shift_digits, (int)shift_bits);
x_size = a_size - shift_digits;
/* For correct rounding below, we need the least significant
x_digits[0] |= 1;
else
while (shift_digits > 0)
- if (a->ob_digit[--shift_digits]) {
+ if (a->long_value.ob_digit[--shift_digits]) {
x_digits[0] |= 1;
break;
}
Py_ssize_t i = Py_ABS(Py_SIZE(a));
sdigit diff = 0;
while (--i >= 0) {
- diff = (sdigit) a->ob_digit[i] - (sdigit) b->ob_digit[i];
+ diff = (sdigit) a->long_value.ob_digit[i] - (sdigit) b->long_value.ob_digit[i];
if (diff) {
break;
}
i = Py_SIZE(v);
switch(i) {
- case -1: return v->ob_digit[0]==1 ? -2 : -(sdigit)v->ob_digit[0];
+ case -1: return v->long_value.ob_digit[0]==1 ? -2 : -(sdigit)v->long_value.ob_digit[0];
case 0: return 0;
- case 1: return v->ob_digit[0];
+ case 1: return v->long_value.ob_digit[0];
}
sign = 1;
x = 0;
}
while (--i >= 0) {
/* Here x is a quantity in the range [0, _PyHASH_MODULUS); we
- want to compute x * 2**PyLong_SHIFT + v->ob_digit[i] modulo
+ want to compute x * 2**PyLong_SHIFT + v->long_value.ob_digit[i] modulo
_PyHASH_MODULUS.
The computation of x * 2**PyLong_SHIFT % _PyHASH_MODULUS
_PyHASH_MODULUS. */
x = ((x << PyLong_SHIFT) & _PyHASH_MODULUS) |
(x >> (_PyHASH_BITS - PyLong_SHIFT));
- x += v->ob_digit[i];
+ x += v->long_value.ob_digit[i];
if (x >= _PyHASH_MODULUS)
x -= _PyHASH_MODULUS;
}
if (z == NULL)
return NULL;
for (i = 0; i < size_b; ++i) {
- carry += a->ob_digit[i] + b->ob_digit[i];
- z->ob_digit[i] = carry & PyLong_MASK;
+ carry += a->long_value.ob_digit[i] + b->long_value.ob_digit[i];
+ z->long_value.ob_digit[i] = carry & PyLong_MASK;
carry >>= PyLong_SHIFT;
}
for (; i < size_a; ++i) {
- carry += a->ob_digit[i];
- z->ob_digit[i] = carry & PyLong_MASK;
+ carry += a->long_value.ob_digit[i];
+ z->long_value.ob_digit[i] = carry & PyLong_MASK;
carry >>= PyLong_SHIFT;
}
- z->ob_digit[i] = carry;
+ z->long_value.ob_digit[i] = carry;
return long_normalize(z);
}
else if (size_a == size_b) {
/* Find highest digit where a and b differ: */
i = size_a;
- while (--i >= 0 && a->ob_digit[i] == b->ob_digit[i])
+ while (--i >= 0 && a->long_value.ob_digit[i] == b->long_value.ob_digit[i])
;
if (i < 0)
return (PyLongObject *)PyLong_FromLong(0);
- if (a->ob_digit[i] < b->ob_digit[i]) {
+ if (a->long_value.ob_digit[i] < b->long_value.ob_digit[i]) {
sign = -1;
{ PyLongObject *temp = a; a = b; b = temp; }
}
for (i = 0; i < size_b; ++i) {
/* The following assumes unsigned arithmetic
works module 2**N for some N>PyLong_SHIFT. */
- borrow = a->ob_digit[i] - b->ob_digit[i] - borrow;
- z->ob_digit[i] = borrow & PyLong_MASK;
+ borrow = a->long_value.ob_digit[i] - b->long_value.ob_digit[i] - borrow;
+ z->long_value.ob_digit[i] = borrow & PyLong_MASK;
borrow >>= PyLong_SHIFT;
borrow &= 1; /* Keep only one sign bit */
}
for (; i < size_a; ++i) {
- borrow = a->ob_digit[i] - borrow;
- z->ob_digit[i] = borrow & PyLong_MASK;
+ borrow = a->long_value.ob_digit[i] - borrow;
+ z->long_value.ob_digit[i] = borrow & PyLong_MASK;
borrow >>= PyLong_SHIFT;
borrow &= 1; /* Keep only one sign bit */
}
if (z == NULL)
return NULL;
- memset(z->ob_digit, 0, Py_SIZE(z) * sizeof(digit));
+ memset(z->long_value.ob_digit, 0, Py_SIZE(z) * sizeof(digit));
if (a == b) {
/* Efficient squaring per HAC, Algorithm 14.16:
* http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
* via exploiting that each entry in the multiplication
* pyramid appears twice (except for the size_a squares).
*/
- digit *paend = a->ob_digit + size_a;
+ digit *paend = a->long_value.ob_digit + size_a;
for (i = 0; i < size_a; ++i) {
twodigits carry;
- twodigits f = a->ob_digit[i];
- digit *pz = z->ob_digit + (i << 1);
- digit *pa = a->ob_digit + i + 1;
+ twodigits f = a->long_value.ob_digit[i];
+ digit *pz = z->long_value.ob_digit + (i << 1);
+ digit *pa = a->long_value.ob_digit + i + 1;
SIGCHECK({
Py_DECREF(z);
else { /* a is not the same as b -- gradeschool int mult */
for (i = 0; i < size_a; ++i) {
twodigits carry = 0;
- twodigits f = a->ob_digit[i];
- digit *pz = z->ob_digit + i;
- digit *pb = b->ob_digit;
- digit *pbend = b->ob_digit + size_b;
+ twodigits f = a->long_value.ob_digit[i];
+ digit *pz = z->long_value.ob_digit + i;
+ digit *pb = b->long_value.ob_digit;
+ digit *pbend = b->long_value.ob_digit + size_b;
SIGCHECK({
Py_DECREF(z);
return -1;
}
- memcpy(lo->ob_digit, n->ob_digit, size_lo * sizeof(digit));
- memcpy(hi->ob_digit, n->ob_digit + size_lo, size_hi * sizeof(digit));
+ memcpy(lo->long_value.ob_digit, n->long_value.ob_digit, size_lo * sizeof(digit));
+ memcpy(hi->long_value.ob_digit, n->long_value.ob_digit + size_lo, size_hi * sizeof(digit));
*high = long_normalize(hi);
*low = long_normalize(lo);
if (ret == NULL) goto fail;
#ifdef Py_DEBUG
/* Fill with trash, to catch reference to uninitialized digits. */
- memset(ret->ob_digit, 0xDF, Py_SIZE(ret) * sizeof(digit));
+ memset(ret->long_value.ob_digit, 0xDF, Py_SIZE(ret) * sizeof(digit));
#endif
/* 2. t1 <- ah*bh, and copy into high digits of result. */
if ((t1 = k_mul(ah, bh)) == NULL) goto fail;
assert(Py_SIZE(t1) >= 0);
assert(2*shift + Py_SIZE(t1) <= Py_SIZE(ret));
- memcpy(ret->ob_digit + 2*shift, t1->ob_digit,
+ memcpy(ret->long_value.ob_digit + 2*shift, t1->long_value.ob_digit,
Py_SIZE(t1) * sizeof(digit));
/* Zero-out the digits higher than the ah*bh copy. */
i = Py_SIZE(ret) - 2*shift - Py_SIZE(t1);
if (i)
- memset(ret->ob_digit + 2*shift + Py_SIZE(t1), 0,
+ memset(ret->long_value.ob_digit + 2*shift + Py_SIZE(t1), 0,
i * sizeof(digit));
/* 3. t2 <- al*bl, and copy into the low digits. */
}
assert(Py_SIZE(t2) >= 0);
assert(Py_SIZE(t2) <= 2*shift); /* no overlap with high digits */
- memcpy(ret->ob_digit, t2->ob_digit, Py_SIZE(t2) * sizeof(digit));
+ memcpy(ret->long_value.ob_digit, t2->long_value.ob_digit, Py_SIZE(t2) * sizeof(digit));
/* Zero out remaining digits. */
i = 2*shift - Py_SIZE(t2); /* number of uninitialized digits */
if (i)
- memset(ret->ob_digit + Py_SIZE(t2), 0, i * sizeof(digit));
+ memset(ret->long_value.ob_digit + Py_SIZE(t2), 0, i * sizeof(digit));
/* 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
* because it's fresher in cache.
*/
i = Py_SIZE(ret) - shift; /* # digits after shift */
- (void)v_isub(ret->ob_digit + shift, i, t2->ob_digit, Py_SIZE(t2));
+ (void)v_isub(ret->long_value.ob_digit + shift, i, t2->long_value.ob_digit, Py_SIZE(t2));
_Py_DECREF_INT(t2);
- (void)v_isub(ret->ob_digit + shift, i, t1->ob_digit, Py_SIZE(t1));
+ (void)v_isub(ret->long_value.ob_digit + shift, i, t1->long_value.ob_digit, Py_SIZE(t1));
_Py_DECREF_INT(t1);
/* 6. t3 <- (ah+al)(bh+bl), and add into result. */
/* Add t3. It's not obvious why we can't run out of room here.
* See the (*) comment after this function.
*/
- (void)v_iadd(ret->ob_digit + shift, i, t3->ob_digit, Py_SIZE(t3));
+ (void)v_iadd(ret->long_value.ob_digit + shift, i, t3->long_value.ob_digit, Py_SIZE(t3));
_Py_DECREF_INT(t3);
return long_normalize(ret);
ret = _PyLong_New(asize + bsize);
if (ret == NULL)
return NULL;
- memset(ret->ob_digit, 0, Py_SIZE(ret) * sizeof(digit));
+ memset(ret->long_value.ob_digit, 0, Py_SIZE(ret) * sizeof(digit));
/* Successive slices of b are copied into bslice. */
bslice = _PyLong_New(asize);
const Py_ssize_t nbtouse = Py_MIN(bsize, asize);
/* Multiply the next slice of b by a. */
- memcpy(bslice->ob_digit, b->ob_digit + nbdone,
+ memcpy(bslice->long_value.ob_digit, b->long_value.ob_digit + nbdone,
nbtouse * sizeof(digit));
Py_SET_SIZE(bslice, nbtouse);
product = k_mul(a, bslice);
goto fail;
/* Add into result. */
- (void)v_iadd(ret->ob_digit + nbdone, Py_SIZE(ret) - nbdone,
- product->ob_digit, Py_SIZE(product));
+ (void)v_iadd(ret->long_value.ob_digit + nbdone, Py_SIZE(ret) - nbdone,
+ product->long_value.ob_digit, Py_SIZE(product));
_Py_DECREF_INT(product);
bsize -= nbtouse;
static PyObject *
fast_mod(PyLongObject *a, PyLongObject *b)
{
- sdigit left = a->ob_digit[0];
- sdigit right = b->ob_digit[0];
+ sdigit left = a->long_value.ob_digit[0];
+ sdigit right = b->long_value.ob_digit[0];
sdigit mod;
assert(Py_ABS(Py_SIZE(a)) == 1);
static PyObject *
fast_floor_div(PyLongObject *a, PyLongObject *b)
{
- sdigit left = a->ob_digit[0];
- sdigit right = b->ob_digit[0];
+ sdigit left = a->long_value.ob_digit[0];
+ sdigit right = b->long_value.ob_digit[0];
sdigit div;
assert(Py_ABS(Py_SIZE(a)) == 1);
the x87 FPU set to 64-bit precision. */
a_is_small = a_size <= MANT_DIG_DIGITS ||
(a_size == MANT_DIG_DIGITS+1 &&
- a->ob_digit[MANT_DIG_DIGITS] >> MANT_DIG_BITS == 0);
+ a->long_value.ob_digit[MANT_DIG_DIGITS] >> MANT_DIG_BITS == 0);
b_is_small = b_size <= MANT_DIG_DIGITS ||
(b_size == MANT_DIG_DIGITS+1 &&
- b->ob_digit[MANT_DIG_DIGITS] >> MANT_DIG_BITS == 0);
+ b->long_value.ob_digit[MANT_DIG_DIGITS] >> MANT_DIG_BITS == 0);
if (a_is_small && b_is_small) {
double da, db;
- da = a->ob_digit[--a_size];
+ da = a->long_value.ob_digit[--a_size];
while (a_size > 0)
- da = da * PyLong_BASE + a->ob_digit[--a_size];
- db = b->ob_digit[--b_size];
+ da = da * PyLong_BASE + a->long_value.ob_digit[--a_size];
+ db = b->long_value.ob_digit[--b_size];
while (b_size > 0)
- db = db * PyLong_BASE + b->ob_digit[--b_size];
+ db = db * PyLong_BASE + b->long_value.ob_digit[--b_size];
result = da / db;
goto success;
}
/* Extreme underflow */
goto underflow_or_zero;
/* Next line is now safe from overflowing a Py_ssize_t */
- diff = diff * PyLong_SHIFT + bit_length_digit(a->ob_digit[a_size - 1]) -
- bit_length_digit(b->ob_digit[b_size - 1]);
+ diff = diff * PyLong_SHIFT + bit_length_digit(a->long_value.ob_digit[a_size - 1]) -
+ bit_length_digit(b->long_value.ob_digit[b_size - 1]);
/* Now diff = a_bits - b_bits. */
if (diff > DBL_MAX_EXP)
goto overflow;
if (x == NULL)
goto error;
for (i = 0; i < shift_digits; i++)
- x->ob_digit[i] = 0;
- rem = v_lshift(x->ob_digit + shift_digits, a->ob_digit,
+ x->long_value.ob_digit[i] = 0;
+ rem = v_lshift(x->long_value.ob_digit + shift_digits, a->long_value.ob_digit,
a_size, -shift % PyLong_SHIFT);
- x->ob_digit[a_size + shift_digits] = rem;
+ x->long_value.ob_digit[a_size + shift_digits] = rem;
}
else {
Py_ssize_t shift_digits = shift / PyLong_SHIFT;
x = _PyLong_New(a_size - shift_digits);
if (x == NULL)
goto error;
- rem = v_rshift(x->ob_digit, a->ob_digit + shift_digits,
+ rem = v_rshift(x->long_value.ob_digit, a->long_value.ob_digit + shift_digits,
a_size - shift_digits, shift % PyLong_SHIFT);
/* set inexact if any of the bits shifted out is nonzero */
if (rem)
inexact = 1;
while (!inexact && shift_digits > 0)
- if (a->ob_digit[--shift_digits])
+ if (a->long_value.ob_digit[--shift_digits])
inexact = 1;
}
long_normalize(x);
/* x //= b. If the remainder is nonzero, set inexact. We own the only
reference to x, so it's safe to modify it in-place. */
if (b_size == 1) {
- digit rem = inplace_divrem1(x->ob_digit, x->ob_digit, x_size,
- b->ob_digit[0]);
+ digit rem = inplace_divrem1(x->long_value.ob_digit, x->long_value.ob_digit, x_size,
+ b->long_value.ob_digit[0]);
long_normalize(x);
if (rem)
inexact = 1;
}
x_size = Py_ABS(Py_SIZE(x));
assert(x_size > 0); /* result of division is never zero */
- x_bits = (x_size-1)*PyLong_SHIFT+bit_length_digit(x->ob_digit[x_size-1]);
+ x_bits = (x_size-1)*PyLong_SHIFT+bit_length_digit(x->long_value.ob_digit[x_size-1]);
/* The number of extra bits that have to be rounded away. */
extra_bits = Py_MAX(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG;
/* Round by directly modifying the low digit of x. */
mask = (digit)1 << (extra_bits - 1);
- low = x->ob_digit[0] | inexact;
+ low = x->long_value.ob_digit[0] | inexact;
if ((low & mask) && (low & (3U*mask-1U)))
low += mask;
- x->ob_digit[0] = low & ~(2U*mask-1U);
+ x->long_value.ob_digit[0] = low & ~(2U*mask-1U);
/* Convert x to a double dx; the conversion is exact. */
- dx = x->ob_digit[--x_size];
+ dx = x->long_value.ob_digit[--x_size];
while (x_size > 0)
- dx = dx * PyLong_BASE + x->ob_digit[--x_size];
+ dx = dx * PyLong_BASE + x->long_value.ob_digit[--x_size];
Py_DECREF(x);
/* Check whether ldexp result will overflow a double. */
/* if modulus == 1:
return 0 */
- if ((Py_SIZE(c) == 1) && (c->ob_digit[0] == 1)) {
+ if ((Py_SIZE(c) == 1) && (c->long_value.ob_digit[0] == 1)) {
z = (PyLongObject *)PyLong_FromLong(0L);
goto Done;
}
} while(0)
i = Py_SIZE(b);
- digit bi = i ? b->ob_digit[i-1] : 0;
+ digit bi = i ? b->long_value.ob_digit[i-1] : 0;
digit bit;
if (i <= 1 && bi <= 3) {
/* aim for minimal overhead */
if (--i < 0) {
break;
}
- bi = b->ob_digit[i];
+ bi = b->long_value.ob_digit[i];
bit = (digit)1 << (PyLong_SHIFT-1);
}
}
} while(0)
for (i = Py_SIZE(b) - 1; i >= 0; --i) {
- const digit bi = b->ob_digit[i];
+ const digit bi = b->long_value.ob_digit[i];
for (j = PyLong_SHIFT - 1; j >= 0; --j) {
const int bit = (bi >> j) & 1;
pending = (pending << 1) | bit;
}
hishift = PyLong_SHIFT - remshift;
- accum = a->ob_digit[wordshift];
+ accum = a->long_value.ob_digit[wordshift];
if (a_negative) {
/*
For a positive integer a and nonnegative shift, we have:
digit sticky = 0;
for (Py_ssize_t j = 0; j < wordshift; j++) {
- sticky |= a->ob_digit[j];
+ sticky |= a->long_value.ob_digit[j];
}
accum += (PyLong_MASK >> hishift) + (digit)(sticky != 0);
}
accum >>= remshift;
for (Py_ssize_t i = 0, j = wordshift + 1; j < size_a; i++, j++) {
- accum += (twodigits)a->ob_digit[j] << hishift;
- z->ob_digit[i] = (digit)(accum & PyLong_MASK);
+ accum += (twodigits)a->long_value.ob_digit[j] << hishift;
+ z->long_value.ob_digit[i] = (digit)(accum & PyLong_MASK);
accum >>= PyLong_SHIFT;
}
assert(accum <= PyLong_MASK);
- z->ob_digit[newsize - 1] = (digit)accum;
+ z->long_value.ob_digit[newsize - 1] = (digit)accum;
z = maybe_small_long(long_normalize(z));
return (PyObject *)z;
Py_SET_SIZE(z, -Py_SIZE(z));
}
for (i = 0; i < wordshift; i++)
- z->ob_digit[i] = 0;
+ z->long_value.ob_digit[i] = 0;
accum = 0;
for (j = 0; j < oldsize; i++, j++) {
- accum |= (twodigits)a->ob_digit[j] << remshift;
- z->ob_digit[i] = (digit)(accum & PyLong_MASK);
+ accum |= (twodigits)a->long_value.ob_digit[j] << remshift;
+ z->long_value.ob_digit[i] = (digit)(accum & PyLong_MASK);
accum >>= PyLong_SHIFT;
}
if (remshift)
- z->ob_digit[newsize-1] = (digit)accum;
+ z->long_value.ob_digit[newsize-1] = (digit)accum;
else
assert(!accum);
z = long_normalize(z);
z = _PyLong_New(size_a);
if (z == NULL)
return NULL;
- v_complement(z->ob_digit, a->ob_digit, size_a);
+ v_complement(z->long_value.ob_digit, a->long_value.ob_digit, size_a);
a = z;
}
else
Py_DECREF(a);
return NULL;
}
- v_complement(z->ob_digit, b->ob_digit, size_b);
+ v_complement(z->long_value.ob_digit, b->long_value.ob_digit, size_b);
b = z;
}
else
switch(op) {
case '&':
for (i = 0; i < size_b; ++i)
- z->ob_digit[i] = a->ob_digit[i] & b->ob_digit[i];
+ z->long_value.ob_digit[i] = a->long_value.ob_digit[i] & b->long_value.ob_digit[i];
break;
case '|':
for (i = 0; i < size_b; ++i)
- z->ob_digit[i] = a->ob_digit[i] | b->ob_digit[i];
+ z->long_value.ob_digit[i] = a->long_value.ob_digit[i] | b->long_value.ob_digit[i];
break;
case '^':
for (i = 0; i < size_b; ++i)
- z->ob_digit[i] = a->ob_digit[i] ^ b->ob_digit[i];
+ z->long_value.ob_digit[i] = a->long_value.ob_digit[i] ^ b->long_value.ob_digit[i];
break;
default:
Py_UNREACHABLE();
/* Copy any remaining digits of a, inverting if necessary. */
if (op == '^' && negb)
for (; i < size_z; ++i)
- z->ob_digit[i] = a->ob_digit[i] ^ PyLong_MASK;
+ z->long_value.ob_digit[i] = a->long_value.ob_digit[i] ^ PyLong_MASK;
else if (i < size_z)
- memcpy(&z->ob_digit[i], &a->ob_digit[i],
+ memcpy(&z->long_value.ob_digit[i], &a->long_value.ob_digit[i],
(size_z-i)*sizeof(digit));
/* Complement result if negative. */
if (negz) {
Py_SET_SIZE(z, -(Py_SIZE(z)));
- z->ob_digit[size_z] = PyLong_MASK;
- v_complement(z->ob_digit, z->ob_digit, size_z+1);
+ z->long_value.ob_digit[size_z] = PyLong_MASK;
+ v_complement(z->long_value.ob_digit, z->long_value.ob_digit, size_z+1);
}
Py_DECREF(a);
alloc_b = Py_SIZE(b);
/* reduce until a fits into 2 digits */
while ((size_a = Py_SIZE(a)) > 2) {
- nbits = bit_length_digit(a->ob_digit[size_a-1]);
+ nbits = bit_length_digit(a->long_value.ob_digit[size_a-1]);
/* extract top 2*PyLong_SHIFT bits of a into x, along with
corresponding bits of b into y */
size_b = Py_SIZE(b);
Py_XDECREF(d);
return (PyObject *)r;
}
- x = (((twodigits)a->ob_digit[size_a-1] << (2*PyLong_SHIFT-nbits)) |
- ((twodigits)a->ob_digit[size_a-2] << (PyLong_SHIFT-nbits)) |
- (a->ob_digit[size_a-3] >> nbits));
+ x = (((twodigits)a->long_value.ob_digit[size_a-1] << (2*PyLong_SHIFT-nbits)) |
+ ((twodigits)a->long_value.ob_digit[size_a-2] << (PyLong_SHIFT-nbits)) |
+ (a->long_value.ob_digit[size_a-3] >> nbits));
- y = ((size_b >= size_a - 2 ? b->ob_digit[size_a-3] >> nbits : 0) |
- (size_b >= size_a - 1 ? (twodigits)b->ob_digit[size_a-2] << (PyLong_SHIFT-nbits) : 0) |
- (size_b >= size_a ? (twodigits)b->ob_digit[size_a-1] << (2*PyLong_SHIFT-nbits) : 0));
+ y = ((size_b >= size_a - 2 ? b->long_value.ob_digit[size_a-3] >> nbits : 0) |
+ (size_b >= size_a - 1 ? (twodigits)b->long_value.ob_digit[size_a-2] << (PyLong_SHIFT-nbits) : 0) |
+ (size_b >= size_a ? (twodigits)b->long_value.ob_digit[size_a-1] << (2*PyLong_SHIFT-nbits) : 0));
/* inner loop of Lehmer's algorithm; A, B, C, D never grow
larger than PyLong_MASK during the algorithm. */
if (d == NULL)
goto error;
}
- a_end = a->ob_digit + size_a;
- b_end = b->ob_digit + size_b;
+ a_end = a->long_value.ob_digit + size_a;
+ b_end = b->long_value.ob_digit + size_b;
/* compute new a and new b in parallel */
- a_digit = a->ob_digit;
- b_digit = b->ob_digit;
- c_digit = c->ob_digit;
- d_digit = d->ob_digit;
+ a_digit = a->long_value.ob_digit;
+ b_digit = b->long_value.ob_digit;
+ c_digit = c->long_value.ob_digit;
+ d_digit = d->long_value.ob_digit;
c_carry = 0;
d_carry = 0;
while (b_digit < b_end) {
assert(PyLong_Check(newobj));
Py_SET_SIZE(newobj, Py_SIZE(tmp));
for (i = 0; i < n; i++) {
- newobj->ob_digit[i] = tmp->ob_digit[i];
+ newobj->long_value.ob_digit[i] = tmp->long_value.ob_digit[i];
}
Py_DECREF(tmp);
return (PyObject *)newobj;
cmp = long_compare((PyLongObject *)twice_rem, (PyLongObject *)b);
Py_DECREF(twice_rem);
- quo_is_odd = Py_SIZE(quo) != 0 && ((quo->ob_digit[0] & 1) != 0);
+ quo_is_odd = Py_SIZE(quo) != 0 && ((quo->long_value.ob_digit[0] & 1) != 0);
if ((Py_SIZE(b) < 0 ? cmp < 0 : cmp > 0) || (cmp == 0 && quo_is_odd)) {
/* fix up quotient */
if (quo_is_neg)
{
Py_ssize_t res;
- res = offsetof(PyLongObject, ob_digit)
+ res = offsetof(PyLongObject, long_value.ob_digit)
/* using Py_MAX(..., 1) because we always allocate space for at least
one digit, even though the integer zero has a Py_SIZE of 0 */
+ Py_MAX(Py_ABS(Py_SIZE(self)), 1)*sizeof(digit);
if (ndigits == 0)
return PyLong_FromLong(0);
- msd = ((PyLongObject *)self)->ob_digit[ndigits-1];
+ msd = ((PyLongObject *)self)->long_value.ob_digit[ndigits-1];
msd_bits = bit_length_digit(msd);
if (ndigits <= PY_SSIZE_T_MAX/PyLong_SHIFT)
Py_ssize_t. */
Py_ssize_t ndigits_fast = Py_MIN(ndigits, PY_SSIZE_T_MAX/PyLong_SHIFT);
for (Py_ssize_t i = 0; i < ndigits_fast; i++) {
- bit_count += popcount_digit(z->ob_digit[i]);
+ bit_count += popcount_digit(z->long_value.ob_digit[i]);
}
PyObject *result = PyLong_FromSsize_t(bit_count);
/* Use Python integers if bit_count would overflow. */
for (Py_ssize_t i = ndigits_fast; i < ndigits; i++) {
- PyObject *x = PyLong_FromLong(popcount_digit(z->ob_digit[i]));
+ PyObject *x = PyLong_FromLong(popcount_digit(z->long_value.ob_digit[i]));
if (x == NULL) {
goto error;
}
PyTypeObject PyLong_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"int", /* tp_name */
- offsetof(PyLongObject, ob_digit), /* tp_basicsize */
+ offsetof(PyLongObject, long_value.ob_digit), /* tp_basicsize */
sizeof(digit), /* tp_itemsize */
0, /* tp_dealloc */
0, /* tp_vectorcall_offset */