Skip to content

math.big #

Constants #

const (
	zero_int = Integer{
		digits: []u32{len: 0}
		signum: 0
		is_const: true
	}
	one_int = Integer{
		digits: [u32(1)]
		signum: 1
		is_const: true
	}
	two_int = Integer{
		digits: [u32(2)]
		signum: 1
		is_const: true
	}
)

fn integer_from_bytes #

fn integer_from_bytes(input []u8, config IntegerConfig) Integer

integer_from_bytes creates a new big.Integer from the given byte array.
By default, positive integers are assumed.
If you want a negative integer, use in the following manner: value := big.integer_from_bytes(bytes, signum: -1)

fn integer_from_i64 #

fn integer_from_i64(value i64) Integer

integer_from_i64 creates a new big.Integer from the given i64 value.

fn integer_from_int #

fn integer_from_int(value int) Integer

integer_from_int creates a new big.Integer from the given int value.

fn integer_from_radix #

fn integer_from_radix(all_characters string, radix u32) ?Integer

integer_from_radix creates a new big.Integer from the given string and radix.

fn integer_from_string #

fn integer_from_string(characters string) ?Integer

integer_from_string creates a new big.Integer from the decimal digits specified in the given string.
For other bases, use big.integer_from_radix instead.

fn integer_from_u32 #

fn integer_from_u32(value u32) Integer

integer_from_u32 creates a new big.Integer from the given u32 value.

fn integer_from_u64 #

fn integer_from_u64(value u64) Integer

integer_from_u64 creates a new big.Integer from the given u64 value.

struct Integer #

struct Integer {
	digits []u32
pub:
	signum   int
	is_const bool
}

big.Integer

It has the following properties:

  1. Every "digit" is an integer in the range [0, 2^32).
  2. The signum can be one of three values: -1, 0, +1 for negative, zero, and positive values, respectively.
  3. There should be no leading zeros in the digit array.
  4. The digits are stored in little endian format, that is, the digits with a lower positional value (towards the right when represented as a string) have a lower index, and vice versa.

fn (Integer) abs #

fn (integer Integer) abs() Integer

abs returns the absolute value of the integer.

fn (Integer) neg #

fn (integer Integer) neg() Integer

neg returns the result of negation of the integer.

fn (Integer) + #

fn (integer Integer) + (addend Integer) Integer

fn (Integer) - #

fn (integer Integer) - (subtrahend Integer) Integer

fn (Integer) * #

fn (integer Integer) * (multiplicand Integer) Integer

fn (Integer) div_mod #

fn (integer Integer) div_mod(divisor Integer) (Integer, Integer)

div_mod returns the quotient and remainder of the integer division.

fn (Integer) / #

fn (a Integer) / (b Integer) Integer

fn (Integer) % #

fn (a Integer) % (b Integer) Integer

fn (Integer) pow #

fn (a Integer) pow(exponent u32) Integer

pow returns the integer a raised to the power of the u32 exponent.

fn (Integer) mod_pow #

fn (a Integer) mod_pow(exponent u32, divisor Integer) Integer

mod_pow returns the integer a raised to the power of the u32 exponent modulo the integer divisor.

fn (Integer) big_mod_pow #

fn (a Integer) big_mod_pow(exponent Integer, divisor Integer) Integer

big_mod_pow returns the integer a raised to the power of the integer exponent modulo the integer divisor.

fn (Integer) inc #

fn (mut a Integer) inc()

inc increments the integer a by 1 in place (the receiver is mutable; nothing is returned).

fn (Integer) dec #

fn (mut a Integer) dec()

dec decrements the integer a by 1 in place (the receiver is mutable; nothing is returned).

fn (Integer) == #

fn (a Integer) == (b Integer) bool

fn (Integer) abs_cmp #

fn (a Integer) abs_cmp(b Integer) int

abs_cmp returns the result of comparing the magnitudes of the integers a and b.
It returns a negative int if |a| < |b|, 0 if |a| == |b|, and a positive int if |a| > |b|.

fn (Integer) < #

fn (a Integer) < (b Integer) bool

fn (Integer) get_bit #

fn (a Integer) get_bit(i u32) bool

get_bit checks whether the bit at the given index is set.

fn (Integer) set_bit #

fn (mut a Integer) set_bit(i u32, value bool)

set_bit sets the bit at the given index to the given value.

fn (Integer) bitwise_or #

fn (a Integer) bitwise_or(b Integer) Integer

bitwise_or returns the "bitwise or" of the integers a and b.

fn (Integer) bitwise_and #

fn (a Integer) bitwise_and(b Integer) Integer

bitwise_and returns the "bitwise and" of the integers a and b.

fn (Integer) bitwise_not #

fn (a Integer) bitwise_not() Integer

bitwise_not returns the "bitwise not" of the integer a.

fn (Integer) bitwise_xor #

fn (a Integer) bitwise_xor(b Integer) Integer

bitwise_xor returns the "bitwise exclusive or" of the integers a and b.

fn (Integer) lshift #

fn (a Integer) lshift(amount u32) Integer

lshift returns the integer a shifted left by amount bits.

fn (Integer) rshift #

fn (a Integer) rshift(amount u32) Integer

rshift returns the integer a shifted right by amount bits.

fn (Integer) binary_str #

fn (integer Integer) binary_str() string

binary_str returns the binary string representation of the integer a.

fn (Integer) hex #

fn (integer Integer) hex() string

hex returns the hexadecimal string representation of the integer a.

fn (Integer) radix_str #

fn (integer Integer) radix_str(radix u32) string

radix_str returns the string representation of the integer a in the specified radix.

fn (Integer) str #

fn (integer Integer) str() string

str returns the decimal string representation of the integer a.

fn (Integer) int #

fn (a Integer) int() int

int returns the integer value of the integer a.
NOTE: This may cause loss of precision.

fn (Integer) bytes #

fn (a Integer) bytes() ([]u8, int)

bytes returns a byte representation of the integer a, along with the signum int.
NOTE: The byte array returned is in big endian order.

fn (Integer) factorial #

fn (a Integer) factorial() Integer

factorial returns the factorial of the integer a.

fn (Integer) isqrt #

fn (a Integer) isqrt() Integer

isqrt returns the closest integer square root of the given integer.

fn (Integer) gcd #

fn (a Integer) gcd(b Integer) Integer

gcd returns the greatest common divisor of the two integers a and b.

fn (Integer) bit_len #

fn (x Integer) bit_len() int

bit_len returns the number of bits required to represent the integer x.

struct IntegerConfig #

struct IntegerConfig {
	signum int = 1
}