S->t[1] += (S->t[0] < inc);
}
-static FORCE_INLINE void blake2b_invalidate_state(blake2b_state* S) {
- //clear_internal_memory(S, sizeof(*S)); /* wipe */
- blake2b_set_lastblock(S); /* invalidate for further use */
-}
-
static FORCE_INLINE void blake2b_init0(blake2b_state* S) {
memset(S, 0, sizeof(*S));
memcpy(S->h, blake2b_IV, sizeof(S->h));
#if defined(_M_X64) || defined(__x86_64__)
#define HASHX_COMPILER 1
#define HASHX_COMPILER_X86
-#define hashx_compile hashx_compile_x86
+#define hashx_compile(p,c) hashx_compile_x86(p,c)
#elif defined(__aarch64__)
#define HASHX_COMPILER 1
#define HASHX_COMPILER_A64
-#define hashx_compile hashx_compile_a64
+#define hashx_compile(p,c) hashx_compile_a64(p,c)
#else
#define HASHX_COMPILER 0
-#define hashx_compile
+#define hashx_compile(p,c)
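+/* With no compiler backend, a call such as hashx_compile(p, c); expands to an
+ * empty statement; the function-like form also keeps a bare hashx_compile
+ * token from silently expanding to nothing. */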
#endif
HASHX_PRIVATE bool hashx_compiler_init(hashx_ctx* compiler);
uint8_t* pos = code;
uint8_t* target = NULL;
EMIT(pos, x86_prologue);
- for (int i = 0; i < program->code_size; ++i) {
+ for (size_t i = 0; i < program->code_size; ++i) {
const instruction* instr = &program->code[i];
switch (instr->opcode)
{
#else
#define FORCE_INLINE
#endif
-#endif
\ No newline at end of file
+#endif
return type <= INSTR_MUL_R;
}
+#ifdef HASHX_PROGRAM_STATS
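+/* Only needed when program statistics are collected; guarding it keeps
+ * normal builds from defining an unused static function. */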
/* If the instruction is a 64x64->128 bit multiplication. */
static inline bool is_wide_mul(instr_type type) {
return type < INSTR_MUL_R;
}
+#endif
/* Ivy Bridge integer execution ports: P0, P1, P5 */
typedef enum execution_port {
typedef struct register_info {
int latency; /* cycle when the register value will be ready */
instr_type last_op; /* last op applied to the register */
- int last_op_par; /* parameter of the last op (-1 = constant) */
+ uint32_t last_op_par; /* parameter of the last op (~0 = constant) */
} register_info;
typedef struct program_item {
execution_port ports[PORT_MAP_SIZE][NUM_PORTS];
} generator_ctx;
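+/* "static const", not "const static": a storage-class specifier anywhere but
+ * first among the declaration specifiers is obsolescent C (C11 6.11.5) and
+ * draws warnings from some compilers. */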
-const static instr_template tpl_umulh_r = {
+static const instr_template tpl_umulh_r = {
.type = INSTR_UMULH_R,
.x86_asm = "mul r",
.x86_size = 9, /* mov, mul, mov */
.has_dst = true,
};
-const static instr_template tpl_smulh_r = {
+static const instr_template tpl_smulh_r = {
.type = INSTR_SMULH_R,
.x86_asm = "imul r",
.x86_size = 9, /* mov, imul, mov */
.has_dst = true,
};
-const static instr_template tpl_mul_r = {
+static const instr_template tpl_mul_r = {
.type = INSTR_MUL_R,
.x86_asm = "imul r,r",
.x86_size = 4,
.has_dst = true,
};
-const static instr_template tpl_sub_r = {
+static const instr_template tpl_sub_r = {
.type = INSTR_SUB_R,
.x86_asm = "sub r,r",
.x86_size = 3,
.has_dst = true,
};
-const static instr_template tpl_xor_r = {
+static const instr_template tpl_xor_r = {
.type = INSTR_XOR_R,
.x86_asm = "xor r,r",
.x86_size = 3,
.has_dst = true,
};
-const static instr_template tpl_add_rs = {
+static const instr_template tpl_add_rs = {
.type = INSTR_ADD_RS,
.x86_asm = "lea r,r+r*s",
.x86_size = 4,
.has_dst = true,
};
-const static instr_template tpl_ror_c = {
+static const instr_template tpl_ror_c = {
.type = INSTR_ROR_C,
.x86_asm = "ror r,i",
.x86_size = 4,
.has_dst = true,
};
-const static instr_template tpl_add_c = {
+static const instr_template tpl_add_c = {
.type = INSTR_ADD_C,
.x86_asm = "add r,i",
.x86_size = 7,
.has_dst = true,
};
-const static instr_template tpl_xor_c = {
+static const instr_template tpl_xor_c = {
.type = INSTR_XOR_C,
.x86_asm = "xor r,i",
.x86_size = 7,
};
-const static instr_template tpl_target = {
+static const instr_template tpl_target = {
.type = INSTR_TARGET,
.x86_asm = "cmovz esi, edi",
.x86_size = 5, /* test, cmovz */
.has_dst = false,
};
-const static instr_template tpl_branch = {
+static const instr_template tpl_branch = {
.type = INSTR_BRANCH,
.x86_asm = "jz target",
.x86_size = 10, /* or, test, jz */
.has_dst = false,
};
-const static instr_template* instr_lookup[] = {
+static const instr_template* instr_lookup[] = {
&tpl_ror_c,
&tpl_xor_c,
&tpl_add_c,
&tpl_add_rs,
};
-const static instr_template* wide_mul_lookup[] = {
+static const instr_template* wide_mul_lookup[] = {
&tpl_smulh_r,
&tpl_umulh_r
};
-const static instr_template* mul_lookup = &tpl_mul_r;
-const static instr_template* target_lookup = &tpl_target;
-const static instr_template* branch_lookup = &tpl_branch;
+static const instr_template* mul_lookup = &tpl_mul_r;
+static const instr_template* target_lookup = &tpl_target;
+static const instr_template* branch_lookup = &tpl_branch;
-const static program_item item_mul = {
+static const program_item item_mul = {
.templates = &mul_lookup,
.mask0 = 0,
.mask1 = 0,
.duplicates = true
};
-const static program_item item_target = {
+static const program_item item_target = {
.templates = &target_lookup,
.mask0 = 0,
.mask1 = 0,
.duplicates = true
};
-const static program_item item_branch = {
+static const program_item item_branch = {
.templates = &branch_lookup,
.mask0 = 0,
.mask1 = 0,
.duplicates = true
};
-const static program_item item_wide_mul = {
+static const program_item item_wide_mul = {
.templates = wide_mul_lookup,
.mask0 = 1,
.mask1 = 1,
.duplicates = true
};
-const static program_item item_any = {
+static const program_item item_any = {
.templates = instr_lookup,
.mask0 = 7,
.mask1 = 3, /* instructions that don't need a src register */
.duplicates = false
};
-const static program_item* program_layout[] = {
+static const program_item* program_layout[] = {
&item_mul,
&item_target,
&item_any,
.mul_count = 0,
.chain_mul = false,
.latency = 0,
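+ /* ports is a two-dimensional array, so brace each level of the
+  * initializer; a single { 0 } draws -Wmissing-braces on some compilers */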
- .ports = { 0 }
+ .ports = {{ 0 }}
};
hashx_siphash_rng_init(&ctx.gen, key);
for (int i = 0; i < 8; ++i) {
ctx.registers[i].last_op = -1;
ctx.registers[i].latency = 0;
- ctx.registers[i].last_op_par = -1;
+ ctx.registers[i].last_op_par = (uint32_t)-1;
}
program->code_size = 0;
#ifndef HAVE_UMULH
#define LO(x) ((x)&0xffffffff)
#define HI(x) ((x)>>32)
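+/* Portable fallback: build the high 64 bits of the 64x64 product from four
+ * 32x32->64 partial products. */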
-uint64_t umulh(uint64_t a, uint64_t b) {
+static uint64_t umulh(uint64_t a, uint64_t b) {
uint64_t ah = HI(a), al = LO(a);
uint64_t bh = HI(b), bl = LO(b);
uint64_t x00 = al * bl;
#endif
#ifndef HAVE_SMULH
-int64_t smulh(int64_t a, int64_t b) {
+static int64_t smulh(int64_t a, int64_t b) {
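+ /* two's-complement identity: the signed high half equals the unsigned high
+  * half minus b when a < 0 and minus a when b < 0 */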
int64_t hi = umulh(a, b);
if (a < 0LL) hi -= b;
if (b < 0LL) hi -= a;
static FORCE_INLINE uint64_t sign_extend_2s_compl(uint32_t x) {
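+ /* (-1 == ~0) tests for two's complement; the condition is a compile-time
+  * constant, so the unused arm folds away */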
return (-1 == ~0) ?
- (int64_t)(int32_t)(x) :
+ (uint64_t)(int64_t)(int32_t)(x) :
(x > INT32_MAX ? (x | 0xffffffff00000000ULL) : (uint64_t)x);
}
void hashx_program_execute(const hashx_program* program, uint64_t r[8]) {
- int target = 0;
+ size_t target = 0;
bool branch_enable = true;
uint32_t result = 0;
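+ /* result holds the low 32 bits of the most recent multiplication; the
+  * casts below make that narrowing explicit */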
+#ifdef HASHX_PROGRAM_STATS
int branch_idx = 0;
- for (int i = 0; i < program->code_size; ++i) {
+#endif
+ for (size_t i = 0; i < program->code_size; ++i) {
const instruction* instr = &program->code[i];
switch (instr->opcode)
{
case INSTR_UMULH_R:
- result = r[instr->dst] = umulh(r[instr->dst], r[instr->src]);
+ result = (uint32_t) (r[instr->dst] = umulh(r[instr->dst],
+ r[instr->src]));
break;
case INSTR_SMULH_R:
- result = r[instr->dst] = smulh(r[instr->dst], r[instr->src]);
+ result = (uint32_t) (r[instr->dst] = smulh(r[instr->dst],
+ r[instr->src]));
break;
case INSTR_MUL_R:
r[instr->dst] *= r[instr->src];
((hashx_program*)program)->branches[branch_idx]++;
#endif
}
+#ifdef HASHX_PROGRAM_STATS
branch_idx++;
+#endif
break;
default:
UNREACHABLE;
gen->count32 = sizeof(gen->buffer32) / sizeof(uint32_t);
}
gen->count32--;
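+ /* the 64-bit buffer is consumed as two 32-bit words; the cast documents the
+  * intended truncation */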
- return gen->buffer32 >> (gen->count32 * 32);
+ return (uint32_t)(gen->buffer32 >> (gen->count32 * 32));
}
static const uint64_t counter2 = 123456;
static const uint64_t counter3 = 987654321123456789;
+#ifdef HASHX_BLOCK_MODE
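+/* long_input is used only by the block-mode tests; guarding it keeps other
+ * builds from defining an unused array. */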
static const unsigned char long_input[] = {
0x0b, 0x0b, 0x98, 0xbe, 0xa7, 0xe8, 0x05, 0xe0, 0x01, 0x0a, 0x21, 0x26,
0xd2, 0x87, 0xa2, 0xa0, 0xcc, 0x83, 0x3d, 0x31, 0x2c, 0xb7, 0x86, 0x38,
0x4e, 0xca, 0x62, 0x92, 0x76, 0x81, 0x7b, 0x56, 0xf3, 0x2e, 0x9b, 0x68,
0xbd, 0x82, 0xf4, 0x16
};
+#endif
#define RUN_TEST(x) run_test(#x, &x)
void hashx_vm_free(void* ptr, size_t bytes) {
#ifdef HASHX_WIN
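+ /* VirtualFree with MEM_RELEASE must be passed size 0, so bytes is
+  * deliberately unused on Windows */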
+ (void)bytes;
VirtualFree(ptr, 0, MEM_RELEASE);
#else
munmap(ptr, bytes);
static bool verify_order(const equix_solution* solution) {
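+ /* chain the bool results with logical &&; bitwise & computed the same value
+  * here but evaluates every operand and is warning-prone */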
return
- tree_cmp4(&solution->idx[0], &solution->idx[4]) &
- tree_cmp2(&solution->idx[0], &solution->idx[2]) &
- tree_cmp2(&solution->idx[4], &solution->idx[6]) &
- tree_cmp1(&solution->idx[0], &solution->idx[1]) &
- tree_cmp1(&solution->idx[2], &solution->idx[3]) &
- tree_cmp1(&solution->idx[4], &solution->idx[5]) &
+ tree_cmp4(&solution->idx[0], &solution->idx[4]) &&
+ tree_cmp2(&solution->idx[0], &solution->idx[2]) &&
+ tree_cmp2(&solution->idx[4], &solution->idx[6]) &&
+ tree_cmp1(&solution->idx[0], &solution->idx[1]) &&
+ tree_cmp1(&solution->idx[2], &solution->idx[3]) &&
+ tree_cmp1(&solution->idx[4], &solution->idx[5]) &&
tree_cmp1(&solution->idx[6], &solution->idx[7]);
}
CLEAR(heap->scratch_ht.counts);
u32 cpl_buck_size = STAGE1_SIZE(cpl_bucket);
for (u32 item_idx = 0; item_idx < cpl_buck_size; ++item_idx) {
- stage1_data_item value = STAGE1_DATA(cpl_bucket, item_idx);
- u32 fine_buck_idx = value % NUM_FINE_BUCKETS;
- u32 fine_item_idx = SCRATCH_SIZE(fine_buck_idx);
- if (fine_item_idx >= FINE_BUCKET_ITEMS)
- continue;
- SCRATCH_SIZE(fine_buck_idx) = fine_item_idx + 1;
- SCRATCH(fine_buck_idx, fine_item_idx) = item_idx;
+ {
+ stage1_data_item value = STAGE1_DATA(cpl_bucket, item_idx);
+ u32 fine_buck_idx = value % NUM_FINE_BUCKETS;
+ u32 fine_item_idx = SCRATCH_SIZE(fine_buck_idx);
+ if (fine_item_idx >= FINE_BUCKET_ITEMS)
+ continue;
+ SCRATCH_SIZE(fine_buck_idx) = fine_item_idx + 1;
+ SCRATCH(fine_buck_idx, fine_item_idx) = item_idx;
+ }
if (cpl_bucket == bucket_idx) {
MAKE_PAIRS1
}
STAGE3_IDX(s3_buck_id, s3_item_id) = \
MAKE_ITEM(bucket_idx, item_idx, cpl_index); \
STAGE3_DATA(s3_buck_id, s3_item_id) = \
- sum / NUM_COARSE_BUCKETS; /* 22 bits */ \
+ (stage3_data_item)(sum / NUM_COARSE_BUCKETS); /* 22 bits */ \
} \
static void solve_stage2(solver_heap* heap) {
CLEAR(heap->scratch_ht.counts);
u32 cpl_buck_size = STAGE2_SIZE(cpl_bucket);
for (u32 item_idx = 0; item_idx < cpl_buck_size; ++item_idx) {
- stage2_data_item value = STAGE2_DATA(cpl_bucket, item_idx);
- u32 fine_buck_idx = value % NUM_FINE_BUCKETS;
- u32 fine_item_idx = SCRATCH_SIZE(fine_buck_idx);
- if (fine_item_idx >= FINE_BUCKET_ITEMS)
- continue;
- SCRATCH_SIZE(fine_buck_idx) = fine_item_idx + 1;
- SCRATCH(fine_buck_idx, fine_item_idx) = item_idx;
+ {
+ stage2_data_item value = STAGE2_DATA(cpl_bucket, item_idx);
+ u32 fine_buck_idx = value % NUM_FINE_BUCKETS;
+ u32 fine_item_idx = SCRATCH_SIZE(fine_buck_idx);
+ if (fine_item_idx >= FINE_BUCKET_ITEMS)
+ continue;
+ SCRATCH_SIZE(fine_buck_idx) = fine_item_idx + 1;
+ SCRATCH(fine_buck_idx, fine_item_idx) = item_idx;
+ }
if (cpl_bucket == bucket_idx) {
MAKE_PAIRS2
}
for (u32 bucket_idx = BUCK_START; bucket_idx < BUCK_END; ++bucket_idx) {
u32 cpl_bucket = -bucket_idx & (NUM_COARSE_BUCKETS - 1);
- bool nodup = cpl_bucket == bucket_idx;
CLEAR(heap->scratch_ht.counts);
u32 cpl_buck_size = STAGE3_SIZE(cpl_bucket);
for (u32 item_idx = 0; item_idx < cpl_buck_size; ++item_idx) {
- stage3_data_item value = STAGE3_DATA(cpl_bucket, item_idx);
- u32 fine_buck_idx = value % NUM_FINE_BUCKETS;
- u32 fine_item_idx = SCRATCH_SIZE(fine_buck_idx);
- if (fine_item_idx >= FINE_BUCKET_ITEMS)
- continue;
- SCRATCH_SIZE(fine_buck_idx) = fine_item_idx + 1;
- SCRATCH(fine_buck_idx, fine_item_idx) = item_idx;
+ {
+ stage3_data_item value = STAGE3_DATA(cpl_bucket, item_idx);
+ u32 fine_buck_idx = value % NUM_FINE_BUCKETS;
+ u32 fine_item_idx = SCRATCH_SIZE(fine_buck_idx);
+ if (fine_item_idx >= FINE_BUCKET_ITEMS)
+ continue;
+ SCRATCH_SIZE(fine_buck_idx) = fine_item_idx + 1;
+ SCRATCH(fine_buck_idx, fine_item_idx) = item_idx;
+ }
if (cpl_bucket == bucket_idx) {
MAKE_PAIRS3
}