X-Git-Url: https://www.flypig.org.uk/git/?a=blobdiff_plain;f=OpenCL%2Fm05000_a0.cl;h=44a3e62e17fb2c00feed68e9f2abb405ffc86ce1;hb=161a6eb4bc643d8e636e96eda613f5137d30da59;hp=08371b44bd4ffb7c89051df355c4b6ef75aa7eac;hpb=0bf4e3c34a6a799ccc34f403bed70119574ca9c8;p=hashcat.git diff --git a/OpenCL/m05000_a0.cl b/OpenCL/m05000_a0.cl index 08371b4..44a3e62 100644 --- a/OpenCL/m05000_a0.cl +++ b/OpenCL/m05000_a0.cl @@ -5,22 +5,16 @@ #define _KECCAK_ -#include "include/constants.h" -#include "include/kernel_vendor.h" +#define NEW_SIMD_CODE -#define DGST_R0 2 -#define DGST_R1 3 -#define DGST_R2 4 -#define DGST_R3 5 - -#include "include/kernel_functions.c" -#include "types_ocl.c" -#include "common.c" -#include "include/rp_gpu.h" -#include "rp.c" - -#define COMPARE_S "check_single_comp4.c" -#define COMPARE_M "check_multi_comp4.c" +#include "inc_vendor.cl" +#include "inc_hash_constants.h" +#include "inc_hash_functions.cl" +#include "inc_types.cl" +#include "inc_common.cl" +#include "inc_rp.h" +#include "inc_rp.cl" +#include "inc_simd.cl" __constant u64 keccakf_rndc[24] = { @@ -34,18 +28,6 @@ __constant u64 keccakf_rndc[24] = 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 }; -__constant u32 keccakf_rotc[24] = -{ - 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, - 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 -}; - -__constant u32 keccakf_piln[24] = -{ - 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, - 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 -}; - #ifndef KECCAK_ROUNDS #define KECCAK_ROUNDS 24 #endif @@ -63,8 +45,8 @@ __constant u32 keccakf_piln[24] = #define Rho_Pi(s) \ { \ - u32 j = keccakf_piln[s]; \ - u32 k = keccakf_rotc[s]; \ + u32 j = keccakf_piln[s]; \ + u32 k = keccakf_rotc[s]; \ bc0 = st[j]; \ st[j] = rotl64 (t, k); \ t = bc0; \ @@ -84,7 +66,7 @@ __constant u32 keccakf_piln[24] = st[4 + s] ^= ~bc0 & bc1; \ } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__global pw_t *pws, __global gpu_rule_t * rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_m04 (__global pw_t *pws, __global kernel_rule_t * rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 
bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * modifier @@ -101,21 +83,35 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo if (gid >= gid_max) return; u32 pw_buf0[4]; - - pw_buf0[0] = pws[gid].i[ 0]; - pw_buf0[1] = pws[gid].i[ 1]; - pw_buf0[2] = pws[gid].i[ 2]; - pw_buf0[3] = pws[gid].i[ 3]; - u32 pw_buf1[4]; - pw_buf1[0] = pws[gid].i[ 4]; - pw_buf1[1] = pws[gid].i[ 5]; - pw_buf1[2] = pws[gid].i[ 6]; - pw_buf1[3] = pws[gid].i[ 7]; + pw_buf0[0] = pws[gid].i[0]; + pw_buf0[1] = pws[gid].i[1]; + pw_buf0[2] = pws[gid].i[2]; + pw_buf0[3] = pws[gid].i[3]; + pw_buf1[0] = pws[gid].i[4]; + pw_buf1[1] = pws[gid].i[5]; + pw_buf1[2] = pws[gid].i[6]; + pw_buf1[3] = pws[gid].i[7]; const u32 pw_len = pws[gid].pw_len; + /** + * constants + */ + + const u8 keccakf_rotc[24] = + { + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 + }; + + const u8 keccakf_piln[24] = + { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 + }; + /** * 0x80 keccak, very special */ @@ -130,56 +126,27 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo * loop */ - for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - u32 w0[4]; - - w0[0] = pw_buf0[0]; - w0[1] = pw_buf0[1]; - w0[2] = pw_buf0[2]; - w0[3] = pw_buf0[3]; - - u32 w1[4]; + u32x w0[4] = { 0 }; + u32x w1[4] = { 0 }; + u32x w2[4] = { 0 }; + u32x w3[4] = { 0 }; - w1[0] = pw_buf1[0]; - w1[1] = pw_buf1[1]; - w1[2] = pw_buf1[2]; - w1[3] = pw_buf1[3]; + const u32x out_len = apply_rules_vect (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w0, w1); - u32 w2[4]; + append_0x01_2x4_VV (w0, w1, out_len); - w2[0] = 0; - w2[1] = 0; - w2[2] = 0; - w2[3] = 0; + /** + * Keccak + */ - u32 w3[4]; - - w3[0] = 0; - w3[1] = 0; - w3[2] = 0; - w3[3] = 0; - - const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len); - - append_0x01_2 (w0, w1, out_len); - - u64 st[25]; - - #ifdef VECT_SIZE1 - st[ 0] = (u64x) (w0[0]) | (u64x) (w0[1]) << 32; - st[ 1] = (u64x) (w0[2]) | (u64x) (w0[3]) << 32; - st[ 2] = (u64x) (w1[0]) | (u64x) (w1[1]) << 32; - st[ 3] = (u64x) (w1[2]) | (u64x) (w1[3]) << 32; - #endif - - #ifdef VECT_SIZE2 - st[ 0] = (u64x) (w0[0].s0, w0[0].s1) | (u64x) (w0[1].s0, w0[1].s1) << 32; - st[ 1] = (u64x) (w0[2].s0, w0[2].s1) | (u64x) (w0[3].s0, w0[3].s1) << 32; - st[ 2] = (u64x) (w1[0].s0, w1[0].s1) | (u64x) (w1[1].s0, w1[1].s1) << 32; - st[ 3] = (u64x) (w1[2].s0, w1[2].s1) | (u64x) (w1[3].s0, w1[3].s1) << 32; - #endif + u64x st[25]; + st[ 0] = hl32_to_64 (w0[1], w0[0]); + st[ 1] = hl32_to_64 (w0[3], w0[2]); + st[ 2] = hl32_to_64 (w1[1], w1[0]); + st[ 3] = hl32_to_64 (w1[3], w1[2]); st[ 4] = 0; st[ 5] = 0; st[ 6] = 0; @@ -210,13 +177,13 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo { // Theta - u64 bc0 = Theta1 (0); - u64 bc1 = Theta1 (1); - u64 bc2 = Theta1 (2); - u64 bc3 = Theta1 (3); - u64 bc4 = Theta1 (4); + u64x bc0 = Theta1 (0); + u64x bc1 = Theta1 (1); + u64x bc2 = Theta1 (2); + u64x bc3 = Theta1 (3); + u64x bc4 = Theta1 (4); - u64 t; + u64x t; t = bc4 ^ rotl64 (bc1, 1); Theta2 (0); t = bc0 ^ rotl64 (bc2, 1); Theta2 (1); @@ -266,24 +233,24 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo st[0] ^= keccakf_rndc[round]; } - const u32 r0 = l32_from_64 
(st[1]); - const u32 r1 = h32_from_64 (st[1]); - const u32 r2 = l32_from_64 (st[2]); - const u32 r3 = h32_from_64 (st[2]); + const u32x r0 = l32_from_64 (st[1]); + const u32x r1 = h32_from_64 (st[1]); + const u32x r2 = l32_from_64 (st[2]); + const u32x r3 = h32_from_64 (st[2]); - #include COMPARE_M + COMPARE_M_SIMD (r0, r1, r2, r3); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 
*bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__global pw_t *pws, __global gpu_rule_t * rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_s04 (__global pw_t *pws, __global kernel_rule_t * rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * modifier @@ -300,31 +267,33 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo if (gid >= gid_max) return; u32 pw_buf0[4]; - - pw_buf0[0] = pws[gid].i[ 0]; - pw_buf0[1] = pws[gid].i[ 1]; - pw_buf0[2] = pws[gid].i[ 2]; - pw_buf0[3] = pws[gid].i[ 3]; - u32 pw_buf1[4]; - pw_buf1[0] = pws[gid].i[ 4]; - pw_buf1[1] = pws[gid].i[ 5]; - pw_buf1[2] = pws[gid].i[ 6]; - pw_buf1[3] = pws[gid].i[ 7]; + pw_buf0[0] = pws[gid].i[0]; + pw_buf0[1] = pws[gid].i[1]; + pw_buf0[2] = pws[gid].i[2]; + pw_buf0[3] = pws[gid].i[3]; + pw_buf1[0] = pws[gid].i[4]; + pw_buf1[1] = pws[gid].i[5]; + pw_buf1[2] = pws[gid].i[6]; + pw_buf1[3] = pws[gid].i[7]; const u32 pw_len = pws[gid].pw_len; /** - * digest + * constants */ - const u32 search[4] = + const u8 keccakf_rotc[24] = { - digests_buf[digests_offset].digest_buf[DGST_R0], - digests_buf[digests_offset].digest_buf[DGST_R1], - digests_buf[digests_offset].digest_buf[DGST_R2], - digests_buf[digests_offset].digest_buf[DGST_R3] + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 
62, 18, 39, 61, 20, 44 + }; + + const u8 keccakf_piln[24] = + { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 }; /** @@ -338,59 +307,42 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo const u32 add80w = (rsiz - 1) / 8; /** - * loop + * digest */ - for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++) + const u32 search[4] = { - u32 w0[4]; - - w0[0] = pw_buf0[0]; - w0[1] = pw_buf0[1]; - w0[2] = pw_buf0[2]; - w0[3] = pw_buf0[3]; - - u32 w1[4]; - - w1[0] = pw_buf1[0]; - w1[1] = pw_buf1[1]; - w1[2] = pw_buf1[2]; - w1[3] = pw_buf1[3]; - - u32 w2[4]; - - w2[0] = 0; - w2[1] = 0; - w2[2] = 0; - w2[3] = 0; - - u32 w3[4]; + digests_buf[digests_offset].digest_buf[DGST_R0], + digests_buf[digests_offset].digest_buf[DGST_R1], + digests_buf[digests_offset].digest_buf[DGST_R2], + digests_buf[digests_offset].digest_buf[DGST_R3] + }; - w3[0] = 0; - w3[1] = 0; - w3[2] = 0; - w3[3] = 0; + /** + * loop + */ - const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len); + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) + { + u32x w0[4] = { 0 }; + u32x w1[4] = { 0 }; + u32x w2[4] = { 0 }; + u32x w3[4] = { 0 }; - append_0x01_2 (w0, w1, out_len); + const u32x out_len = apply_rules_vect (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w0, w1); - u64 st[25]; + append_0x01_2x4_VV (w0, w1, out_len); - #ifdef VECT_SIZE1 - st[ 0] = (u64x) (w0[0]) | (u64x) (w0[1]) << 32; - st[ 1] = (u64x) (w0[2]) | (u64x) (w0[3]) << 32; - st[ 2] = (u64x) (w1[0]) | (u64x) (w1[1]) << 32; - st[ 3] = (u64x) (w1[2]) | (u64x) (w1[3]) << 32; - #endif + /** + * Keccak + */ - #ifdef VECT_SIZE2 - st[ 0] = (u64x) (w0[0].s0, w0[0].s1) | (u64x) (w0[1].s0, w0[1].s1) << 32; - st[ 1] = (u64x) (w0[2].s0, w0[2].s1) | (u64x) (w0[3].s0, w0[3].s1) << 32; - st[ 2] = (u64x) (w1[0].s0, w1[0].s1) | (u64x) (w1[1].s0, w1[1].s1) << 32; - st[ 3] = (u64x) (w1[2].s0, w1[2].s1) | (u64x) (w1[3].s0, w1[3].s1) << 32; - #endif + u64x st[25]; + st[ 0] = hl32_to_64 (w0[1], w0[0]); + st[ 1] = hl32_to_64 (w0[3], w0[2]); + st[ 2] = hl32_to_64 (w1[1], w1[0]); + st[ 3] = hl32_to_64 (w1[3], w1[2]); st[ 4] = 0; st[ 5] = 0; st[ 6] = 0; @@ -421,13 +373,13 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo { // Theta - u64 bc0 = Theta1 (0); - u64 bc1 = Theta1 (1); - u64 bc2 = Theta1 (2); - u64 bc3 = Theta1 (3); - u64 bc4 = Theta1 (4); + u64x bc0 = Theta1 (0); + u64x bc1 = Theta1 (1); + u64x bc2 = Theta1 (2); + u64x bc3 = Theta1 (3); + u64x bc4 = Theta1 (4); - u64 t; + u64x t; t = bc4 ^ rotl64 (bc1, 1); Theta2 (0); t = bc0 ^ rotl64 (bc2, 1); Theta2 (1); @@ -477,19 +429,19 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo st[0] ^= keccakf_rndc[round]; } - const u32 r0 = l32_from_64 (st[1]); - const u32 r1 = h32_from_64 (st[1]); - const u32 r2 = l32_from_64 (st[2]); - const u32 r3 = h32_from_64 (st[2]); + const u32x r0 = l32_from_64 (st[1]); + const u32x r1 = h32_from_64 (st[1]); + const u32x r2 = l32_from_64 (st[2]); + const u32x r3 = h32_from_64 (st[2]); - #include COMPARE_S + COMPARE_S_SIMD (r0, r1, r2, r3); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, 
__global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { }
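
A note on the lane packing introduced above: the new SIMD code drops the per-VECT_SIZE `(u64x)(...) << 32` constructors and instead loads the first four Keccak lanes with hl32_to_64 (), then reads the comparison words r0..r3 back out of st[1] and st[2] with l32_from_64 () / h32_from_64 (). The stand-alone sketch below (plain C, not the hashcat sources) shows the intended packing, assuming those helpers place their first argument in the high 32 bits and extract the low/high halves respectively, as their names suggest; the "abcde" candidate and its 0x01 padding byte are illustrative values only.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the hashcat helpers; assumed semantics only. */
static uint64_t hl32_to_64 (uint32_t h, uint32_t l)
{
  return ((uint64_t) h << 32) | l;           /* high word first, low word second */
}

static uint32_t l32_from_64 (uint64_t v) { return (uint32_t)  v;        }
static uint32_t h32_from_64 (uint64_t v) { return (uint32_t) (v >> 32); }

int main (void)
{
  /* w0[0..3]: first 16 bytes of a rule-mangled candidate after append_0x01,
   * e.g. "abcde" followed by the 0x01 Keccak padding byte (little-endian). */
  uint32_t w0[4] = { 0x64636261, 0x00000165, 0x00000000, 0x00000000 };

  /* Mirrors: st[ 0] = hl32_to_64 (w0[1], w0[0]);  st[ 1] = hl32_to_64 (w0[3], w0[2]); */
  uint64_t st0 = hl32_to_64 (w0[1], w0[0]);
  uint64_t st1 = hl32_to_64 (w0[3], w0[2]);

  /* After keccakf the kernel derives r0..r3 from st[1] and st[2] the same way: */
  printf ("st0 = %016llx  low = %08x  high = %08x\n",
          (unsigned long long) st0, l32_from_64 (st0), h32_from_64 (st0));
  printf ("st1 = %016llx\n", (unsigned long long) st1);

  return 0;
}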