X-Git-Url: https://www.flypig.org.uk/git/?a=blobdiff_plain;f=OpenCL%2Fm11800_a3.cl;h=3524e0f907d210d48428f7d48b849e5986c4e4f6;hb=161a6eb4bc643d8e636e96eda613f5137d30da59;hp=b1e81d70b1c287f374eccd5dba4d4d2b08fed0b6;hpb=0bf4e3c34a6a799ccc34f403bed70119574ca9c8;p=hashcat.git diff --git a/OpenCL/m11800_a3.cl b/OpenCL/m11800_a3.cl index b1e81d7..3524e0f 100644 --- a/OpenCL/m11800_a3.cl +++ b/OpenCL/m11800_a3.cl @@ -1,36 +1,46 @@ /** - * Author......: Jens Steube + * Authors.....: Jens Steube + * Gabriele Gristina + * magnum + * * License.....: MIT */ -#define _GOST2012_512_ +#define _GOST2012_256_ -#include "include/constants.h" -#include "include/kernel_vendor.h" +//too much register pressure +//#define NEW_SIMD_CODE -#define DGST_R0 0 -#define DGST_R1 1 -#define DGST_R2 2 -#define DGST_R3 3 - -#include "include/kernel_functions.c" -#include "types_ocl.c" -#include "common.c" - -#define COMPARE_S "check_single_comp4.c" -#define COMPARE_M "check_multi_comp4.c" +#include "inc_vendor.cl" +#include "inc_hash_constants.h" +#include "inc_hash_functions.cl" +#include "inc_types.cl" +#include "inc_common.cl" +#include "inc_simd.cl" #define INITVAL 0 -#define SBOG_LPSti64 \ - s_sbob_sl64[0][(t[0] >> (i * 8)) & 0xff] ^ \ - s_sbob_sl64[1][(t[1] >> (i * 8)) & 0xff] ^ \ - s_sbob_sl64[2][(t[2] >> (i * 8)) & 0xff] ^ \ - s_sbob_sl64[3][(t[3] >> (i * 8)) & 0xff] ^ \ - s_sbob_sl64[4][(t[4] >> (i * 8)) & 0xff] ^ \ - s_sbob_sl64[5][(t[5] >> (i * 8)) & 0xff] ^ \ - s_sbob_sl64[6][(t[6] >> (i * 8)) & 0xff] ^ \ - s_sbob_sl64[7][(t[7] >> (i * 8)) & 0xff] +#if VECT_SIZE == 1 +#define BOX(S,n,i) (S)[(n)][(i)] +#elif VECT_SIZE == 2 +#define BOX(S,n,i) (u64x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1]) +#elif VECT_SIZE == 4 +#define BOX(S,n,i) (u64x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3]) +#elif VECT_SIZE == 8 +#define BOX(S,n,i) (u64x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7]) +#elif VECT_SIZE == 16 +#define BOX(S,n,i) (u64x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf]) +#endif + +#define SBOG_LPSti64 \ + BOX (s_sbob_sl64, 0, ((t[0] >> (i * 8)) & 0xff)) ^ \ + BOX (s_sbob_sl64, 1, ((t[1] >> (i * 8)) & 0xff)) ^ \ + BOX (s_sbob_sl64, 2, ((t[2] >> (i * 8)) & 0xff)) ^ \ + BOX (s_sbob_sl64, 3, ((t[3] >> (i * 8)) & 0xff)) ^ \ + BOX (s_sbob_sl64, 4, ((t[4] >> (i * 8)) & 0xff)) ^ \ + BOX (s_sbob_sl64, 5, ((t[5] >> (i * 8)) & 0xff)) ^ \ + BOX (s_sbob_sl64, 6, ((t[6] >> (i * 8)) & 0xff)) ^ \ + BOX (s_sbob_sl64, 7, ((t[7] >> (i * 8)) & 0xff)) // constants @@ -2226,24 +2236,25 @@ __constant u64 sbob_rc64[12][8] = }, }; -static void streebog_g (u64 h[8], const u64 m[8], __local u64 s_sbob_sl64[8][256]) +void streebog_g (u64x h[8], const u64x m[8], __local u64 (*s_sbob_sl64)[256]) { - u64 k[8]; - u64 s[8]; - u64 t[8]; + u64x k[8]; + u64x s[8]; + u64x t[8]; - #pragma unroll for (int i = 0; i < 8; i++) { t[i] = h[i]; } + #ifdef _unroll + #pragma unroll + #endif for (int i = 0; i < 8; i++) { k[i] = SBOG_LPSti64; } - #pragma unroll for (int i = 0; i < 8; i++) { s[i] = m[i]; @@ -2251,13 +2262,14 @@ static void streebog_g (u64 h[8], const u64 m[8], __local u64 s_sbob_sl64[8][256 for (int r = 0; r < 12; r++) { - #pragma unroll for (int i = 0; i < 8; i++) { t[i] = s[i] ^ k[i]; 
} + #ifdef _unroll #pragma unroll + #endif for (int i = 0; i < 8; i++) { s[i] = SBOG_LPSti64; @@ -2268,21 +2280,22 @@ static void streebog_g (u64 h[8], const u64 m[8], __local u64 s_sbob_sl64[8][256 t[i] = k[i] ^ sbob_rc64[r][i]; } + #ifdef _unroll #pragma unroll + #endif for (int i = 0; i < 8; i++) { k[i] = SBOG_LPSti64; } } - #pragma unroll for (int i = 0; i < 8; i++) { h[i] ^= s[i] ^ k[i] ^ m[i]; } } -static void m11800m (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset) +void m11800m (__local u64 (*s_sbob_sl64)[256], u32 w[16], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset) { /** * modifier @@ -2297,17 +2310,17 @@ static void m11800m (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le u32 w0l = w[0]; - for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - const u32 w0r = bfs_buf[il_pos].i; + const u32x w0r = ix_create_bft (bfs_buf, il_pos); - w[0] = w0l | w0r; + const u32x w0lr = w0l | w0r; /** - * reverse message block + * GOST */ - u64 m[8]; + u64x m[8]; m[0] = hl32_to_64 (w[15], w[14]); m[1] = hl32_to_64 (w[13], w[12]); @@ -2316,20 +2329,20 @@ static void m11800m (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le m[4] = hl32_to_64 (w[ 7], w[ 6]); m[5] = hl32_to_64 (w[ 5], w[ 4]); m[6] = hl32_to_64 (w[ 3], w[ 2]); - m[7] = hl32_to_64 (w[ 1], w[ 0]); + m[7] = hl32_to_64 (w[ 1], w0lr ); - m[0] = swap_workaround (m[0]); - m[1] = swap_workaround (m[1]); - m[2] = swap_workaround (m[2]); - m[3] = swap_workaround (m[3]); - m[4] = swap_workaround (m[4]); - m[5] = swap_workaround (m[5]); - m[6] = swap_workaround (m[6]); - m[7] = swap_workaround (m[7]); + m[0] = swap64 (m[0]); + m[1] = swap64 (m[1]); + m[2] = swap64 (m[2]); + m[3] = swap64 (m[3]); + m[4] = swap64 (m[4]); + m[5] = swap64 (m[5]); + m[6] = swap64 (m[6]); + m[7] = swap64 (m[7]); // state buffer 
(hash) - u64 h[8]; + u64x h[8]; h[0] = INITVAL; h[1] = INITVAL; @@ -2342,7 +2355,7 @@ static void m11800m (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le streebog_g (h, m, s_sbob_sl64); - u64 z[8]; + u64x z[8]; z[0] = 0; z[1] = 0; @@ -2351,21 +2364,21 @@ static void m11800m (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le z[4] = 0; z[5] = 0; z[6] = 0; - z[7] = swap_workaround ((u64) (pw_len * 8)); + z[7] = swap64 ((u64) (pw_len * 8)); streebog_g (h, z, s_sbob_sl64); streebog_g (h, m, s_sbob_sl64); - const u32 r0 = l32_from_64 (h[0]); - const u32 r1 = h32_from_64 (h[0]); - const u32 r2 = l32_from_64 (h[1]); - const u32 r3 = h32_from_64 (h[1]); + const u32x r0 = l32_from_64 (h[0]); + const u32x r1 = h32_from_64 (h[0]); + const u32x r2 = l32_from_64 (h[1]); + const u32x r3 = h32_from_64 (h[1]); - #include COMPARE_M + COMPARE_M_SIMD (r0, r1, r2, r3); } } -static void m11800s (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset) +void m11800s (__local u64 (*s_sbob_sl64)[256], u32 w[16], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset) { /** * modifier @@ -2392,17 +2405,17 @@ static void m11800s (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le u32 w0l = w[0]; - for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - const u32 w0r = bfs_buf[il_pos].i; + const u32x w0r = ix_create_bft (bfs_buf, il_pos); - w[0] = w0l | w0r; + const u32x w0lr = w0l | w0r; /** - * reverse message block + * GOST */ - u64 m[8]; + u64x m[8]; m[0] = hl32_to_64 (w[15], w[14]); m[1] = hl32_to_64 (w[13], w[12]); @@ -2411,20 +2424,20 @@ static void m11800s (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le m[4] = hl32_to_64 (w[ 7], w[ 6]); m[5] = hl32_to_64 (w[ 5], w[ 4]); m[6] = hl32_to_64 (w[ 3], w[ 2]); - m[7] = hl32_to_64 (w[ 1], w[ 0]); + m[7] = hl32_to_64 (w[ 1], 
w0lr ); - m[0] = swap_workaround (m[0]); - m[1] = swap_workaround (m[1]); - m[2] = swap_workaround (m[2]); - m[3] = swap_workaround (m[3]); - m[4] = swap_workaround (m[4]); - m[5] = swap_workaround (m[5]); - m[6] = swap_workaround (m[6]); - m[7] = swap_workaround (m[7]); + m[0] = swap64 (m[0]); + m[1] = swap64 (m[1]); + m[2] = swap64 (m[2]); + m[3] = swap64 (m[3]); + m[4] = swap64 (m[4]); + m[5] = swap64 (m[5]); + m[6] = swap64 (m[6]); + m[7] = swap64 (m[7]); // state buffer (hash) - u64 h[8]; + u64x h[8]; h[0] = INITVAL; h[1] = INITVAL; @@ -2437,7 +2450,7 @@ static void m11800s (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le streebog_g (h, m, s_sbob_sl64); - u64 z[8]; + u64x z[8]; z[0] = 0; z[1] = 0; @@ -2446,28 +2459,55 @@ static void m11800s (__local u64 s_sbob_sl64[8][256], u32 w[16], const u32 pw_le z[4] = 0; z[5] = 0; z[6] = 0; - z[7] = swap_workaround ((u64) (pw_len * 8)); + z[7] = swap64 ((u64) (pw_len * 8)); streebog_g (h, z, s_sbob_sl64); streebog_g (h, m, s_sbob_sl64); - const u32 r0 = l32_from_64 (h[0]); - const u32 r1 = h32_from_64 (h[0]); - const u32 r2 = l32_from_64 (h[1]); - const u32 r3 = h32_from_64 (h[1]); + const u32x r0 = l32_from_64 (h[0]); + const u32x r1 = h32_from_64 (h[0]); + const u32x r2 = l32_from_64 (h[1]); + const u32x r3 = h32_from_64 (h[1]); - #include COMPARE_S + COMPARE_S_SIMD (r0, r1, r2, r3); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m11800_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** - * base + * modifier */ const u32 gid = get_global_id (0); const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + + /** + * shared lookup table + */ + + __local u64 s_sbob_sl64[8][256]; + + for (u32 i = lid; i < 256; i += lsz) + { + s_sbob_sl64[0][i] = sbob_sl64[0][i]; 
+ s_sbob_sl64[1][i] = sbob_sl64[1][i]; + s_sbob_sl64[2][i] = sbob_sl64[2][i]; + s_sbob_sl64[3][i] = sbob_sl64[3][i]; + s_sbob_sl64[4][i] = sbob_sl64[4][i]; + s_sbob_sl64[5][i] = sbob_sl64[5][i]; + s_sbob_sl64[6][i] = sbob_sl64[6][i]; + s_sbob_sl64[7][i] = sbob_sl64[7][i]; + } + + barrier (CLK_LOCAL_MEM_FENCE); + + if (gid >= gid_max) return; + + /** + * base + */ u32 w[16]; @@ -2491,66 +2531,48 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_m04 (__glo const u32 pw_len = pws[gid].pw_len; /** - * shared lookup table + * main + */ + + m11800m (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset); +} + +__kernel void m11800_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +{ + /** + * modifier */ - const u32 lid4 = lid * 4; + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + + /** + * shared lookup table + */ __local u64 s_sbob_sl64[8][256]; - s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0]; - s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1]; - s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2]; - s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3]; - s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0]; - s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1]; - s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2]; - s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3]; - s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0]; - s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1]; - s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2]; - s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3]; - s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0]; - s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1]; - s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2]; - s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3]; - s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0]; - s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1]; - s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2]; - s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3]; - s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0]; - s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1]; - s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2]; - s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3]; - s_sbob_sl64[6][lid4 + 0] = 
sbob_sl64[6][lid4 + 0]; - s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1]; - s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2]; - s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3]; - s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0]; - s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1]; - s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2]; - s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_sbob_sl64[0][i] = sbob_sl64[0][i]; + s_sbob_sl64[1][i] = sbob_sl64[1][i]; + s_sbob_sl64[2][i] = sbob_sl64[2][i]; + s_sbob_sl64[3][i] = sbob_sl64[3][i]; + s_sbob_sl64[4][i] = sbob_sl64[4][i]; + s_sbob_sl64[5][i] = sbob_sl64[5][i]; + s_sbob_sl64[6][i] = sbob_sl64[6][i]; + s_sbob_sl64[7][i] = sbob_sl64[7][i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; - /** - * main - */ - - m11800m (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset); -} - -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) -{ /** * base */ - const u32 gid = get_global_id (0); - const u32 lid = get_local_id (0); - u32 w[16]; w[ 0] = pws[gid].i[ 0]; @@ -2573,66 +2595,48 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_m08 (__glo const u32 pw_len = pws[gid].pw_len; /** - * shared lookup table + * main */ - const u32 lid4 = lid * 4; + m11800m (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset); +} + +__kernel void m11800_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t 
*salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +{ + /** + * modifier + */ + + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + + /** + * shared lookup table + */ __local u64 s_sbob_sl64[8][256]; - s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0]; - s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1]; - s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2]; - s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3]; - s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0]; - s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1]; - s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2]; - s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3]; - s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0]; - s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1]; - s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2]; - s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3]; - s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0]; - s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1]; - s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2]; - s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3]; - s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0]; - s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1]; - s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2]; - s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3]; - s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0]; - s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1]; - s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2]; - s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3]; - s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0]; - s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1]; - s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2]; - s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3]; - s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0]; - s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1]; - s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2]; - s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_sbob_sl64[0][i] = sbob_sl64[0][i]; + s_sbob_sl64[1][i] = sbob_sl64[1][i]; + s_sbob_sl64[2][i] = sbob_sl64[2][i]; + s_sbob_sl64[3][i] = sbob_sl64[3][i]; + s_sbob_sl64[4][i] = sbob_sl64[4][i]; + s_sbob_sl64[5][i] = sbob_sl64[5][i]; + s_sbob_sl64[6][i] = sbob_sl64[6][i]; + s_sbob_sl64[7][i] = sbob_sl64[7][i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; - /** - * main - */ - - m11800m (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset); -} - -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 
*bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) -{ /** * base */ - const u32 gid = get_global_id (0); - const u32 lid = get_local_id (0); - u32 w[16]; w[ 0] = pws[gid].i[ 0]; @@ -2655,66 +2659,48 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_m16 (__glo const u32 pw_len = pws[gid].pw_len; /** - * shared lookup table + * main */ - const u32 lid4 = lid * 4; + m11800m (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset); +} + +__kernel void m11800_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +{ + /** + * modifier + */ + + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + + /** + * shared lookup table + */ __local u64 s_sbob_sl64[8][256]; - s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0]; - s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1]; - s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2]; - s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3]; - s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0]; - s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1]; - s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2]; - s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3]; - s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0]; - s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1]; - s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2]; - s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3]; - s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0]; - s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1]; - s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2]; - s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3]; - s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0]; - s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1]; - 
s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2]; - s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3]; - s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0]; - s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1]; - s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2]; - s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3]; - s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0]; - s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1]; - s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2]; - s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3]; - s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0]; - s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1]; - s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2]; - s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_sbob_sl64[0][i] = sbob_sl64[0][i]; + s_sbob_sl64[1][i] = sbob_sl64[1][i]; + s_sbob_sl64[2][i] = sbob_sl64[2][i]; + s_sbob_sl64[3][i] = sbob_sl64[3][i]; + s_sbob_sl64[4][i] = sbob_sl64[4][i]; + s_sbob_sl64[5][i] = sbob_sl64[5][i]; + s_sbob_sl64[6][i] = sbob_sl64[6][i]; + s_sbob_sl64[7][i] = sbob_sl64[7][i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; - /** - * main - */ - - m11800m (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset); -} - -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) -{ /** * base */ - const u32 gid = get_global_id (0); - const u32 lid = get_local_id (0); - u32 w[16]; w[ 0] = pws[gid].i[ 0]; @@ -2737,66 +2723,48 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_s04 (__glo const u32 pw_len = pws[gid].pw_len; /** - * shared lookup table + * main */ - const u32 lid4 = lid * 4; + m11800s (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset); +} + +__kernel void m11800_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 
*bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +{ + /** + * modifier + */ + + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + + /** + * shared lookup table + */ __local u64 s_sbob_sl64[8][256]; - s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0]; - s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1]; - s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2]; - s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3]; - s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0]; - s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1]; - s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2]; - s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3]; - s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0]; - s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1]; - s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2]; - s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3]; - s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0]; - s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1]; - s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2]; - s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3]; - s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0]; - s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1]; - s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2]; - s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3]; - s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0]; - s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1]; - s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2]; - s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3]; - s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0]; - s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1]; - s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2]; - s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3]; - s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0]; - s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1]; - s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2]; - s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_sbob_sl64[0][i] = sbob_sl64[0][i]; + s_sbob_sl64[1][i] = sbob_sl64[1][i]; + s_sbob_sl64[2][i] = sbob_sl64[2][i]; + s_sbob_sl64[3][i] = sbob_sl64[3][i]; + s_sbob_sl64[4][i] = sbob_sl64[4][i]; + s_sbob_sl64[5][i] = sbob_sl64[5][i]; + s_sbob_sl64[6][i] = sbob_sl64[6][i]; + s_sbob_sl64[7][i] = sbob_sl64[7][i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; - /** - * main - */ - - m11800s (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset); 
-} - -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) -{ /** * base */ - const u32 gid = get_global_id (0); - const u32 lid = get_local_id (0); - u32 w[16]; w[ 0] = pws[gid].i[ 0]; @@ -2819,66 +2787,48 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_s08 (__glo const u32 pw_len = pws[gid].pw_len; /** - * shared lookup table + * main + */ + + m11800s (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset); +} + +__kernel void m11800_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +{ + /** + * modifier */ - const u32 lid4 = lid * 4; + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + + /** + * shared lookup table + */ __local u64 s_sbob_sl64[8][256]; - s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0]; - s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1]; - s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2]; - s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3]; - s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0]; - s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1]; - s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2]; - s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3]; - s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0]; - s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1]; - s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2]; - s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 
+ 3]; - s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0]; - s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1]; - s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2]; - s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3]; - s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0]; - s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1]; - s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2]; - s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3]; - s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0]; - s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1]; - s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2]; - s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3]; - s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0]; - s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1]; - s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2]; - s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3]; - s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0]; - s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1]; - s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2]; - s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_sbob_sl64[0][i] = sbob_sl64[0][i]; + s_sbob_sl64[1][i] = sbob_sl64[1][i]; + s_sbob_sl64[2][i] = sbob_sl64[2][i]; + s_sbob_sl64[3][i] = sbob_sl64[3][i]; + s_sbob_sl64[4][i] = sbob_sl64[4][i]; + s_sbob_sl64[5][i] = sbob_sl64[5][i]; + s_sbob_sl64[6][i] = sbob_sl64[6][i]; + s_sbob_sl64[7][i] = sbob_sl64[7][i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; - /** - * main - */ - - m11800s (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset); -} - -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) -{ /** * base */ - const u32 gid = get_global_id (0); - const u32 lid = get_local_id (0); - u32 w[16]; w[ 0] = pws[gid].i[ 0]; @@ -2900,54 +2850,9 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11800_s16 (__glo const u32 pw_len = pws[gid].pw_len; - /** - * shared lookup table - */ - - const u32 lid4 = lid * 4; - - __local u64 s_sbob_sl64[8][256]; - - s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0]; - s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1]; - s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2]; - s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3]; - s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0]; - s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1]; 
- s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2]; - s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3]; - s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0]; - s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1]; - s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2]; - s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3]; - s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0]; - s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1]; - s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2]; - s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3]; - s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0]; - s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1]; - s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2]; - s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3]; - s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0]; - s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1]; - s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2]; - s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3]; - s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0]; - s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1]; - s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2]; - s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3]; - s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0]; - s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1]; - s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2]; - s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3]; - - barrier (CLK_LOCAL_MEM_FENCE); - - if (gid >= gid_max) return; - /** * main */ - m11800s (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset); + m11800s (s_sbob_sl64, w, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset); }
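
Note on the vectorisation technique used in this patch (explanatory sketch, not part of the diff): the change moves the m11800 brute-force kernels from scalar u64/u32 state to the vectorised u64x/u32x types, steps the inner loop by VECT_SIZE, and reports candidates through COMPARE_M_SIMD / COMPARE_S_SIMD instead of the included compare files. Because OpenCL cannot index an array with a vector, the SBOG_LPSti64 substitution can no longer read s_sbob_sl64 directly once the byte index is itself a vector; the new BOX() macro gathers one table entry per lane and reassembles the results into a u64x. The shared lookup table is likewise now filled by striding over its 256 entries with the actual local size, so the kernels no longer need the fixed 64-thread work-group that the removed reqd_work_group_size attribute and the old lid*4 copies assumed. The plain C program below is a minimal sketch of that per-lane gather for an assumed VECT_SIZE of 4; the names u64x4, box1 and box4 are illustrative only and do not appear in the hashcat sources.

#include <stdint.h>
#include <stdio.h>

#define VECT_SIZE 4

typedef struct { uint64_t v[VECT_SIZE]; } u64x4;   /* stand-in for the u64x vector type */

/* scalar lookup, the VECT_SIZE == 1 case of BOX() */
static uint64_t box1 (uint64_t S[8][256], int n, uint8_t i)
{
  return S[n][i];
}

/* vectorised lookup: one scalar gather per lane, which is what BOX() expands to */
static u64x4 box4 (uint64_t S[8][256], int n, const uint8_t i[VECT_SIZE])
{
  u64x4 r;

  for (int lane = 0; lane < VECT_SIZE; lane++)
  {
    r.v[lane] = box1 (S, n, i[lane]);
  }

  return r;
}

int main (void)
{
  static uint64_t S[8][256];

  /* fill the table with recognisable values */
  for (int n = 0; n < 8; n++)
    for (int i = 0; i < 256; i++)
      S[n][i] = ((uint64_t) n << 8) | (uint64_t) i;

  const uint8_t idx[VECT_SIZE] = { 0x00, 0x7f, 0x80, 0xff };

  const u64x4 r = box4 (S, 3, idx);

  for (int lane = 0; lane < VECT_SIZE; lane++)
  {
    printf ("lane %d: 0x%04llx\n", lane, (unsigned long long) r.v[lane]);
  }

  return 0;
}

For VECT_SIZE == 1 the macro degenerates to the original direct S[n][i] load, which is why the scalar build behaves exactly as before; the //too much register pressure comment explains why NEW_SIMD_CODE is nevertheless left disabled for this hash mode.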