X-Git-Url: https://www.flypig.org.uk/git/?a=blobdiff_plain;f=OpenCL%2Fm05600_a3.cl;h=006c7369526a52c7e411791dd42a83a25b5aa716;hb=161a6eb4bc643d8e636e96eda613f5137d30da59;hp=0d4be3bb863ddc78f87031eff29506ef800b0e22;hpb=5f7c47b461b7e3ffb727baeee62f41ac537cc0ba;p=hashcat.git diff --git a/OpenCL/m05600_a3.cl b/OpenCL/m05600_a3.cl index 0d4be3b..006c736 100644 --- a/OpenCL/m05600_a3.cl +++ b/OpenCL/m05600_a3.cl @@ -1,48 +1,44 @@ /** - * Author......: Jens Steube + * Authors.....: Jens Steube + * Gabriele Gristina + * * License.....: MIT */ #define _NETNTLMV2_ -#include "include/constants.h" -#include "include/kernel_vendor.h" +#define NEW_SIMD_CODE -#define DGST_R0 0 -#define DGST_R1 3 -#define DGST_R2 2 -#define DGST_R3 1 +#include "inc_vendor.cl" +#include "inc_hash_constants.h" +#include "inc_hash_functions.cl" +#include "inc_types.cl" +#include "inc_common.cl" +#include "inc_simd.cl" -#include "include/kernel_functions.c" -#include "OpenCL/types_ocl.c" -#include "OpenCL/common.c" - -#define COMPARE_S "OpenCL/check_single_comp4.c" -#define COMPARE_M "OpenCL/check_multi_comp4.c" - -static void md4_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[4]) +void md4_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[4]) { - u32 a = digest[0]; - u32 b = digest[1]; - u32 c = digest[2]; - u32 d = digest[3]; - - u32 w0_t = w0[0]; - u32 w1_t = w0[1]; - u32 w2_t = w0[2]; - u32 w3_t = w0[3]; - u32 w4_t = w1[0]; - u32 w5_t = w1[1]; - u32 w6_t = w1[2]; - u32 w7_t = w1[3]; - u32 w8_t = w2[0]; - u32 w9_t = w2[1]; - u32 wa_t = w2[2]; - u32 wb_t = w2[3]; - u32 wc_t = w3[0]; - u32 wd_t = w3[1]; - u32 we_t = w3[2]; - u32 wf_t = w3[3]; + u32x a = digest[0]; + u32x b = digest[1]; + u32x c = digest[2]; + u32x d = digest[3]; + + u32x w0_t = w0[0]; + u32x w1_t = w0[1]; + u32x w2_t = w0[2]; + u32x w3_t = w0[3]; + u32x w4_t = w1[0]; + u32x w5_t = w1[1]; + u32x w6_t = w1[2]; + u32x w7_t = w1[3]; + u32x w8_t = w2[0]; + u32x w9_t = w2[1]; + u32x wa_t = w2[2]; + u32x wb_t = w2[3]; + u32x wc_t = w3[0]; + u32x wd_t = w3[1]; + u32x we_t = w3[2]; + u32x wf_t = w3[3]; MD4_STEP (MD4_Fo, a, b, c, d, w0_t, MD4C00, MD4S00); MD4_STEP (MD4_Fo, d, a, b, c, w1_t, MD4C00, MD4S01); @@ -101,29 +97,29 @@ static void md4_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], co digest[3] += d; } -static void md5_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[4]) +void md5_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[4]) { - u32 a = digest[0]; - u32 b = digest[1]; - u32 c = digest[2]; - u32 d = digest[3]; - - u32 w0_t = w0[0]; - u32 w1_t = w0[1]; - u32 w2_t = w0[2]; - u32 w3_t = w0[3]; - u32 w4_t = w1[0]; - u32 w5_t = w1[1]; - u32 w6_t = w1[2]; - u32 w7_t = w1[3]; - u32 w8_t = w2[0]; - u32 w9_t = w2[1]; - u32 wa_t = w2[2]; - u32 wb_t = w2[3]; - u32 wc_t = w3[0]; - u32 wd_t = w3[1]; - u32 we_t = w3[2]; - u32 wf_t = w3[3]; + u32x a = digest[0]; + u32x b = digest[1]; + u32x c = digest[2]; + u32x d = digest[3]; + + u32x w0_t = w0[0]; + u32x w1_t = w0[1]; + u32x w2_t = w0[2]; + u32x w3_t = w0[3]; + u32x w4_t = w1[0]; + u32x w5_t = w1[1]; + u32x w6_t = w1[2]; + u32x w7_t = w1[3]; + u32x w8_t = w2[0]; + u32x w9_t = w2[1]; + u32x wa_t = w2[2]; + u32x wb_t = w2[3]; + u32x wc_t = w3[0]; + u32x wd_t = w3[1]; + u32x we_t = w3[2]; + u32x wf_t = w3[3]; MD5_STEP (MD5_Fo, a, b, c, d, w0_t, MD5C00, MD5S00); MD5_STEP (MD5_Fo, d, a, b, c, w1_t, MD5C01, MD5S01); @@ -199,7 +195,7 @@ 
static void md5_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], co digest[3] += d; } -static void hmac_md5_pad (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4], u32 opad[4]) +void hmac_md5_pad (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[4], u32x opad[4]) { w0[0] = w0[0] ^ 0x36363636; w0[1] = w0[1] ^ 0x36363636; @@ -250,7 +246,7 @@ static void hmac_md5_pad (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4 md5_transform (w0, w1, w2, w3, opad); } -static void hmac_md5_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4], u32 opad[4], u32 digest[4]) +void hmac_md5_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[4], u32x opad[4], u32x digest[4]) { digest[0] = ipad[0]; digest[1] = ipad[1]; @@ -284,7 +280,7 @@ static void hmac_md5_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4 md5_transform (w0, w1, w2, w3, digest); } -static void m05600m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 s_userdomain_buf[64], __local u32 s_chall_buf[256]) +void m05600m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_userdomain_buf, __local u32 *s_chall_buf) { /** * modifier @@ -298,10 +294,10 @@ static void m05600m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_le */ const u32 userdomain_len = netntlm_bufs[salt_pos].user_len - + netntlm_bufs[salt_pos].domain_len; + + netntlm_bufs[salt_pos].domain_len; const u32 chall_len = netntlm_bufs[salt_pos].srvchall_len - + netntlm_bufs[salt_pos].clichall_len; + + netntlm_bufs[salt_pos].clichall_len; /** * loop @@ -309,25 +305,46 @@ static void m05600m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_le u32 w0l = w0[0]; - for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - 
const u32 w0r = bfs_buf[il_pos].i; - - w0[0] = w0l | w0r; - - u32 digest[4]; + const u32x w0r = ix_create_bft (bfs_buf, il_pos); + + const u32x w0lr = w0l | w0r; + + /** + * pads + */ + + u32x w0_t[4]; + u32x w1_t[4]; + u32x w2_t[4]; + u32x w3_t[4]; + + w0_t[0] = w0lr; + w0_t[1] = w0[1]; + w0_t[2] = w0[2]; + w0_t[3] = w0[3]; + w1_t[0] = w1[0]; + w1_t[1] = w1[1]; + w1_t[2] = w1[2]; + w1_t[3] = w1[3]; + w2_t[0] = w2[0]; + w2_t[1] = w2[1]; + w2_t[2] = w2[2]; + w2_t[3] = w2[3]; + w3_t[0] = w3[0]; + w3_t[1] = w3[1]; + w3_t[2] = w3[2]; + w3_t[3] = w3[3]; + + u32x digest[4]; digest[0] = MD4M_A; digest[1] = MD4M_B; digest[2] = MD4M_C; digest[3] = MD4M_D; - md4_transform (w0, w1, w2, w3, digest); - - u32 w0_t[4]; - u32 w1_t[4]; - u32 w2_t[4]; - u32 w3_t[4]; + md4_transform (w0_t, w1_t, w2_t, w3_t, digest); w0_t[0] = digest[0]; w0_t[1] = digest[1]; @@ -351,8 +368,8 @@ static void m05600m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_le digest[2] = MD5M_C; digest[3] = MD5M_D; - u32 ipad[4]; - u32 opad[4]; + u32x ipad[4]; + u32x opad[4]; hmac_md5_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad); @@ -465,16 +482,11 @@ static void m05600m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_le hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest); - const u32 r0 = digest[0]; - const u32 r1 = digest[3]; - const u32 r2 = digest[2]; - const u32 r3 = digest[1]; - - #include COMPARE_M + COMPARE_M_SIMD (digest[0], digest[3], digest[2], digest[1]); } } -static void m05600s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 s_userdomain_buf[64], __local u32 s_chall_buf[256]) +void m05600s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_userdomain_buf, __local u32 *s_chall_buf) { /** * modifier @@ -483,6 +495,16 @@ static void m05600s (u32 w0[4], u32 w1[4], u32 
w2[4], u32 w3[4], const u32 pw_le const u32 gid = get_global_id (0); const u32 lid = get_local_id (0); + /** + * prepare + */ + + const u32 userdomain_len = netntlm_bufs[salt_pos].user_len + + netntlm_bufs[salt_pos].domain_len; + + const u32 chall_len = netntlm_bufs[salt_pos].srvchall_len + + netntlm_bufs[salt_pos].clichall_len; + /** * digest */ @@ -495,41 +517,52 @@ static void m05600s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_le digests_buf[digests_offset].digest_buf[DGST_R3] }; - /** - * prepare - */ - - const u32 userdomain_len = netntlm_bufs[salt_pos].user_len - + netntlm_bufs[salt_pos].domain_len; - - const u32 chall_len = netntlm_bufs[salt_pos].srvchall_len - + netntlm_bufs[salt_pos].clichall_len; - /** * loop */ u32 w0l = w0[0]; - for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - const u32 w0r = bfs_buf[il_pos].i; - - w0[0] = w0l | w0r; - - u32 digest[4]; + const u32x w0r = ix_create_bft (bfs_buf, il_pos); + + const u32x w0lr = w0l | w0r; + + /** + * pads + */ + + u32x w0_t[4]; + u32x w1_t[4]; + u32x w2_t[4]; + u32x w3_t[4]; + + w0_t[0] = w0lr; + w0_t[1] = w0[1]; + w0_t[2] = w0[2]; + w0_t[3] = w0[3]; + w1_t[0] = w1[0]; + w1_t[1] = w1[1]; + w1_t[2] = w1[2]; + w1_t[3] = w1[3]; + w2_t[0] = w2[0]; + w2_t[1] = w2[1]; + w2_t[2] = w2[2]; + w2_t[3] = w2[3]; + w3_t[0] = w3[0]; + w3_t[1] = w3[1]; + w3_t[2] = w3[2]; + w3_t[3] = w3[3]; + + u32x digest[4]; digest[0] = MD4M_A; digest[1] = MD4M_B; digest[2] = MD4M_C; digest[3] = MD4M_D; - md4_transform (w0, w1, w2, w3, digest); - - u32 w0_t[4]; - u32 w1_t[4]; - u32 w2_t[4]; - u32 w3_t[4]; + md4_transform (w0_t, w1_t, w2_t, w3_t, digest); w0_t[0] = digest[0]; w0_t[1] = digest[1]; @@ -553,8 +586,8 @@ static void m05600s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_le digest[2] = MD5M_C; digest[3] = MD5M_D; - u32 ipad[4]; - u32 opad[4]; + u32x ipad[4]; + u32x opad[4]; hmac_md5_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad); @@ -667,28 +700,51 @@ static void m05600s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_le hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest); - const u32 r0 = digest[0]; - const u32 r1 = digest[3]; - const u32 r2 = digest[2]; - const u32 r3 = digest[1]; - - #include COMPARE_S + COMPARE_S_SIMD (digest[0], digest[3], digest[2], digest[1]); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05600_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, 
__global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * modifier */ + const u32 gid = get_global_id (0); const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); /** - * base + * salt */ - const u32 gid = get_global_id (0); + __local u32 s_userdomain_buf[64]; + + for (u32 i = lid; i < 64; i += lsz) + { + s_userdomain_buf[i] = netntlm_bufs[salt_pos].userdomain_buf[i]; + } + + __local u32 s_chall_buf[256]; + + for (u32 i = lid; i < 256; i += lsz) + { + s_chall_buf[i] = netntlm_bufs[salt_pos].chall_buf[i]; + } + + barrier (CLK_LOCAL_MEM_FENCE); + + if (gid >= gid_max) return; + + const u32 userdomain_len = netntlm_bufs[salt_pos].user_len + + netntlm_bufs[salt_pos].domain_len; + + const u32 chall_len = netntlm_bufs[salt_pos].srvchall_len + + netntlm_bufs[salt_pos].clichall_len; + + /** + * base + */ u32 w0[4]; @@ -721,49 +777,54 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_m04 (__glo const u32 pw_len = pws[gid].pw_len; /** - * s_userdomain, s_chall + * main + */ + + m05600m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); +} + +__kernel void m05600_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +{ + /** + * modifier + */ + + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + + /** + * salt */ __local u32 s_userdomain_buf[64]; - __local u32 s_chall_buf[256]; - if (lid < 64) + for (u32 i = lid; i < 64; i += lsz) { - s_userdomain_buf[lid] = netntlm_bufs[salt_pos].userdomain_buf[lid]; + s_userdomain_buf[i] = 
netntlm_bufs[salt_pos].userdomain_buf[i]; } - const u32 lid4 = lid * 4; + __local u32 s_chall_buf[256]; - s_chall_buf[lid4 + 0] = netntlm_bufs[salt_pos].chall_buf[lid4 + 0]; - s_chall_buf[lid4 + 1] = netntlm_bufs[salt_pos].chall_buf[lid4 + 1]; - s_chall_buf[lid4 + 2] = netntlm_bufs[salt_pos].chall_buf[lid4 + 2]; - s_chall_buf[lid4 + 3] = netntlm_bufs[salt_pos].chall_buf[lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_chall_buf[i] = netntlm_bufs[salt_pos].chall_buf[i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; - /** - * main - */ - - m05600m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); -} - -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) -{ - /** - * modifier - */ + const u32 userdomain_len = netntlm_bufs[salt_pos].user_len + + netntlm_bufs[salt_pos].domain_len; - const u32 lid = get_local_id (0); + const u32 chall_len = netntlm_bufs[salt_pos].srvchall_len + + netntlm_bufs[salt_pos].clichall_len; /** * base */ - const u32 gid = get_global_id (0); - u32 w0[4]; w0[0] = pws[gid].i[ 0]; @@ -794,53 +855,58 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_m08 (__glo const u32 pw_len = pws[gid].pw_len; - /** - * s_userdomain, s_chall - */ - - __local u32 s_userdomain_buf[64]; - __local u32 s_chall_buf[256]; - - if (lid < 64) - { - s_userdomain_buf[lid] = netntlm_bufs[salt_pos].userdomain_buf[lid]; - } - - const u32 lid4 = lid * 4; - - s_chall_buf[lid4 + 0] = netntlm_bufs[salt_pos].chall_buf[lid4 + 0]; - s_chall_buf[lid4 + 1] = netntlm_bufs[salt_pos].chall_buf[lid4 + 1]; - s_chall_buf[lid4 + 2] = netntlm_bufs[salt_pos].chall_buf[lid4 + 2]; - s_chall_buf[lid4 + 3] = netntlm_bufs[salt_pos].chall_buf[lid4 + 3]; - - barrier (CLK_LOCAL_MEM_FENCE); - - if (gid >= gid_max) return; - /** * main */ - m05600m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); + m05600m (w0, w1, w2, w3, pw_len, 
pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05600_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05600_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 
*bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * modifier */ + const u32 gid = get_global_id (0); const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); /** - * base + * salt */ - const u32 gid = get_global_id (0); + __local u32 s_userdomain_buf[64]; + + for (u32 i = lid; i < 64; i += lsz) + { + s_userdomain_buf[i] = netntlm_bufs[salt_pos].userdomain_buf[i]; + } + + __local u32 s_chall_buf[256]; + + for (u32 i = lid; i < 256; i += lsz) + { + s_chall_buf[i] = netntlm_bufs[salt_pos].chall_buf[i]; + } + + barrier (CLK_LOCAL_MEM_FENCE); + + if (gid >= gid_max) return; + + const u32 userdomain_len = netntlm_bufs[salt_pos].user_len + + netntlm_bufs[salt_pos].domain_len; + + const u32 chall_len = netntlm_bufs[salt_pos].srvchall_len + + netntlm_bufs[salt_pos].clichall_len; + + /** + * base + */ u32 w0[4]; @@ -872,50 +938,55 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_s04 (__glo const u32 pw_len = pws[gid].pw_len; + /** + * main + */ + + m05600s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); +} + +__kernel void m05600_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +{ + /** + * modifier + */ + + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); + /** * salt */ __local u32 s_userdomain_buf[64]; - __local u32 s_chall_buf[256]; - if (lid < 64) + for (u32 i = lid; i < 64; i += lsz) { - s_userdomain_buf[lid] = netntlm_bufs[salt_pos].userdomain_buf[lid]; + 
s_userdomain_buf[i] = netntlm_bufs[salt_pos].userdomain_buf[i]; } - const u32 lid4 = lid * 4; + __local u32 s_chall_buf[256]; - s_chall_buf[lid4 + 0] = netntlm_bufs[salt_pos].chall_buf[lid4 + 0]; - s_chall_buf[lid4 + 1] = netntlm_bufs[salt_pos].chall_buf[lid4 + 1]; - s_chall_buf[lid4 + 2] = netntlm_bufs[salt_pos].chall_buf[lid4 + 2]; - s_chall_buf[lid4 + 3] = netntlm_bufs[salt_pos].chall_buf[lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_chall_buf[i] = netntlm_bufs[salt_pos].chall_buf[i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; - /** - * main - */ - - m05600s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); -} - -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) -{ - /** - * modifier - */ + const u32 userdomain_len = netntlm_bufs[salt_pos].user_len + + netntlm_bufs[salt_pos].domain_len; - const u32 lid = get_local_id (0); + const u32 chall_len = netntlm_bufs[salt_pos].srvchall_len + + netntlm_bufs[salt_pos].clichall_len; /** * base */ - const u32 gid = get_global_id (0); - u32 w0[4]; w0[0] = pws[gid].i[ 0]; @@ -946,36 +1017,13 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_s08 (__glo const u32 pw_len = pws[gid].pw_len; - /** - * salt - */ - - __local u32 s_userdomain_buf[64]; - __local u32 s_chall_buf[256]; - - if (lid < 64) - { - s_userdomain_buf[lid] = netntlm_bufs[salt_pos].userdomain_buf[lid]; - } - - const u32 lid4 = lid * 4; - - s_chall_buf[lid4 + 0] = netntlm_bufs[salt_pos].chall_buf[lid4 + 0]; - s_chall_buf[lid4 + 1] = netntlm_bufs[salt_pos].chall_buf[lid4 + 1]; - s_chall_buf[lid4 + 2] = netntlm_bufs[salt_pos].chall_buf[lid4 + 2]; - s_chall_buf[lid4 + 3] = netntlm_bufs[salt_pos].chall_buf[lid4 + 3]; - - barrier (CLK_LOCAL_MEM_FENCE); - - if (gid >= gid_max) return; - /** * main */ - m05600s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); + m05600s (w0, w1, w2, w3, 
pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, netntlm_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_userdomain_buf, s_chall_buf); } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05600_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05600_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global netntlm_t *netntlm_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { }
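
Editor's note on the change above: the diff ports the NetNTLMv2 (-m 5600) brute-force kernel from the old scalar code path (u32 words, bfs_cnt, "#include COMPARE_M") to the SIMD code path enabled by NEW_SIMD_CODE. Digests and message words become u32x vectors, the inner loop steps by VECT_SIZE, candidates are loaded with ix_create_bft, and results are checked with COMPARE_M_SIMD / COMPARE_S_SIMD. The kernels also move to the inc_*.cl include layout, replace the reqd_work_group_size(64,1,1) attribute with get_local_size(0)-strided copies of the salt data into local memory, and take the split d_scryptV0..3 buffers instead of a single d_scryptV_buf.

For orientation, the sketch below is a minimal host-side reference of the NetNTLMv2 computation this kernel performs per password candidate: MD4 over the UTF-16LE password, then two chained HMAC-MD5 passes over the user/domain buffer and the challenge buffer. It is illustrative only; the function name and parameters are not part of hashcat, the user/domain buffer is prepared on the host in hashcat (here the user part is upper-cased by convention), and hashlib exposes 'md4' only when the underlying OpenSSL build still provides it.

    import hashlib
    import hmac

    def netntlmv2_response(password, user, domain, server_challenge, client_blob):
        # NTLM hash: MD4 over the UTF-16LE encoded password
        # (hashlib.new('md4', ...) works only if the OpenSSL build ships MD4)
        ntlm_hash = hashlib.new('md4', password.encode('utf-16-le')).digest()

        # NTLMv2 hash: HMAC-MD5 keyed with the NTLM hash over user + domain
        # (corresponds to the kernel's s_userdomain_buf / userdomain_len pass)
        ntlmv2_hash = hmac.new(ntlm_hash,
                               (user.upper() + domain).encode('utf-16-le'),
                               hashlib.md5).digest()

        # Response: HMAC-MD5 keyed with the NTLMv2 hash over the concatenated
        # server challenge and client challenge blob (the kernel's s_chall_buf)
        return hmac.new(ntlmv2_hash,
                        server_challenge + client_blob,
                        hashlib.md5).hexdigest()

The final HMAC-MD5 digest words are what the kernel compares against digests_buf, in the DGST_R0..R3 order (digest[0], digest[3], digest[2], digest[1]) passed to COMPARE_M_SIMD / COMPARE_S_SIMD.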