X-Git-Url: https://www.flypig.org.uk/git/?a=blobdiff_plain;f=OpenCL%2Fm01500_a1.cl;h=bbb86976e0aad265b0f013e7b64e4649609bd0c3;hb=51e1d11b2d6dff4ae331cff6098169455733c412;hp=e40af0cc9d3aee2e5b1314458d852c674bd17785;hpb=1b1c70708f7fc1f9b70e61eb169282b78243310b;p=hashcat.git diff --git a/OpenCL/m01500_a1.cl b/OpenCL/m01500_a1.cl index e40af0c..bbb8697 100644 --- a/OpenCL/m01500_a1.cl +++ b/OpenCL/m01500_a1.cl @@ -1,24 +1,20 @@ /** - * Author......: Jens Steube + * Authors.....: Jens Steube + * Gabriele Gristina + * * License.....: MIT */ #define _DES_ -#include "include/constants.h" -#include "include/kernel_vendor.h" +#define NEW_SIMD_CODE -#define DGST_R0 0 -#define DGST_R1 1 -#define DGST_R2 2 -#define DGST_R3 3 - -#include "include/kernel_functions.c" -#include "types_ocl.c" -#include "common.c" - -#define COMPARE_S "check_single_comp4.c" -#define COMPARE_M "check_multi_comp4.c" +#include "inc_vendor.cl" +#include "inc_hash_constants.h" +#include "inc_hash_functions.cl" +#include "inc_types.cl" +#include "inc_common.cl" +#include "inc_simd.cl" #define PERM_OP(a,b,tt,n,m) \ { \ @@ -336,11 +332,21 @@ __constant u32 c_skb[8][64] = }, }; +#if VECT_SIZE == 1 #define BOX(i,n,S) (S)[(n)][(i)] - -static void _des_crypt_keysetup (u32 c, u32 d, u32 Kc[16], u32 Kd[16], __local u32 s_skb[8][64]) +#elif VECT_SIZE == 2 +#define BOX(i,n,S) (u32x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1]) +#elif VECT_SIZE == 4 +#define BOX(i,n,S) (u32x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3]) +#elif VECT_SIZE == 8 +#define BOX(i,n,S) (u32x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7]) +#elif VECT_SIZE == 16 +#define BOX(i,n,S) (u32x) ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf]) +#endif + +void _des_crypt_keysetup (u32x c, u32x d, u32x Kc[16], u32x Kd[16], __local u32 (*s_skb)[64]) { - u32 tt; + u32x tt; PERM_OP (d, c, tt, 4, 0x0f0f0f0f); HPERM_OP (c, tt, 2, 0xcccc0000); @@ -356,7 +362,9 @@ static void _des_crypt_keysetup (u32 c, u32 d, u32 Kc[16], u32 Kd[16], __local u c = c & 0x0fffffff; + #ifdef _unroll #pragma unroll + #endif for (u32 i = 0; i < 16; i++) { if ((i < 2) || (i == 8) || (i == 15)) @@ -373,52 +381,55 @@ static void _des_crypt_keysetup (u32 c, u32 d, u32 Kc[16], u32 Kd[16], __local u c = c & 0x0fffffff; d = d & 0x0fffffff; - const u32 c00 = (c >> 0) & 0x0000003f; - const u32 c06 = (c >> 6) & 0x00383003; - const u32 c07 = (c >> 7) & 0x0000003c; - const u32 c13 = (c >> 13) & 0x0000060f; - const u32 c20 = (c >> 20) & 0x00000001; - - u32 s = BOX (((c00 >> 0) & 0xff), 0, s_skb) - | BOX (((c06 >> 0) & 0xff) - |((c07 >> 0) & 0xff), 1, s_skb) - | BOX (((c13 >> 0) & 0xff) - |((c06 >> 8) & 0xff), 2, s_skb) - | BOX (((c20 >> 0) & 0xff) - |((c13 >> 8) & 0xff) - |((c06 >> 16) & 0xff), 3, s_skb); - - const u32 d00 = (d >> 0) & 0x00003c3f; - const u32 d07 = (d >> 7) & 0x00003f03; - const u32 d21 = (d >> 21) & 0x0000000f; - const u32 d22 = (d >> 22) & 0x00000030; - - u32 t = BOX (((d00 >> 0) & 0xff), 4, s_skb) - | BOX (((d07 >> 0) & 0xff) - |((d00 >> 8) & 0xff), 5, s_skb) - | BOX (((d07 >> 8) & 0xff), 6, s_skb) - | BOX (((d21 >> 0) & 0xff) - |((d22 >> 0) & 0xff), 7, s_skb); + const u32x c00 = (c >> 0) & 0x0000003f; + const u32x c06 = (c >> 6) & 0x00383003; + 
const u32x c07 = (c >> 7) & 0x0000003c; + const u32x c13 = (c >> 13) & 0x0000060f; + const u32x c20 = (c >> 20) & 0x00000001; + + u32x s = BOX (((c00 >> 0) & 0xff), 0, s_skb) + | BOX (((c06 >> 0) & 0xff) + |((c07 >> 0) & 0xff), 1, s_skb) + | BOX (((c13 >> 0) & 0xff) + |((c06 >> 8) & 0xff), 2, s_skb) + | BOX (((c20 >> 0) & 0xff) + |((c13 >> 8) & 0xff) + |((c06 >> 16) & 0xff), 3, s_skb); + + const u32x d00 = (d >> 0) & 0x00003c3f; + const u32x d07 = (d >> 7) & 0x00003f03; + const u32x d21 = (d >> 21) & 0x0000000f; + const u32x d22 = (d >> 22) & 0x00000030; + + u32x t = BOX (((d00 >> 0) & 0xff), 4, s_skb) + | BOX (((d07 >> 0) & 0xff) + |((d00 >> 8) & 0xff), 5, s_skb) + | BOX (((d07 >> 8) & 0xff), 6, s_skb) + | BOX (((d21 >> 0) & 0xff) + |((d22 >> 0) & 0xff), 7, s_skb); Kc[i] = ((t << 16) | (s & 0x0000ffff)); Kd[i] = ((s >> 16) | (t & 0xffff0000)); } } -static void _des_crypt_encrypt (u32 iv[2], u32 mask, u32 Kc[16], u32 Kd[16], __local u32 s_SPtrans[8][64]) +void _des_crypt_encrypt (u32x iv[2], u32 mask, u32x Kc[16], u32x Kd[16], __local u32 (*s_SPtrans)[64]) { const u32 E1 = (mask >> 2) & 0x3f0; const u32 E0 = mask & 0x3f; - u32 r = 0; - u32 l = 0; + u32x r = 0; + u32x l = 0; for (u32 i = 0; i < 25; i++) { + #ifdef _unroll + #pragma unroll + #endif for (u32 j = 0; j < 16; j += 2) { - u32 t; - u32 u; + u32x t; + u32x u; t = r ^ (r >> 16); u = t & E0; @@ -461,7 +472,7 @@ static void _des_crypt_encrypt (u32 iv[2], u32 mask, u32 Kc[16], u32 Kd[16], __l | BOX (((t >> 24) & 0x3f), 7, s_SPtrans); } - u32 tt; + u32x tt; tt = l; l = r; @@ -472,84 +483,65 @@ static void _des_crypt_encrypt (u32 iv[2], u32 mask, u32 Kc[16], u32 Kd[16], __l iv[1] = rotl32 (l, 31); } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m01500_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, 
const u32 gid_max) { /** * modifier */ + const u32 gid = get_global_id (0); const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); /** - * base + * sbox, kbox */ - const u32 gid = get_global_id (0); - - u32 wordl0[4]; - - wordl0[0] = pws[gid].i[ 0]; - wordl0[1] = pws[gid].i[ 1]; - wordl0[2] = 0; - wordl0[3] = 0; - - u32 wordl1[4]; - - wordl1[0] = 0; - wordl1[1] = 0; - wordl1[2] = 0; - wordl1[3] = 0; - - u32 wordl2[4]; - - wordl2[0] = 0; - wordl2[1] = 0; - wordl2[2] = 0; - wordl2[3] = 0; - - u32 wordl3[4]; - - wordl3[0] = 0; - wordl3[1] = 0; - wordl3[2] = 0; - wordl3[3] = 0; - - const u32 pw_l_len = pws[gid].pw_len; + __local u32 s_SPtrans[8][64]; + __local u32 s_skb[8][64]; - if (combs_mode == COMBINATOR_MODE_BASE_RIGHT) + for (u32 i = lid; i < 64; i += lsz) { - switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len); + s_SPtrans[0][i] = c_SPtrans[0][i]; + s_SPtrans[1][i] = c_SPtrans[1][i]; + s_SPtrans[2][i] = c_SPtrans[2][i]; + s_SPtrans[3][i] = c_SPtrans[3][i]; + s_SPtrans[4][i] = c_SPtrans[4][i]; + s_SPtrans[5][i] = c_SPtrans[5][i]; + s_SPtrans[6][i] = c_SPtrans[6][i]; + s_SPtrans[7][i] = c_SPtrans[7][i]; + + s_skb[0][i] = c_skb[0][i]; + s_skb[1][i] = c_skb[1][i]; + s_skb[2][i] = c_skb[2][i]; + s_skb[3][i] = c_skb[3][i]; + s_skb[4][i] = c_skb[4][i]; + s_skb[5][i] = c_skb[5][i]; + s_skb[6][i] = c_skb[6][i]; + s_skb[7][i] = c_skb[7][i]; } - /** - * modifier - */ + barrier (CLK_LOCAL_MEM_FENCE); - __local u32 s_SPtrans[8][64]; + if (gid >= gid_max) return; - __local u32 s_skb[8][64]; + /** + * base + */ - s_SPtrans[0][lid] = c_SPtrans[0][lid]; - s_SPtrans[1][lid] = c_SPtrans[1][lid]; - s_SPtrans[2][lid] = c_SPtrans[2][lid]; - s_SPtrans[3][lid] = c_SPtrans[3][lid]; - s_SPtrans[4][lid] = c_SPtrans[4][lid]; - s_SPtrans[5][lid] = c_SPtrans[5][lid]; - s_SPtrans[6][lid] = c_SPtrans[6][lid]; - s_SPtrans[7][lid] = c_SPtrans[7][lid]; - - s_skb[0][lid] = c_skb[0][lid]; - s_skb[1][lid] = c_skb[1][lid]; - s_skb[2][lid] = c_skb[2][lid]; - s_skb[3][lid] = c_skb[3][lid]; - s_skb[4][lid] = c_skb[4][lid]; - s_skb[5][lid] = c_skb[5][lid]; - s_skb[6][lid] = c_skb[6][lid]; - s_skb[7][lid] = c_skb[7][lid]; + u32 pw_buf0[4]; + u32 pw_buf1[4]; - barrier (CLK_LOCAL_MEM_FENCE); + pw_buf0[0] = pws[gid].i[0]; + pw_buf0[1] = pws[gid].i[1]; + pw_buf0[2] = pws[gid].i[2]; + pw_buf0[3] = pws[gid].i[3]; + pw_buf1[0] = pws[gid].i[4]; + pw_buf1[1] = pws[gid].i[5]; + pw_buf1[2] = pws[gid].i[6]; + pw_buf1[3] = pws[gid].i[7]; - if (gid >= gid_max) return; + const u32 pw_l_len = pws[gid].pw_len; /** * salt @@ -558,187 +550,169 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_m04 (__glo const u32 mask = salt_bufs[salt_pos].salt_buf[0]; /** - * main + * loop */ - for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - const u32 pw_r_len = combs_buf[il_pos].pw_len; - - u32 pw_len = pw_l_len + pw_r_len; - - pw_len = (pw_len >= 8) ? 
8 : pw_len; - - u32 wordr0[4]; - - wordr0[0] = combs_buf[il_pos].i[0]; - wordr0[1] = combs_buf[il_pos].i[1]; - wordr0[2] = 0; - wordr0[3] = 0; - - u32 wordr1[4]; - - wordr1[0] = 0; - wordr1[1] = 0; - wordr1[2] = 0; - wordr1[3] = 0; - - u32 wordr2[4]; - - wordr2[0] = 0; - wordr2[1] = 0; - wordr2[2] = 0; - wordr2[3] = 0; - - u32 wordr3[4]; - - wordr3[0] = 0; - wordr3[1] = 0; - wordr3[2] = 0; - wordr3[3] = 0; + const u32x pw_r_len = pwlenx_create_combt (combs_buf, il_pos); + + const u32x pw_len = pw_l_len + pw_r_len; + + /** + * concat password candidate + */ + + u32x wordl0[4] = { 0 }; + u32x wordl1[4] = { 0 }; + u32x wordl2[4] = { 0 }; + u32x wordl3[4] = { 0 }; + + wordl0[0] = pw_buf0[0]; + wordl0[1] = pw_buf0[1]; + wordl0[2] = pw_buf0[2]; + wordl0[3] = pw_buf0[3]; + wordl1[0] = pw_buf1[0]; + wordl1[1] = pw_buf1[1]; + wordl1[2] = pw_buf1[2]; + wordl1[3] = pw_buf1[3]; + + u32x wordr0[4] = { 0 }; + u32x wordr1[4] = { 0 }; + u32x wordr2[4] = { 0 }; + u32x wordr3[4] = { 0 }; + + wordr0[0] = ix_create_combt (combs_buf, il_pos, 0); + wordr0[1] = ix_create_combt (combs_buf, il_pos, 1); + wordr0[2] = ix_create_combt (combs_buf, il_pos, 2); + wordr0[3] = ix_create_combt (combs_buf, il_pos, 3); + wordr1[0] = ix_create_combt (combs_buf, il_pos, 4); + wordr1[1] = ix_create_combt (combs_buf, il_pos, 5); + wordr1[2] = ix_create_combt (combs_buf, il_pos, 6); + wordr1[3] = ix_create_combt (combs_buf, il_pos, 7); if (combs_mode == COMBINATOR_MODE_BASE_LEFT) { - switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len); + switch_buffer_by_offset_le_VV (wordr0, wordr1, wordr2, wordr3, pw_l_len); + } + else + { + switch_buffer_by_offset_le_VV (wordl0, wordl1, wordl2, wordl3, pw_r_len); } - u32 w0[4]; + u32x w0[4]; + u32x w1[4]; + u32x w2[4]; + u32x w3[4]; w0[0] = wordl0[0] | wordr0[0]; w0[1] = wordl0[1] | wordr0[1]; - w0[2] = 0; - w0[3] = 0; - - u32 w1[4]; - - w1[0] = 0; - w1[1] = 0; - w1[2] = 0; - w1[3] = 0; - - u32 w2[4]; - - w2[0] = 0; - w2[1] = 0; - w2[2] = 0; - w2[3] = 0; - - u32 w3[4]; - - w3[0] = 0; - w3[1] = 0; - w3[2] = 0; - w3[3] = 0; - - u32 data[2]; + w0[2] = wordl0[2] | wordr0[2]; + w0[3] = wordl0[3] | wordr0[3]; + w1[0] = wordl1[0] | wordr1[0]; + w1[1] = wordl1[1] | wordr1[1]; + w1[2] = wordl1[2] | wordr1[2]; + w1[3] = wordl1[3] | wordr1[3]; + w2[0] = wordl2[0] | wordr2[0]; + w2[1] = wordl2[1] | wordr2[1]; + w2[2] = wordl2[2] | wordr2[2]; + w2[3] = wordl2[3] | wordr2[3]; + w3[0] = wordl3[0] | wordr3[0]; + w3[1] = wordl3[1] | wordr3[1]; + w3[2] = wordl3[2] | wordr3[2]; + w3[3] = wordl3[3] | wordr3[3]; + + /** + * DES + */ + + u32x data[2]; data[0] = (w0[0] << 1) & 0xfefefefe; data[1] = (w0[1] << 1) & 0xfefefefe; - u32 Kc[16]; - u32 Kd[16]; + u32x Kc[16]; + u32x Kd[16]; _des_crypt_keysetup (data[0], data[1], Kc, Kd, s_skb); - u32 iv[2]; + u32x iv[2]; _des_crypt_encrypt (iv, mask, Kc, Kd, s_SPtrans); - const u32 r0 = iv[0]; - const u32 r1 = iv[1]; - const u32 r2 = 0; - const u32 r3 = 0; + u32x z = 0; - #include COMPARE_M + COMPARE_M_SIMD (iv[0], iv[1], z, z); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t 
*digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m01500_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m01500_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, 
__global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m01500_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * modifier */ + const u32 gid = get_global_id (0); const u32 lid = get_local_id (0); + const u32 lsz = get_local_size (0); /** - * base + * sbox, kbox */ - const u32 gid = get_global_id (0); - - u32 wordl0[4]; - - wordl0[0] = pws[gid].i[ 0]; - wordl0[1] = pws[gid].i[ 1]; - wordl0[2] = 0; - wordl0[3] = 0; - - u32 wordl1[4]; - - wordl1[0] = 0; - wordl1[1] = 0; - wordl1[2] = 0; - wordl1[3] = 0; - - u32 wordl2[4]; - - wordl2[0] = 0; - wordl2[1] = 0; - wordl2[2] = 0; - wordl2[3] = 0; - - u32 wordl3[4]; - - wordl3[0] = 0; - wordl3[1] = 0; - wordl3[2] = 0; - wordl3[3] = 0; - - const u32 pw_l_len = pws[gid].pw_len; + __local u32 s_SPtrans[8][64]; + __local u32 s_skb[8][64]; - if (combs_mode == COMBINATOR_MODE_BASE_RIGHT) + for (u32 i = lid; i < 64; i += lsz) { - switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len); + s_SPtrans[0][i] = c_SPtrans[0][i]; + s_SPtrans[1][i] = c_SPtrans[1][i]; + s_SPtrans[2][i] = c_SPtrans[2][i]; + s_SPtrans[3][i] = c_SPtrans[3][i]; + s_SPtrans[4][i] = c_SPtrans[4][i]; + s_SPtrans[5][i] = c_SPtrans[5][i]; + s_SPtrans[6][i] = c_SPtrans[6][i]; + s_SPtrans[7][i] = c_SPtrans[7][i]; + + s_skb[0][i] = c_skb[0][i]; + s_skb[1][i] = c_skb[1][i]; + s_skb[2][i] = c_skb[2][i]; + s_skb[3][i] = c_skb[3][i]; + s_skb[4][i] = c_skb[4][i]; + s_skb[5][i] = c_skb[5][i]; + s_skb[6][i] = c_skb[6][i]; + s_skb[7][i] = c_skb[7][i]; } - /** - * modifier - */ + barrier (CLK_LOCAL_MEM_FENCE); - __local u32 s_SPtrans[8][64]; + if (gid >= gid_max) return; - __local u32 s_skb[8][64]; + /** + * base + */ - s_SPtrans[0][lid] = c_SPtrans[0][lid]; - s_SPtrans[1][lid] = c_SPtrans[1][lid]; - s_SPtrans[2][lid] = c_SPtrans[2][lid]; - s_SPtrans[3][lid] = c_SPtrans[3][lid]; - s_SPtrans[4][lid] = c_SPtrans[4][lid]; - s_SPtrans[5][lid] = c_SPtrans[5][lid]; - s_SPtrans[6][lid] = c_SPtrans[6][lid]; - s_SPtrans[7][lid] = c_SPtrans[7][lid]; - - s_skb[0][lid] = c_skb[0][lid]; - s_skb[1][lid] = c_skb[1][lid]; - 
s_skb[2][lid] = c_skb[2][lid]; - s_skb[3][lid] = c_skb[3][lid]; - s_skb[4][lid] = c_skb[4][lid]; - s_skb[5][lid] = c_skb[5][lid]; - s_skb[6][lid] = c_skb[6][lid]; - s_skb[7][lid] = c_skb[7][lid]; + u32 pw_buf0[4]; + u32 pw_buf1[4]; - barrier (CLK_LOCAL_MEM_FENCE); + pw_buf0[0] = pws[gid].i[0]; + pw_buf0[1] = pws[gid].i[1]; + pw_buf0[2] = pws[gid].i[2]; + pw_buf0[3] = pws[gid].i[3]; + pw_buf1[0] = pws[gid].i[4]; + pw_buf1[1] = pws[gid].i[5]; + pw_buf1[2] = pws[gid].i[6]; + pw_buf1[3] = pws[gid].i[7]; - if (gid >= gid_max) return; + const u32 pw_l_len = pws[gid].pw_len; /** * salt @@ -754,110 +728,111 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_s04 (__glo { digests_buf[digests_offset].digest_buf[DGST_R0], digests_buf[digests_offset].digest_buf[DGST_R1], - digests_buf[digests_offset].digest_buf[DGST_R2], - digests_buf[digests_offset].digest_buf[DGST_R3] + 0, + 0 }; /** - * main + * loop */ - for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - const u32 pw_r_len = combs_buf[il_pos].pw_len; - - u32 pw_len = pw_l_len + pw_r_len; - - pw_len = (pw_len >= 8) ? 8 : pw_len; - - u32 wordr0[4]; - - wordr0[0] = combs_buf[il_pos].i[0]; - wordr0[1] = combs_buf[il_pos].i[1]; - wordr0[2] = 0; - wordr0[3] = 0; - - u32 wordr1[4]; - - wordr1[0] = 0; - wordr1[1] = 0; - wordr1[2] = 0; - wordr1[3] = 0; - - u32 wordr2[4]; - - wordr2[0] = 0; - wordr2[1] = 0; - wordr2[2] = 0; - wordr2[3] = 0; - - u32 wordr3[4]; - - wordr3[0] = 0; - wordr3[1] = 0; - wordr3[2] = 0; - wordr3[3] = 0; + const u32x pw_r_len = pwlenx_create_combt (combs_buf, il_pos); + + const u32x pw_len = pw_l_len + pw_r_len; + + /** + * concat password candidate + */ + + u32x wordl0[4] = { 0 }; + u32x wordl1[4] = { 0 }; + u32x wordl2[4] = { 0 }; + u32x wordl3[4] = { 0 }; + + wordl0[0] = pw_buf0[0]; + wordl0[1] = pw_buf0[1]; + wordl0[2] = pw_buf0[2]; + wordl0[3] = pw_buf0[3]; + wordl1[0] = pw_buf1[0]; + wordl1[1] = pw_buf1[1]; + wordl1[2] = pw_buf1[2]; + wordl1[3] = pw_buf1[3]; + + u32x wordr0[4] = { 0 }; + u32x wordr1[4] = { 0 }; + u32x wordr2[4] = { 0 }; + u32x wordr3[4] = { 0 }; + + wordr0[0] = ix_create_combt (combs_buf, il_pos, 0); + wordr0[1] = ix_create_combt (combs_buf, il_pos, 1); + wordr0[2] = ix_create_combt (combs_buf, il_pos, 2); + wordr0[3] = ix_create_combt (combs_buf, il_pos, 3); + wordr1[0] = ix_create_combt (combs_buf, il_pos, 4); + wordr1[1] = ix_create_combt (combs_buf, il_pos, 5); + wordr1[2] = ix_create_combt (combs_buf, il_pos, 6); + wordr1[3] = ix_create_combt (combs_buf, il_pos, 7); if (combs_mode == COMBINATOR_MODE_BASE_LEFT) { - switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len); + switch_buffer_by_offset_le_VV (wordr0, wordr1, wordr2, wordr3, pw_l_len); + } + else + { + switch_buffer_by_offset_le_VV (wordl0, wordl1, wordl2, wordl3, pw_r_len); } - u32 w0[4]; + u32x w0[4]; + u32x w1[4]; + u32x w2[4]; + u32x w3[4]; w0[0] = wordl0[0] | wordr0[0]; w0[1] = wordl0[1] | wordr0[1]; - w0[2] = 0; - w0[3] = 0; - - u32 w1[4]; - - w1[0] = 0; - w1[1] = 0; - w1[2] = 0; - w1[3] = 0; - - u32 w2[4]; - - w2[0] = 0; - w2[1] = 0; - w2[2] = 0; - w2[3] = 0; - - u32 w3[4]; - - w3[0] = 0; - w3[1] = 0; - w3[2] = 0; - w3[3] = 0; - - u32 data[2]; + w0[2] = wordl0[2] | wordr0[2]; + w0[3] = wordl0[3] | wordr0[3]; + w1[0] = wordl1[0] | wordr1[0]; + w1[1] = wordl1[1] | wordr1[1]; + w1[2] = wordl1[2] | wordr1[2]; + w1[3] = wordl1[3] | wordr1[3]; + w2[0] = wordl2[0] | wordr2[0]; + w2[1] = wordl2[1] | wordr2[1]; + w2[2] = wordl2[2] | wordr2[2]; + w2[3] = 
wordl2[3] | wordr2[3]; + w3[0] = wordl3[0] | wordr3[0]; + w3[1] = wordl3[1] | wordr3[1]; + w3[2] = wordl3[2] | wordr3[2]; + w3[3] = wordl3[3] | wordr3[3]; + + /** + * DES + */ + + u32x data[2]; data[0] = (w0[0] << 1) & 0xfefefefe; data[1] = (w0[1] << 1) & 0xfefefefe; - u32 Kc[16]; - u32 Kd[16]; + u32x Kc[16]; + u32x Kd[16]; _des_crypt_keysetup (data[0], data[1], Kc, Kd, s_skb); - u32 iv[2]; + u32x iv[2]; _des_crypt_encrypt (iv, mask, Kc, Kd, s_SPtrans); - const u32 r0 = iv[0]; - const u32 r1 = iv[1]; - const u32 r2 = 0; - const u32 r3 = 0; + u32x z = 0; - #include COMPARE_S + COMPARE_S_SIMD (iv[0], iv[1], z, z); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m01500_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01500_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m01500_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, 
__global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { }
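
The reworked BOX() macro above is the heart of the NEW_SIMD_CODE path: for VECT_SIZE > 1 it performs one scalar S-box lookup per vector lane and packs the results back into a u32x with the (u32x)(...) constructor, so each work-item tests VECT_SIZE password candidates per trip through the il_pos loop. Below is a minimal scalar-C sketch of that gather for the VECT_SIZE == 4 case; the u32x4 struct and box_gather4() helper are illustrative stand-ins, not hashcat identifiers.

/*
 * Sketch of the VECT_SIZE == 4 BOX() gather: one independent table
 * lookup per lane, packed back into a 4-lane vector.  In the kernel the
 * same effect comes from (u32x)((S)[n][i.s0], ..., (S)[n][i.s3]) on an
 * OpenCL vector type; u32x4 and box_gather4 here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

typedef struct { u32 s0, s1, s2, s3; } u32x4;   /* stand-in for u32x at VECT_SIZE == 4 */

static u32x4 box_gather4 (u32 S[8][64], int n, u32x4 i)
{
  u32x4 r;

  r.s0 = S[n][i.s0];   /* lane 0 looks up its own 6-bit index */
  r.s1 = S[n][i.s1];
  r.s2 = S[n][i.s2];
  r.s3 = S[n][i.s3];

  return r;
}

int main (void)
{
  u32 S[8][64];

  /* dummy table so the example runs; the kernel uses c_SPtrans / c_skb */
  for (int n = 0; n < 8; n++)
    for (int k = 0; k < 64; k++)
      S[n][k] = (u32) (n * 64 + k);

  u32x4 idx = { 1, 2, 3, 4 };             /* four candidates' indices */
  u32x4 out = box_gather4 (S, 5, idx);

  printf ("%u %u %u %u\n", out.s0, out.s1, out.s2, out.s3);

  return 0;
}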
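
The kernel prologue changed as well: instead of relying on reqd_work_group_size(64,1,1) and having each of the 64 work-items copy exactly one entry of c_SPtrans and c_skb, the new kernels read the actual work-group size and stride the copy into __local memory, and the barrier now comes before the gid >= gid_max early-out so every work-item takes part in the copy. A scalar-C sketch of that access pattern follows, with LSZ, c_tab, s_tab and copy_tables as hypothetical stand-ins.

/*
 * Sketch of the strided __local table copy in the new m01500_* kernels.
 * In the kernel the loop strides by get_local_size(0) and is followed by
 * barrier(CLK_LOCAL_MEM_FENCE), so the shared tables end up fully
 * populated for any work-group size (the old code hard-wired 64).
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef uint32_t u32;

#define LSZ 32u                  /* hypothetical work-group size */

static u32 c_tab[8][64];         /* stands in for __constant c_SPtrans / c_skb */
static u32 s_tab[8][64];         /* stands in for the __local copies           */

/* What one work-item with local id `lid` contributes to the copy. */
static void copy_tables (u32 lid)
{
  for (u32 i = lid; i < 64; i += LSZ)
  {
    for (int n = 0; n < 8; n++) s_tab[n][i] = c_tab[n][i];
  }
}

int main (void)
{
  for (int n = 0; n < 8; n++)
    for (int k = 0; k < 64; k++)
      c_tab[n][k] = (u32) (n * 64 + k);

  /* emulate the whole work-group; the kernel's barrier() goes after this */
  for (u32 lid = 0; lid < LSZ; lid++) copy_tables (lid);

  printf ("tables %s\n", memcmp (s_tab, c_tab, sizeof (s_tab)) == 0 ? "match" : "differ");

  return 0;
}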