X-Git-Url: https://www.flypig.org.uk/git/?a=blobdiff_plain;f=OpenCL%2Fm05000_a1.cl;h=a6192843fad01e6c8252702f466aaeef001b463b;hb=161a6eb4bc643d8e636e96eda613f5137d30da59;hp=df47782bf07dd329496a00529ef1440b265f39c9;hpb=50f39b3563fa429dbec441288bd9a21fc328c21e;p=hashcat.git diff --git a/OpenCL/m05000_a1.cl b/OpenCL/m05000_a1.cl index df47782..a619284 100644 --- a/OpenCL/m05000_a1.cl +++ b/OpenCL/m05000_a1.cl @@ -5,20 +5,14 @@ #define _KECCAK_ -#include "include/constants.h" -#include "include/kernel_vendor.h" +#define NEW_SIMD_CODE -#define DGST_R0 2 -#define DGST_R1 3 -#define DGST_R2 4 -#define DGST_R3 5 - -#include "include/kernel_functions.c" -#include "types_ocl.c" -#include "common.c" - -#define COMPARE_S "check_single_comp4.c" -#define COMPARE_M "check_multi_comp4.c" +#include "inc_vendor.cl" +#include "inc_hash_constants.h" +#include "inc_hash_functions.cl" +#include "inc_types.cl" +#include "inc_common.cl" +#include "inc_simd.cl" __constant u64 keccakf_rndc[24] = { @@ -32,18 +26,6 @@ __constant u64 keccakf_rndc[24] = 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 }; -__constant u32 keccakf_rotc[24] = -{ - 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, - 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 -}; - -__constant u32 keccakf_piln[24] = -{ - 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, - 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 -}; - #ifndef KECCAK_ROUNDS #define KECCAK_ROUNDS 24 #endif @@ -61,8 +43,8 @@ __constant u32 keccakf_piln[24] = #define Rho_Pi(s) \ { \ - u32 j = keccakf_piln[s]; \ - u32 k = keccakf_rotc[s]; \ + u32 j = keccakf_piln[s]; \ + u32 k = keccakf_rotc[s]; \ bc0 = st[j]; \ st[j] = rotl64 (t, k); \ t = bc0; \ @@ -82,7 +64,7 @@ __constant u32 keccakf_piln[24] = st[4 + s] ^= ~bc0 & bc1; \ } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, 
const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * modifier @@ -98,42 +80,35 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo if (gid >= gid_max) return; - u32 wordl0[4]; - - wordl0[0] = pws[gid].i[ 0]; - wordl0[1] = pws[gid].i[ 1]; - wordl0[2] = pws[gid].i[ 2]; - wordl0[3] = pws[gid].i[ 3]; - - u32 wordl1[4]; - - wordl1[0] = pws[gid].i[ 4]; - wordl1[1] = pws[gid].i[ 5]; - wordl1[2] = pws[gid].i[ 6]; - wordl1[3] = pws[gid].i[ 7]; + u32 pw_buf0[4]; + u32 pw_buf1[4]; - u32 wordl2[4]; - - wordl2[0] = 0; - wordl2[1] = 0; - wordl2[2] = 0; - wordl2[3] = 0; - - u32 wordl3[4]; - - wordl3[0] = 0; - wordl3[1] = 0; - wordl3[2] = 0; - wordl3[3] = 0; + pw_buf0[0] = pws[gid].i[0]; + pw_buf0[1] = pws[gid].i[1]; + pw_buf0[2] = pws[gid].i[2]; + pw_buf0[3] = pws[gid].i[3]; + pw_buf1[0] = pws[gid].i[4]; + pw_buf1[1] = pws[gid].i[5]; + pw_buf1[2] = pws[gid].i[6]; + pw_buf1[3] = pws[gid].i[7]; const u32 pw_l_len = pws[gid].pw_len; - if (combs_mode == COMBINATOR_MODE_BASE_RIGHT) + /** + * constants + */ + + const u8 keccakf_rotc[24] = { - append_0x01_2x4 (wordl0, wordl1, pw_l_len); + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 + }; - switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len); - } + const u8 keccakf_piln[24] = + { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 + }; /** * 0x80 keccak, very special @@ -149,95 +124,89 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo * loop */ - for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - const u32 pw_r_len = combs_buf[il_pos].pw_len; - - const u32 pw_len = pw_l_len + pw_r_len; - - u32 wordr0[4]; - - wordr0[0] = combs_buf[il_pos].i[0]; - wordr0[1] = combs_buf[il_pos].i[1]; - wordr0[2] = combs_buf[il_pos].i[2]; - wordr0[3] = combs_buf[il_pos].i[3]; - - u32 wordr1[4]; - - wordr1[0] = combs_buf[il_pos].i[4]; - wordr1[1] = combs_buf[il_pos].i[5]; - wordr1[2] = combs_buf[il_pos].i[6]; - wordr1[3] = combs_buf[il_pos].i[7]; - - u32 wordr2[4]; - - wordr2[0] = 0; - wordr2[1] = 0; - wordr2[2] = 0; - wordr2[3] = 0; - - u32 wordr3[4]; - - wordr3[0] = 0; - wordr3[1] = 0; - wordr3[2] = 0; - wordr3[3] = 0; + const u32x pw_r_len = pwlenx_create_combt (combs_buf, il_pos); + + const u32x pw_len = pw_l_len + pw_r_len; + + /** + * concat password candidate + */ + + u32x wordl0[4] = { 0 }; + u32x wordl1[4] = { 0 }; + u32x wordl2[4] = { 0 }; + u32x wordl3[4] = { 0 }; + + wordl0[0] = pw_buf0[0]; + wordl0[1] = pw_buf0[1]; + wordl0[2] = pw_buf0[2]; + wordl0[3] = pw_buf0[3]; + wordl1[0] = pw_buf1[0]; + wordl1[1] = pw_buf1[1]; + wordl1[2] = pw_buf1[2]; + wordl1[3] = pw_buf1[3]; + + u32x wordr0[4] = { 0 }; + u32x wordr1[4] = { 0 }; + u32x wordr2[4] = { 0 }; + u32x wordr3[4] = { 0 }; + + wordr0[0] = ix_create_combt (combs_buf, il_pos, 0); + wordr0[1] = ix_create_combt (combs_buf, il_pos, 1); + wordr0[2] = ix_create_combt (combs_buf, il_pos, 2); + wordr0[3] = ix_create_combt (combs_buf, il_pos, 3); + wordr1[0] = ix_create_combt (combs_buf, il_pos, 4); + wordr1[1] = ix_create_combt (combs_buf, il_pos, 5); + wordr1[2] = ix_create_combt (combs_buf, il_pos, 6); + wordr1[3] = ix_create_combt (combs_buf, il_pos, 7); if (combs_mode == COMBINATOR_MODE_BASE_LEFT) { - append_0x01_2x4 (wordr0, wordr1, pw_r_len); - - switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len); + 
switch_buffer_by_offset_le_VV (wordr0, wordr1, wordr2, wordr3, pw_l_len); + } + else + { + switch_buffer_by_offset_le_VV (wordl0, wordl1, wordl2, wordl3, pw_r_len); } - u32 w0[4]; + u32x w0[4]; + u32x w1[4]; + u32x w2[4]; + u32x w3[4]; w0[0] = wordl0[0] | wordr0[0]; w0[1] = wordl0[1] | wordr0[1]; w0[2] = wordl0[2] | wordr0[2]; w0[3] = wordl0[3] | wordr0[3]; - - u32 w1[4]; - w1[0] = wordl1[0] | wordr1[0]; w1[1] = wordl1[1] | wordr1[1]; w1[2] = wordl1[2] | wordr1[2]; w1[3] = wordl1[3] | wordr1[3]; - - u32 w2[4]; - w2[0] = wordl2[0] | wordr2[0]; w2[1] = wordl2[1] | wordr2[1]; w2[2] = wordl2[2] | wordr2[2]; w2[3] = wordl2[3] | wordr2[3]; - - u32 w3[4]; - w3[0] = wordl3[0] | wordr3[0]; w3[1] = wordl3[1] | wordr3[1]; - w3[2] = pw_len * 8; - w3[3] = 0; - - u64 st[25]; - - #ifdef VECT_SIZE1 - st[ 0] = (u64x) (w0[0]) | (u64x) (w0[1]) << 32; - st[ 1] = (u64x) (w0[2]) | (u64x) (w0[3]) << 32; - st[ 2] = (u64x) (w1[0]) | (u64x) (w1[1]) << 32; - st[ 3] = (u64x) (w1[2]) | (u64x) (w1[3]) << 32; - #endif - - #ifdef VECT_SIZE2 - st[ 0] = (u64x) (w0[0].s0, w0[0].s1) | (u64x) (w0[1].s0, w0[1].s1) << 32; - st[ 1] = (u64x) (w0[2].s0, w0[2].s1) | (u64x) (w0[3].s0, w0[3].s1) << 32; - st[ 2] = (u64x) (w1[0].s0, w1[0].s1) | (u64x) (w1[1].s0, w1[1].s1) << 32; - st[ 3] = (u64x) (w1[2].s0, w1[2].s1) | (u64x) (w1[3].s0, w1[3].s1) << 32; - #endif - - st[ 4] = 0; - st[ 5] = 0; - st[ 6] = 0; - st[ 7] = 0; + w3[2] = wordl3[2] | wordr3[2]; + w3[3] = wordl3[3] | wordr3[3]; + + /** + * Keccak + */ + + u64x st[25]; + + st[ 0] = hl32_to_64 (w0[1], w0[0]); + st[ 1] = hl32_to_64 (w0[3], w0[2]); + st[ 2] = hl32_to_64 (w1[1], w1[0]); + st[ 3] = hl32_to_64 (w1[3], w1[2]); + st[ 4] = hl32_to_64 (w2[1], w2[0]); + st[ 5] = hl32_to_64 (w2[3], w2[2]); + st[ 6] = hl32_to_64 (w3[1], w3[0]); + st[ 7] = hl32_to_64 (w3[3], w3[2]); st[ 8] = 0; st[ 9] = 0; st[10] = 0; @@ -264,13 +233,13 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo { // Theta - u64 bc0 = Theta1 (0); - u64 bc1 = Theta1 (1); - u64 bc2 = Theta1 (2); - u64 bc3 = Theta1 (3); - u64 bc4 = Theta1 (4); + u64x bc0 = Theta1 (0); + u64x bc1 = Theta1 (1); + u64x bc2 = Theta1 (2); + u64x bc3 = Theta1 (3); + u64x bc4 = Theta1 (4); - u64 t; + u64x t; t = bc4 ^ rotl64 (bc1, 1); Theta2 (0); t = bc0 ^ rotl64 (bc2, 1); Theta2 (1); @@ -320,24 +289,24 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__glo st[0] ^= keccakf_rndc[round]; } - const u32 r0 = l32_from_64 (st[1]); - const u32 r1 = h32_from_64 (st[1]); - const u32 r2 = l32_from_64 (st[2]); - const u32 r3 = h32_from_64 (st[2]); + const u32x r0 = l32_from_64 (st[1]); + const u32x r1 = h32_from_64 (st[1]); + const u32x r2 = l32_from_64 (st[2]); + const u32x r3 = h32_from_64 (st[2]); - #include COMPARE_M + COMPARE_M_SIMD (r0, r1, r2, r3); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 
bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t 
*plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * modifier @@ -353,42 +322,45 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo if (gid >= gid_max) return; - u32 wordl0[4]; - - wordl0[0] = pws[gid].i[ 0]; - wordl0[1] = pws[gid].i[ 1]; - wordl0[2] = pws[gid].i[ 2]; - wordl0[3] = pws[gid].i[ 3]; + u32 pw_buf0[4]; + u32 pw_buf1[4]; - u32 wordl1[4]; + pw_buf0[0] = pws[gid].i[0]; + pw_buf0[1] = pws[gid].i[1]; + pw_buf0[2] = pws[gid].i[2]; + pw_buf0[3] = pws[gid].i[3]; + pw_buf1[0] = pws[gid].i[4]; + pw_buf1[1] = pws[gid].i[5]; + pw_buf1[2] = pws[gid].i[6]; + pw_buf1[3] = pws[gid].i[7]; - wordl1[0] = pws[gid].i[ 4]; - wordl1[1] = pws[gid].i[ 5]; - wordl1[2] = pws[gid].i[ 6]; - wordl1[3] = pws[gid].i[ 7]; + const u32 pw_l_len = pws[gid].pw_len; - u32 wordl2[4]; + /** + * constants + */ - wordl2[0] = 0; - wordl2[1] = 0; - wordl2[2] = 0; - wordl2[3] = 0; + const u8 keccakf_rotc[24] = + { + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 + }; - u32 wordl3[4]; + const u8 keccakf_piln[24] = + { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 + }; - wordl3[0] = 0; - wordl3[1] = 0; - wordl3[2] = 0; - wordl3[3] = 0; + /** + * 0x80 keccak, very special + */ - const u32 pw_l_len = pws[gid].pw_len; + const u32 mdlen = salt_bufs[salt_pos].keccak_mdlen; - if (combs_mode == COMBINATOR_MODE_BASE_RIGHT) - { - append_0x01_2x4 (wordl0, wordl1, pw_l_len); + const u32 rsiz = 200 - (2 * mdlen); - switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len); - } + const u32 add80w = (rsiz - 1) / 8; /** * digest @@ -402,109 +374,93 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo digests_buf[digests_offset].digest_buf[DGST_R3] }; - /** - * 0x80 keccak, very special - */ - - const u32 mdlen = salt_bufs[salt_pos].keccak_mdlen; - - const u32 rsiz = 200 - (2 * mdlen); - - const u32 add80w = (rsiz - 1) / 8; - /** * loop */ - for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++) + for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE) { - const u32 pw_r_len = combs_buf[il_pos].pw_len; - - const u32 pw_len = pw_l_len + pw_r_len; 
- - u32 wordr0[4]; - - wordr0[0] = combs_buf[il_pos].i[0]; - wordr0[1] = combs_buf[il_pos].i[1]; - wordr0[2] = combs_buf[il_pos].i[2]; - wordr0[3] = combs_buf[il_pos].i[3]; - - u32 wordr1[4]; - - wordr1[0] = combs_buf[il_pos].i[4]; - wordr1[1] = combs_buf[il_pos].i[5]; - wordr1[2] = combs_buf[il_pos].i[6]; - wordr1[3] = combs_buf[il_pos].i[7]; - - u32 wordr2[4]; - - wordr2[0] = 0; - wordr2[1] = 0; - wordr2[2] = 0; - wordr2[3] = 0; - - u32 wordr3[4]; - - wordr3[0] = 0; - wordr3[1] = 0; - wordr3[2] = 0; - wordr3[3] = 0; + const u32x pw_r_len = pwlenx_create_combt (combs_buf, il_pos); + + const u32x pw_len = pw_l_len + pw_r_len; + + /** + * concat password candidate + */ + + u32x wordl0[4] = { 0 }; + u32x wordl1[4] = { 0 }; + u32x wordl2[4] = { 0 }; + u32x wordl3[4] = { 0 }; + + wordl0[0] = pw_buf0[0]; + wordl0[1] = pw_buf0[1]; + wordl0[2] = pw_buf0[2]; + wordl0[3] = pw_buf0[3]; + wordl1[0] = pw_buf1[0]; + wordl1[1] = pw_buf1[1]; + wordl1[2] = pw_buf1[2]; + wordl1[3] = pw_buf1[3]; + + u32x wordr0[4] = { 0 }; + u32x wordr1[4] = { 0 }; + u32x wordr2[4] = { 0 }; + u32x wordr3[4] = { 0 }; + + wordr0[0] = ix_create_combt (combs_buf, il_pos, 0); + wordr0[1] = ix_create_combt (combs_buf, il_pos, 1); + wordr0[2] = ix_create_combt (combs_buf, il_pos, 2); + wordr0[3] = ix_create_combt (combs_buf, il_pos, 3); + wordr1[0] = ix_create_combt (combs_buf, il_pos, 4); + wordr1[1] = ix_create_combt (combs_buf, il_pos, 5); + wordr1[2] = ix_create_combt (combs_buf, il_pos, 6); + wordr1[3] = ix_create_combt (combs_buf, il_pos, 7); if (combs_mode == COMBINATOR_MODE_BASE_LEFT) { - append_0x01_2x4 (wordr0, wordr1, pw_r_len); - - switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len); + switch_buffer_by_offset_le_VV (wordr0, wordr1, wordr2, wordr3, pw_l_len); + } + else + { + switch_buffer_by_offset_le_VV (wordl0, wordl1, wordl2, wordl3, pw_r_len); } - u32 w0[4]; + u32x w0[4]; + u32x w1[4]; + u32x w2[4]; + u32x w3[4]; w0[0] = wordl0[0] | wordr0[0]; w0[1] = wordl0[1] | wordr0[1]; w0[2] = wordl0[2] | wordr0[2]; w0[3] = wordl0[3] | wordr0[3]; - - u32 w1[4]; - w1[0] = wordl1[0] | wordr1[0]; w1[1] = wordl1[1] | wordr1[1]; w1[2] = wordl1[2] | wordr1[2]; w1[3] = wordl1[3] | wordr1[3]; - - u32 w2[4]; - w2[0] = wordl2[0] | wordr2[0]; w2[1] = wordl2[1] | wordr2[1]; w2[2] = wordl2[2] | wordr2[2]; w2[3] = wordl2[3] | wordr2[3]; - - u32 w3[4]; - w3[0] = wordl3[0] | wordr3[0]; w3[1] = wordl3[1] | wordr3[1]; - w3[2] = pw_len * 8; - w3[3] = 0; - - u64 st[25]; - - #ifdef VECT_SIZE1 - st[ 0] = (u64x) (w0[0]) | (u64x) (w0[1]) << 32; - st[ 1] = (u64x) (w0[2]) | (u64x) (w0[3]) << 32; - st[ 2] = (u64x) (w1[0]) | (u64x) (w1[1]) << 32; - st[ 3] = (u64x) (w1[2]) | (u64x) (w1[3]) << 32; - #endif - - #ifdef VECT_SIZE2 - st[ 0] = (u64x) (w0[0].s0, w0[0].s1) | (u64x) (w0[1].s0, w0[1].s1) << 32; - st[ 1] = (u64x) (w0[2].s0, w0[2].s1) | (u64x) (w0[3].s0, w0[3].s1) << 32; - st[ 2] = (u64x) (w1[0].s0, w1[0].s1) | (u64x) (w1[1].s0, w1[1].s1) << 32; - st[ 3] = (u64x) (w1[2].s0, w1[2].s1) | (u64x) (w1[3].s0, w1[3].s1) << 32; - #endif - - st[ 4] = 0; - st[ 5] = 0; - st[ 6] = 0; - st[ 7] = 0; + w3[2] = wordl3[2] | wordr3[2]; + w3[3] = wordl3[3] | wordr3[3]; + + /** + * Keccak + */ + + u64x st[25]; + + st[ 0] = hl32_to_64 (w0[1], w0[0]); + st[ 1] = hl32_to_64 (w0[3], w0[2]); + st[ 2] = hl32_to_64 (w1[1], w1[0]); + st[ 3] = hl32_to_64 (w1[3], w1[2]); + st[ 4] = hl32_to_64 (w2[1], w2[0]); + st[ 5] = hl32_to_64 (w2[3], w2[2]); + st[ 6] = hl32_to_64 (w3[1], w3[0]); + st[ 7] = hl32_to_64 (w3[3], w3[2]); st[ 8] = 0; st[ 9] = 0; st[10] = 0; @@ 
-531,13 +487,13 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo { // Theta - u64 bc0 = Theta1 (0); - u64 bc1 = Theta1 (1); - u64 bc2 = Theta1 (2); - u64 bc3 = Theta1 (3); - u64 bc4 = Theta1 (4); + u64x bc0 = Theta1 (0); + u64x bc1 = Theta1 (1); + u64x bc2 = Theta1 (2); + u64x bc3 = Theta1 (3); + u64x bc4 = Theta1 (4); - u64 t; + u64x t; t = bc4 ^ rotl64 (bc1, 1); Theta2 (0); t = bc0 ^ rotl64 (bc2, 1); Theta2 (1); @@ -587,19 +543,19 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__glo st[0] ^= keccakf_rndc[round]; } - const u32 r0 = l32_from_64 (st[1]); - const u32 r1 = h32_from_64 (st[1]); - const u32 r2 = l32_from_64 (st[2]); - const u32 r3 = h32_from_64 (st[2]); + const u32x r0 = l32_from_64 (st[1]); + const u32x r1 = h32_from_64 (st[1]); + const u32x r2 = l32_from_64 (st[2]); + const u32x r3 = h32_from_64 (st[2]); - #include COMPARE_S + COMPARE_S_SIMD (r0, r1, r2, r3); } } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 
bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m05000_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { }
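
The rewritten kernels above build each 64-bit Keccak lane with hl32_to_64 () instead of the removed per-VECT_SIZE #ifdef blocks, and they read the rotation and permutation offsets from per-kernel const u8 arrays (keccakf_rotc, keccakf_piln) rather than from __constant memory. The plain-C sketch below is not hashcat code; it only models those two details, under the assumption that hl32_to_64 () and rotl64 () behave as their uses in the diff suggest (pack high/low 32-bit halves, rotate left). The file name sketch.c and the sample input words are hypothetical; the seeding of t from st[1] before the Rho/Pi sweep is standard Keccak-f and falls in a part of the file this diff does not show.

/* sketch.c: standalone model of two details of the new kernel,
 *   1. lane packing with hl32_to_64 (), replacing the removed
 *      VECT_SIZE1/VECT_SIZE2 #ifdef blocks, and
 *   2. one Rho/Pi sweep driven by the keccakf_rotc/keccakf_piln tables
 *      that the diff moves into per-kernel const u8 arrays.
 */

#include <stdint.h>
#include <stdio.h>

/* assumed behaviour of the helpers from the new inc_common.cl */

static uint64_t rotl64 (uint64_t x, unsigned int n)
{
  return (x << n) | (x >> ((64 - n) & 63));
}

static uint64_t hl32_to_64 (uint32_t hi, uint32_t lo)
{
  return ((uint64_t) hi << 32) | lo;
}

/* same tables as in the kernels above */

static const uint8_t keccakf_rotc[24] =
{
   1,  3,  6, 10, 15, 21, 28, 36, 45, 55,  2, 14,
  27, 41, 56,  8, 25, 43, 62, 18, 39, 61, 20, 44
};

static const uint8_t keccakf_piln[24] =
{
  10,  7, 11, 17, 18,  3,  5, 16,  8, 21, 24,  4,
  15, 23, 19, 13, 12,  2, 20, 14, 22,  9,  6,  1
};

int main (void)
{
  uint64_t st[25] = { 0 };

  /* lane packing, as in "st[ 0] = hl32_to_64 (w0[1], w0[0]);" above:
     w0[0] becomes the low half of the lane and w0[1] the high half,
     exactly what the removed "(u64x) (w0[0]) | (u64x) (w0[1]) << 32" did */

  const uint32_t w0[4] = { 0x64636261, 0x68676665, 0x6c6b6a69, 0x706f6e6d };

  st[0] = hl32_to_64 (w0[1], w0[0]);
  st[1] = hl32_to_64 (w0[3], w0[2]);

  printf ("st[0]  = %016llx\n", (unsigned long long) st[0]);
  printf ("st[1]  = %016llx\n", (unsigned long long) st[1]);

  /* one Rho/Pi sweep: each step drops the previous lane, rotated left by
     rotc[i], into position piln[i]; the kernel unrolls this as
     Rho_Pi (0) .. Rho_Pi (23), with t seeded from st[1] */

  uint64_t t = st[1];

  for (int i = 0; i < 24; i++)
  {
    const int j = keccakf_piln[i];

    const uint64_t bc0 = st[j];

    st[j] = rotl64 (t, keccakf_rotc[i]);

    t = bc0;
  }

  printf ("st[10] = %016llx\n", (unsigned long long) st[10]);

  return 0;
}

Built with any C99 compiler (for example cc -std=c99 sketch.c), the last printf shows the old st[1], rotated left by one bit, landing in st[10]: that is the unrolled Rho_Pi (0) step, whose table indices are compile-time constants and can presumably be folded by the OpenCL compiler now that the tables are private const arrays rather than __constant buffers.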