X-Git-Url: https://www.flypig.org.uk/git/?a=blobdiff_plain;f=OpenCL%2Fm06600.cl;h=9972e9f1187460ae22e0a5e5acf9d0a0692a9317;hb=161a6eb4bc643d8e636e96eda613f5137d30da59;hp=9b46654776cc957513ea3ef7bff846bb7f3265ad;hpb=76cc1631bee0eb0de1d091cb9ca5e1cd4b0361e4;p=hashcat.git diff --git a/OpenCL/m06600.cl b/OpenCL/m06600.cl index 9b46654..9972e9f 100644 --- a/OpenCL/m06600.cl +++ b/OpenCL/m06600.cl @@ -1,24 +1,20 @@ /** - * Author......: Jens Steube + * Authors.....: Jens Steube + * Gabriele Gristina + * * License.....: MIT */ #define _SHA1_ -#include "include/constants.h" -#include "include/kernel_vendor.h" +#include "inc_vendor.cl" +#include "inc_hash_constants.h" +#include "inc_hash_functions.cl" +#include "inc_types.cl" +#include "inc_common.cl" -#define DGST_R0 0 -#define DGST_R1 1 -#define DGST_R2 2 -#define DGST_R3 3 - -#include "include/kernel_functions.c" -#include "types_ocl.c" -#include "common.c" - -#define COMPARE_S "check_single_comp4.c" -#define COMPARE_M "check_multi_comp4.c" +#define COMPARE_S "inc_comp_single.cl" +#define COMPARE_M "inc_comp_multi.cl" __constant u32 te0[256] = { @@ -707,14 +703,16 @@ __constant u32 rcon[] = 0x1b000000, 0x36000000, }; -static void AES128_ExpandKey (u32 *userkey, u32 *rek, __local u32 s_te0[256], __local u32 s_te1[256], __local u32 s_te2[256], __local u32 s_te3[256], __local u32 s_te4[256]) +void AES128_ExpandKey (u32 *userkey, u32 *rek, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4) { rek[0] = userkey[0]; rek[1] = userkey[1]; rek[2] = userkey[2]; rek[3] = userkey[3]; - #pragma unroll 10 + #ifdef _unroll + #pragma unroll + #endif for (u32 i = 0, j = 0; i < 10; i += 1, j += 4) { u32 temp = rek[j + 3]; @@ -734,7 +732,7 @@ static void AES128_ExpandKey (u32 *userkey, u32 *rek, __local u32 s_te0[256], __ } } -static void AES128_InvertKey (u32 *rdk, __local u32 s_td0[256], __local u32 s_td1[256], __local u32 s_td2[256], __local u32 s_td3[256], __local u32 s_td4[256], __local u32 s_te0[256], __local u32 s_te1[256], __local u32 s_te2[256], __local u32 s_te3[256], __local u32 s_te4[256]) +void AES128_InvertKey (u32 *rdk, __local u32 *s_td0, __local u32 *s_td1, __local u32 *s_td2, __local u32 *s_td3, __local u32 *s_td4, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4) { for (u32 i = 0, j = 40; i < j; i += 4, j -= 4) { @@ -774,7 +772,7 @@ static void AES128_InvertKey (u32 *rdk, __local u32 s_td0[256], __local u32 s_td } } -static void AES128_decrypt (const u32 *in, u32 *out, const u32 *rdk, __local u32 s_td0[256], __local u32 s_td1[256], __local u32 s_td2[256], __local u32 s_td3[256], __local u32 s_td4[256]) +void AES128_decrypt (const u32 *in, u32 *out, const u32 *rdk, __local u32 *s_td0, __local u32 *s_td1, __local u32 *s_td2, __local u32 *s_td3, __local u32 *s_td4) { u32 s0 = in[0] ^ rdk[0]; u32 s1 = in[1] ^ rdk[1]; @@ -848,7 +846,7 @@ static void AES128_decrypt (const u32 *in, u32 *out, const u32 *rdk, __local u32 ^ rdk[43]; } -static void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[5]) +void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[5]) { u32 A = digest[0]; u32 B = digest[1]; @@ -976,7 +974,7 @@ static void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], c digest[4] += E; } -static void hmac_sha1_pad (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[5], u32 opad[5]) +void hmac_sha1_pad (u32 w0[4], u32 w1[4], u32 w2[4], u32 
w3[4], u32 ipad[5], u32 opad[5]) { w0[0] = w0[0] ^ 0x36363636; w0[1] = w0[1] ^ 0x36363636; @@ -1029,7 +1027,7 @@ static void hmac_sha1_pad (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[ sha1_transform (w0, w1, w2, w3, opad); } -static void hmac_sha1_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[5], u32 opad[5], u32 digest[5]) +void hmac_sha1_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[5], u32 opad[5], u32 digest[5]) { digest[0] = ipad[0]; digest[1] = ipad[1]; @@ -1065,7 +1063,7 @@ static void hmac_sha1_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[ sha1_transform (w0, w1, w2, w3, digest); } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global agilekey_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m06600_init (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global agilekey_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * base @@ -1206,7 +1204,7 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_init (__gl tmps[gid].out[4] = dgst[4]; } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global agilekey_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, 
const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m06600_loop (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global agilekey_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { const u32 gid = get_global_id (0); @@ -1288,7 +1286,7 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_loop (__gl tmps[gid].out[4] = out[4]; } -__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global agilekey_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) +__kernel void m06600_comp (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global agilekey_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** * base @@ -1296,33 +1294,12 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_comp (__gl const u32 gid = get_global_id (0); const u32 lid = get_local_id (0); - - /** - * salt - */ - - const u32 iv[4] = - { - salt_bufs[salt_pos].salt_buf[ 4], - salt_bufs[salt_pos].salt_buf[ 5], - salt_bufs[salt_pos].salt_buf[ 6], - salt_bufs[salt_pos].salt_buf[ 7] - }; - - const u32 
data[4] = - { - salt_bufs[salt_pos].salt_buf[ 8], - salt_bufs[salt_pos].salt_buf[ 9], - salt_bufs[salt_pos].salt_buf[10], - salt_bufs[salt_pos].salt_buf[11] - }; + const u32 lsz = get_local_size (0); /** * aes shared */ - const u32 lid4 = lid * 4; - __local u32 s_td0[256]; __local u32 s_td1[256]; __local u32 s_td2[256]; @@ -1335,60 +1312,45 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_comp (__gl __local u32 s_te3[256]; __local u32 s_te4[256]; - s_td0[lid4 + 0] = td0[lid4 + 0]; - s_td0[lid4 + 1] = td0[lid4 + 1]; - s_td0[lid4 + 2] = td0[lid4 + 2]; - s_td0[lid4 + 3] = td0[lid4 + 3]; - - s_td1[lid4 + 0] = td1[lid4 + 0]; - s_td1[lid4 + 1] = td1[lid4 + 1]; - s_td1[lid4 + 2] = td1[lid4 + 2]; - s_td1[lid4 + 3] = td1[lid4 + 3]; - - s_td2[lid4 + 0] = td2[lid4 + 0]; - s_td2[lid4 + 1] = td2[lid4 + 1]; - s_td2[lid4 + 2] = td2[lid4 + 2]; - s_td2[lid4 + 3] = td2[lid4 + 3]; - - s_td3[lid4 + 0] = td3[lid4 + 0]; - s_td3[lid4 + 1] = td3[lid4 + 1]; - s_td3[lid4 + 2] = td3[lid4 + 2]; - s_td3[lid4 + 3] = td3[lid4 + 3]; - - s_td4[lid4 + 0] = td4[lid4 + 0]; - s_td4[lid4 + 1] = td4[lid4 + 1]; - s_td4[lid4 + 2] = td4[lid4 + 2]; - s_td4[lid4 + 3] = td4[lid4 + 3]; - - s_te0[lid4 + 0] = te0[lid4 + 0]; - s_te0[lid4 + 1] = te0[lid4 + 1]; - s_te0[lid4 + 2] = te0[lid4 + 2]; - s_te0[lid4 + 3] = te0[lid4 + 3]; - - s_te1[lid4 + 0] = te1[lid4 + 0]; - s_te1[lid4 + 1] = te1[lid4 + 1]; - s_te1[lid4 + 2] = te1[lid4 + 2]; - s_te1[lid4 + 3] = te1[lid4 + 3]; - - s_te2[lid4 + 0] = te2[lid4 + 0]; - s_te2[lid4 + 1] = te2[lid4 + 1]; - s_te2[lid4 + 2] = te2[lid4 + 2]; - s_te2[lid4 + 3] = te2[lid4 + 3]; - - s_te3[lid4 + 0] = te3[lid4 + 0]; - s_te3[lid4 + 1] = te3[lid4 + 1]; - s_te3[lid4 + 2] = te3[lid4 + 2]; - s_te3[lid4 + 3] = te3[lid4 + 3]; - - s_te4[lid4 + 0] = te4[lid4 + 0]; - s_te4[lid4 + 1] = te4[lid4 + 1]; - s_te4[lid4 + 2] = te4[lid4 + 2]; - s_te4[lid4 + 3] = te4[lid4 + 3]; + for (u32 i = lid; i < 256; i += lsz) + { + s_td0[i] = td0[i]; + s_td1[i] = td1[i]; + s_td2[i] = td2[i]; + s_td3[i] = td3[i]; + s_td4[i] = td4[i]; + + s_te0[i] = te0[i]; + s_te1[i] = te1[i]; + s_te2[i] = te2[i]; + s_te3[i] = te3[i]; + s_te4[i] = te4[i]; + } barrier (CLK_LOCAL_MEM_FENCE); if (gid >= gid_max) return; + /** + * salt + */ + + const u32 iv[4] = + { + salt_bufs[salt_pos].salt_buf[ 4], + salt_bufs[salt_pos].salt_buf[ 5], + salt_bufs[salt_pos].salt_buf[ 6], + salt_bufs[salt_pos].salt_buf[ 7] + }; + + const u32 data[4] = + { + salt_bufs[salt_pos].salt_buf[ 8], + salt_bufs[salt_pos].salt_buf[ 9], + salt_bufs[salt_pos].salt_buf[10], + salt_bufs[salt_pos].salt_buf[11] + }; + /** * aes init */ @@ -1425,7 +1387,9 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06600_comp (__gl AES128_ExpandKey (ukey, rek, s_te0, s_te1, s_te2, s_te3, s_te4); - #pragma unroll KEYLEN + #ifdef _unroll + #pragma unroll + #endif for (u32 i = 0; i < KEYLEN; i++) rdk[i] = rek[i]; AES128_InvertKey (rdk, s_td0, s_td1, s_td2, s_td3, s_td4, s_te0, s_te1, s_te2, s_te3, s_te4);
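
The most visible change in m06600_comp is how the AES lookup tables are staged into __local memory: the hard-coded lid * 4 block copies (which assumed the reqd_work_group_size of 64 that this patch also drops) are replaced by a single strided loop driven by get_local_size (). The fragment below is a minimal, self-contained sketch of that pattern, assuming only standard OpenCL C; the kernel name copy_table_demo, its argument list and the u32 typedef are illustrative and not part of the patch.

/*
 * Sketch only, not part of the hashcat patch: one table copied into
 * __local memory with the stride-lsz pattern used by the new m06600_comp.
 */

typedef unsigned int u32;

__kernel void copy_table_demo (__constant u32 *td0, __global u32 *out)
{
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  __local u32 s_td0[256];

  // cooperative copy: work-item lid loads entries lid, lid + lsz, lid + 2 * lsz, ...
  // so the loop is correct for any work-group size, not just 64

  for (u32 i = lid; i < 256; i += lsz)
  {
    s_td0[i] = td0[i];
  }

  // every work-item in the group must reach the barrier before the table is read

  barrier (CLK_LOCAL_MEM_FENCE);

  const u32 gid = get_global_id (0);

  // trivial read-back so the sketch compiles and has an observable result

  if (gid < 256) out[gid] = s_td0[gid];
}

The same requirement explains the ordering visible in the diff: the early "if (gid >= gid_max) return;" stays after barrier (CLK_LOCAL_MEM_FENCE), because out-of-range work-items must still take part in the copy loop and reach the barrier before they are allowed to exit.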