#define _KEEPASS_
-#include "include/constants.h"
-#include "include/kernel_vendor.h"
+#include "inc_vendor.cl"
+#include "inc_hash_constants.h"
+#include "inc_hash_functions.cl"
+#include "inc_types.cl"
+#include "inc_common.cl"
-#define DGST_R0 0
-#define DGST_R1 1
-#define DGST_R2 2
-#define DGST_R3 3
-
-#include "include/kernel_functions.c"
-#include "OpenCL/types_ocl.c"
-#include "OpenCL/common.c"
-
-#include "OpenCL/kernel_twofish256.c"
+#include "inc_cipher_twofish256.cl"
// AES T-table te0 declaration (one of the te*/td* lookup tables kept in
// __constant memory and copied into __local by the kernels below).
// NOTE(review): only two values are visible and they (0x1b000000,
// 0x36000000) resemble AES rcon constants rather than te0 entries —
// this extract appears to have fused/truncated the table body; confirm
// against the full file before relying on these values.
__constant u32 te0[256] =
{
  0x1b000000, 0x36000000,
};
// AES-256 key expansion: derives the round keys rek[] from the 8-word
// userkey[], using the __local s_te* lookup tables. The only code change
// in this hunk is dropping the `static` qualifier from the definition.
// NOTE(review): the body is elided in this extract (only two rek stores
// visible, plus a stray extra closing brace) — incomplete view, do not
// treat as the full implementation.
-static void AES256_ExpandKey (u32 *userkey, u32 *rek, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4)
+void AES256_ExpandKey (u32 *userkey, u32 *rek, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4)
{
  rek[0] = userkey[0];
  rek[1] = userkey[1];
}
}
// Inverts the AES-256 encryption key schedule in rdk[] (in place) for use
// by AES256_decrypt, via the __local s_td*/s_te* tables. Hunk change:
// `static` qualifier removed.
// NOTE(review): the loop body (the word swaps / InvMixColumns transform
// usually found here) is elided in this extract — incomplete view.
-static void AES256_InvertKey (u32 *rdk, __local u32 *s_td0, __local u32 *s_td1, __local u32 *s_td2, __local u32 *s_td3, __local u32 *s_td4, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4)
+void AES256_InvertKey (u32 *rdk, __local u32 *s_td0, __local u32 *s_td1, __local u32 *s_td2, __local u32 *s_td3, __local u32 *s_td4, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4)
{
  for (u32 i = 0, j = 56; i < j; i += 4, j -= 4)
  {
  }
}
// AES-256 single-block decrypt: 4-word in[] -> out[] under the (inverted)
// round keys rdk[], using the __local s_td* tables. Hunk change: `static`
// qualifier removed.
// NOTE(review): the interior rounds are elided in this extract — the
// dangling `^ rdk[59];` is the tail of a multi-line expression whose
// leading lines are missing. Incomplete view.
-static void AES256_decrypt (const u32 *in, u32 *out, const u32 *rdk, __local u32 *s_td0, __local u32 *s_td1, __local u32 *s_td2, __local u32 *s_td3, __local u32 *s_td4)
+void AES256_decrypt (const u32 *in, u32 *out, const u32 *rdk, __local u32 *s_td0, __local u32 *s_td1, __local u32 *s_td2, __local u32 *s_td3, __local u32 *s_td4)
{
  u32 s0 = in[0] ^ rdk[0];
  u32 s1 = in[1] ^ rdk[1];
            ^ rdk[59];
}
// AES-256 single-block encrypt: 4-word in[] -> out[] under round keys
// rek[], using the __local s_te* tables. Hunk change: `static` removed.
// NOTE(review): the body is elided; the trailing `SHA256C3c, ...` row and
// `};` belong to a following SHA-256 round-constant table whose hunk was
// fused into this span by extraction — incomplete view.
-static void AES256_encrypt (const u32 *in, u32 *out, const u32 *rek, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4)
+void AES256_encrypt (const u32 *in, u32 *out, const u32 *rek, __local u32 *s_te0, __local u32 *s_te1, __local u32 *s_te2, __local u32 *s_te3, __local u32 *s_te4)
{
  u32 s0 = in[0] ^ rek[0];
  u32 s1 = in[1] ^ rek[1];
  SHA256C3c, SHA256C3d, SHA256C3e, SHA256C3f,
};
// One SHA-256 compression over a 64-byte message block (w0..w3, 16 words),
// updating digest[8] in place. Hunk changes: `static` qualifier removed,
// and `#pragma unroll` is now gated behind the _unroll feature macro so
// targets where forced unrolling is harmful can compile without it.
// NOTE(review): most of the round schedule (working vars c..h, the first
// ROUND_STEP block, final adds for digest[0..6]) is elided in this
// extract — incomplete view.
-static void sha256_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[8])
+void sha256_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[8])
{
  u32 a = digest[0];
  u32 b = digest[1];
  ROUND_STEP (0);
+ #ifdef _unroll
  #pragma unroll
+ #endif
  for (int i = 16; i < 64; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  digest[7] += h;
}
// m13400_init: KeePass init kernel — builds the initial 32-byte digest via
// sha256_transform and stores it in tmps[gid].tmp_digest for the loop
// kernel. Diff changes in this hunk:
//  - kernel signature: the single d_scryptV_buf is split into four
//    d_scryptV0..3_buf pointers (new host-side buffer interface)
//  - the version==2 password re-hash now runs only when NO keyfile is set
//  - new branch: when a keyfile is present, the composite key is formed by
//    hashing (password digest || 8-word keyfile hash) and then an explicit
//    SHA-256 padding block (0x80 terminator, length = 64 bytes = 64*8 bits)
// Fix applied below: the keyfile hunk assigned w3[3] before w3[2] —
// reordered to ascending index order to match every other w-word block;
// the stores are independent, so behavior is unchanged.
// NOTE(review): interior lines of this kernel are elided in this extract.
-__kernel void m13400_init (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global keepass_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global keepass_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void m13400_init (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global keepass_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global keepass_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
  sha256_transform (w0, w1, w2, w3, digest);
-  if (esalt_bufs[salt_pos].version == 2)
+  if (esalt_bufs[salt_pos].version == 2 && esalt_bufs[salt_pos].keyfile_len == 0)
  {
    w0[0] = digest[0];
    w0[1] = digest[1];
    sha256_transform (w0, w1, w2, w3, digest);
  }
+  // composite key: hash (password digest || keyfile hash) when a keyfile
+  // is configured for this salt
+  if (esalt_bufs[salt_pos].keyfile_len != 0)
+  {
+    w0[0] = digest[0];
+    w0[1] = digest[1];
+    w0[2] = digest[2];
+    w0[3] = digest[3];
+
+    w1[0] = digest[4];
+    w1[1] = digest[5];
+    w1[2] = digest[6];
+    w1[3] = digest[7];
+
+    w2[0] = esalt_bufs[salt_pos].keyfile[0];
+    w2[1] = esalt_bufs[salt_pos].keyfile[1];
+    w2[2] = esalt_bufs[salt_pos].keyfile[2];
+    w2[3] = esalt_bufs[salt_pos].keyfile[3];
+
+    w3[0] = esalt_bufs[salt_pos].keyfile[4];
+    w3[1] = esalt_bufs[salt_pos].keyfile[5];
+    w3[2] = esalt_bufs[salt_pos].keyfile[6];
+    w3[3] = esalt_bufs[salt_pos].keyfile[7];
+
+    digest[0] = SHA256M_A;
+    digest[1] = SHA256M_B;
+    digest[2] = SHA256M_C;
+    digest[3] = SHA256M_D;
+    digest[4] = SHA256M_E;
+    digest[5] = SHA256M_F;
+    digest[6] = SHA256M_G;
+    digest[7] = SHA256M_H;
+
+    sha256_transform (w0, w1, w2, w3, digest);
+
+    // SHA-256 padding block: 0x80 terminator, zero fill, 512-bit length
+    w0[0] = 0x80000000;
+    w0[1] = 0;
+    w0[2] = 0;
+    w0[3] = 0;
+
+    w1[0] = 0;
+    w1[1] = 0;
+    w1[2] = 0;
+    w1[3] = 0;
+
+    w2[0] = 0;
+    w2[1] = 0;
+    w2[2] = 0;
+    w2[3] = 0;
+
+    w3[0] = 0;
+    w3[1] = 0;
+    w3[2] = 0;
+    w3[3] = 64 * 8;
+
+    sha256_transform (w0, w1, w2, w3, digest);
+  }
+
  tmps[gid].tmp_digest[0] = digest[0];
  tmps[gid].tmp_digest[1] = digest[1];
  tmps[gid].tmp_digest[2] = digest[2];
  tmps[gid].tmp_digest[7] = digest[7];
}
// m13400_loop: KeePass KDF loop kernel — iterates the key-transformation
// rounds and writes the transformed state back into tmps[gid].tmp_digest.
// Diff change in this hunk: d_scryptV_buf split into d_scryptV0..3_buf in
// the signature (matching m13400_init / m13400_comp).
// NOTE(review): the loop body is elided in this extract; only the final
// tmp_digest[7] store is visible — incomplete view.
-__kernel void m13400_loop (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global keepass_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global keepass_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void m13400_loop (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global keepass_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global keepass_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
  tmps[gid].tmp_digest[7] = data1[3];
}
// m13400_comp: KeePass comparison kernel — expands/inverts the final AES
// key from the derived digest, decrypts the encrypted contents (or the
// contents-hash block), and compares the result against contents_hash /
// expected_bytes, marking a crack on match. Diff changes in this hunk:
//  - signature: d_scryptV_buf split into d_scryptV0..3_buf
//  - the __local AES tables drop the "_final" suffix (s_td0_final -> s_td0
//    etc.) throughout
//  - mark_hash() migrated to the newer interface taking d_return_buf,
//    salt_pos and a digest index, replacing the old hashes_shown call plus
//    manual `d_return_buf[lid] = 1`
//  - "merkle-demgard" comment typo fixed to "merkle-damgard"
// NOTE(review): large parts of this kernel (w-buffer setup, version
// branches, Twofish path, iv/data loads) are elided in this extract —
// incomplete view; verify against the full file.
-__kernel void m13400_comp (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global keepass_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global keepass_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void m13400_comp (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global keepass_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global keepass_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */
  /* Final AES part */
-  __local u32 s_td0_final[256];
-  __local u32 s_td1_final[256];
-  __local u32 s_td2_final[256];
-  __local u32 s_td3_final[256];
-  __local u32 s_td4_final[256];
-
-  __local u32 s_te0_final[256];
-  __local u32 s_te1_final[256];
-  __local u32 s_te2_final[256];
-  __local u32 s_te3_final[256];
-  __local u32 s_te4_final[256];
+  __local u32 s_td0[256];
+  __local u32 s_td1[256];
+  __local u32 s_td2[256];
+  __local u32 s_td3[256];
+  __local u32 s_td4[256];
+
+  __local u32 s_te0[256];
+  __local u32 s_te1[256];
+  __local u32 s_te2[256];
+  __local u32 s_te3[256];
+  __local u32 s_te4[256];
  for (u32 i = lid; i < 256; i += lsz)
  {
-    s_td0_final[i] = td0[i];
-    s_td1_final[i] = td1[i];
-    s_td2_final[i] = td2[i];
-    s_td3_final[i] = td3[i];
-    s_td4_final[i] = td4[i];
-
-    s_te0_final[i] = te0[i];
-    s_te1_final[i] = te1[i];
-    s_te2_final[i] = te2[i];
-    s_te3_final[i] = te3[i];
-    s_te4_final[i] = te4[i];
+    s_td0[i] = td0[i];
+    s_td1[i] = td1[i];
+    s_td2[i] = td2[i];
+    s_td3[i] = td3[i];
+    s_td4[i] = td4[i];
+
+    s_te0[i] = te0[i];
+    s_te1[i] = te1[i];
+    s_te2[i] = te2[i];
+    s_te3[i] = te3[i];
+    s_te4[i] = te4[i];
  }
  barrier (CLK_LOCAL_MEM_FENCE);
  w0[1] = final_random_seed[1];
  w0[2] = final_random_seed[2];
  w0[3] = final_random_seed[3];
-
  w1[0] = digest[0];
  w1[1] = digest[1];
  w1[2] = digest[2];
  w1[3] = digest[3];
-
  w2[0] = digest[4];
  w2[1] = digest[5];
  w2[2] = digest[6];
  w2[3] = digest[7];
-
  w3[0] = 0x80000000;
  w3[1] = 0;
  w3[2] = 0;
  }
  else
  {
-    /* merkle-demgard implementation */
+    /* merkle-damgard implementation */
    u32 final_random_seed[8];
    final_random_seed[0] = esalt_bufs[salt_pos].final_random_seed[0];
    w0[1] = final_random_seed[1];
    w0[2] = final_random_seed[2];
    w0[3] = final_random_seed[3];
-
    w1[0] = final_random_seed[4];
    w1[1] = final_random_seed[5];
    w1[2] = final_random_seed[6];
    w1[3] = final_random_seed[7];
-
    w2[0] = digest[0];
    w2[1] = digest[1];
    w2[2] = digest[2];
    w2[3] = digest[3];
-
    w3[0] = digest[4];
    w3[1] = digest[5];
    w3[2] = digest[6];
    w0[1] = 0;
    w0[2] = 0;
    w0[3] = 0;
-
    w1[0] = 0;
    w1[1] = 0;
    w1[2] = 0;
    w1[3] = 0;
-
    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;
-
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
        && esalt_bufs[salt_pos].contents_hash[6] == final_digest[6]
        && esalt_bufs[salt_pos].contents_hash[7] == final_digest[7])
    {
-      mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos);
-
-      d_return_buf[lid] = 1;
+      mark_hash (plains_buf, d_return_buf, salt_pos, 0, digests_offset + 0, gid, il_pos);
    }
  }
  else
    u32 final_rk[KEYLEN];
-    AES256_ExpandKey (digest, final_rk, s_te0_final, s_te1_final, s_te2_final, s_te3_final, s_te4_final);
+    AES256_ExpandKey (digest, final_rk, s_te0, s_te1, s_te2, s_te3, s_te4);
-    AES256_InvertKey (final_rk, s_td0_final, s_td1_final, s_td2_final, s_td3_final, s_td4_final, s_te0_final, s_te1_final, s_te2_final, s_te3_final, s_te4_final);
+    AES256_InvertKey (final_rk, s_td0, s_td1, s_td2, s_td3, s_td4, s_te0, s_te1, s_te2, s_te3, s_te4);
    u32 wx[16];
    u32 out[4];
-    AES256_decrypt (data, out, final_rk, s_td0_final, s_td1_final, s_td2_final, s_td3_final, s_td4_final);
+    AES256_decrypt (data, out, final_rk, s_td0, s_td1, s_td2, s_td3, s_td4);
    out[0] ^= iv[0];
    out[1] ^= iv[1];
    u32 out[4];
-    AES256_decrypt (data, out, final_rk, s_td0_final, s_td1_final, s_td2_final, s_td3_final, s_td4_final);
+    AES256_decrypt (data, out, final_rk, s_td0, s_td1, s_td2, s_td3, s_td4);
    out[0] ^= iv[0];
    out[1] ^= iv[1];
    u32 out[4];
-    AES256_decrypt (data, out, final_rk, s_td0_final, s_td1_final, s_td2_final, s_td3_final, s_td4_final);
+    AES256_decrypt (data, out, final_rk, s_td0, s_td1, s_td2, s_td3, s_td4);
    out[0] ^= iv[0];
    out[1] ^= iv[1];
        && esalt_bufs[salt_pos].contents_hash[6] == final_digest[6]
        && esalt_bufs[salt_pos].contents_hash[7] == final_digest[7])
    {
-      mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos);
-
-      d_return_buf[lid] = 1;
+      mark_hash (plains_buf, d_return_buf, salt_pos, 0, digests_offset + 0, gid, il_pos);
    }
  }
}
    u32 final_rk[KEYLEN];
-    AES256_ExpandKey (digest, final_rk, s_te0_final, s_te1_final, s_te2_final, s_te3_final, s_te4_final);
+    AES256_ExpandKey (digest, final_rk, s_te0, s_te1, s_te2, s_te3, s_te4);
-    AES256_InvertKey (final_rk, s_td0_final, s_td1_final, s_td2_final, s_td3_final, s_td4_final, s_te0_final, s_te1_final, s_te2_final, s_te3_final, s_te4_final);
+    AES256_InvertKey (final_rk, s_td0, s_td1, s_td2, s_td3, s_td4, s_te0, s_te1, s_te2, s_te3, s_te4);
    u32 contents_hash[4];
    contents_hash[2] = esalt_bufs[salt_pos].contents_hash[2];
    contents_hash[3] = esalt_bufs[salt_pos].contents_hash[3];
-    AES256_decrypt (contents_hash, out, final_rk, s_td0_final, s_td1_final, s_td2_final, s_td3_final, s_td4_final);
+    AES256_decrypt (contents_hash, out, final_rk, s_td0, s_td1, s_td2, s_td3, s_td4);
    out[0] ^= iv[0];
    out[1] ^= iv[1];
      && esalt_bufs[salt_pos].expected_bytes[2] == out[2]
      && esalt_bufs[salt_pos].expected_bytes[3] == out[3])
    {
-      mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos);
-
-      d_return_buf[lid] = 1;
+      mark_hash (plains_buf, d_return_buf, salt_pos, 0, digests_offset + 0, gid, il_pos);
    }
  }
}