#define _PSAFE2_
-#include "include/constants.h"
-#include "include/kernel_vendor.h"
+#include "inc_vendor.cl"
+#include "inc_hash_constants.h"
+#include "inc_hash_functions.cl"
+#include "inc_types.cl"
+#include "inc_common.cl"
-#define DGST_R0 0
-#define DGST_R1 1
-#define DGST_R2 2
-#define DGST_R3 3
-
-#include "include/kernel_functions.c"
-#include "types_ocl.c"
-#include "common.c"
-
-#ifdef VECT_SIZE1
-#define COMPARE_M "check_multi_vect1_comp4.c"
-#endif
+#define COMPARE_S "inc_comp_single.cl"
+#define COMPARE_M "inc_comp_multi.cl"
// http://www.schneier.com/code/constants.txt
0x9216d5d9, 0x8979fb1b
};
+#ifdef IS_AMD
#define BF_ROUND(L,R,N) \
{ \
uchar4 c = as_uchar4 ((L)); \
\
- u32 tmp; \
+ u32 tmp; \
\
tmp = S0[c.s3]; \
tmp += S1[c.s2]; \
\
(R) ^= tmp ^ P[(N)]; \
}
+#endif
+
+#ifdef IS_NV
+// NVIDIA variant of one Blowfish F-round: extract the four bytes of L with
+// the bfe (bit-field extract) intrinsic rather than an uchar4 reinterpret.
+// Mixing matches the generic path: tmp = ((S0[b3] + S1[b2]) ^ S2[b1]) + S3[b0],
+// then R ^= tmp ^ P[N]. (b3 = bits 31..24 of L, i.e. the MSB.)
+// NOTE(review): comments must stay outside the #define — the body is a
+// backslash-continued macro.
+#define BF_ROUND(L,R,N)                       \
+{                                             \
+  u32 tmp;                                    \
+                                              \
+  tmp  = S0[__bfe_S ((L), 24, 8)];            \
+  tmp += S1[__bfe_S ((L), 16, 8)];            \
+  tmp ^= S2[__bfe_S ((L),  8, 8)];            \
+  tmp += S3[__bfe_S ((L),  0, 8)];            \
+                                              \
+  (R) ^= tmp ^ P[(N)];                        \
+}
+#endif
+
+#ifdef IS_GENERIC
+// Generic variant of one Blowfish F-round: reinterpret L as four bytes
+// (as_uchar4) and index the four S-boxes;
+// tmp = ((S0[c.s3] + S1[c.s2]) ^ S2[c.s1]) + S3[c.s0], then R ^= tmp ^ P[N].
+// Uses c.s3 for S0, mirroring the NV path's __bfe(L, 24, 8) — assumes a
+// little-endian device so c.s3 is the most significant byte; TODO confirm.
+#define BF_ROUND(L,R,N)                       \
+{                                             \
+  uchar4 c = as_uchar4 ((L));                 \
+                                              \
+  u32 tmp;                                    \
+                                              \
+  tmp  = S0[c.s3];                            \
+  tmp += S1[c.s2];                            \
+  tmp ^= S2[c.s1];                            \
+  tmp += S3[c.s0];                            \
+                                              \
+  (R) ^= tmp ^ P[(N)];                        \
+}
+#endif
#define BF_ENCRYPT(L,R) \
{ \
BF_ROUND (L, R, 15); \
BF_ROUND (R, L, 16); \
\
- u32 tmp; \
+ u32 tmp; \
\
tmp = R; \
R = L; \
L ^= P[17]; \
}
-static void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[5])
+void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[5])
{
u32 A = digest[0];
u32 B = digest[1];
digest[4] += E;
}
-__kernel void __attribute__((reqd_work_group_size (8, 1, 1))) m09000_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pwsafe2_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void __attribute__((reqd_work_group_size (8, 1, 1))) m09000_init (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pwsafe2_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
/**
* base
const u32 pw_len = pws[gid].pw_len;
- append_0x80_4 (w0, w1, w2, w3, pw_len);
+ append_0x80_4x4 (w0, w1, w2, w3, pw_len);
/**
* salt
w0[1] = salt_buf[1];
w0[0] = salt_buf[0];
- w0[0] = swap_workaround (w0[0]);
- w0[1] = swap_workaround (w0[1]);
- w0[2] = swap_workaround (w0[2]);
- w0[3] = swap_workaround (w0[3]);
- w1[0] = swap_workaround (w1[0]);
- w1[1] = swap_workaround (w1[1]);
- w1[2] = swap_workaround (w1[2]);
- w1[3] = swap_workaround (w1[3]);
- w2[0] = swap_workaround (w2[0]);
- w2[1] = swap_workaround (w2[1]);
- w2[2] = swap_workaround (w2[2]);
- w2[3] = swap_workaround (w2[3]);
- w3[0] = swap_workaround (w3[0]);
- w3[1] = swap_workaround (w3[1]);
+ w0[0] = swap32 (w0[0]);
+ w0[1] = swap32 (w0[1]);
+ w0[2] = swap32 (w0[2]);
+ w0[3] = swap32 (w0[3]);
+ w1[0] = swap32 (w1[0]);
+ w1[1] = swap32 (w1[1]);
+ w1[2] = swap32 (w1[2]);
+ w1[3] = swap32 (w1[3]);
+ w2[0] = swap32 (w2[0]);
+ w2[1] = swap32 (w2[1]);
+ w2[2] = swap32 (w2[2]);
+ w2[3] = swap32 (w2[3]);
+ w3[0] = swap32 (w3[0]);
+ w3[1] = swap32 (w3[1]);
const u32 block_len = salt_len + 2 + pw_len;
}
}
-__kernel void __attribute__((reqd_work_group_size (8, 1, 1))) m09000_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pwsafe2_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void __attribute__((reqd_work_group_size (8, 1, 1))) m09000_loop (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pwsafe2_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
/**
* base
u32 P[18];
+ #pragma unroll
for (u32 i = 0; i < 18; i++)
{
P[i] = tmps[gid].P[i];
__local u32 *S2 = S2_all[lid];
__local u32 *S3 = S3_all[lid];
+ #pragma unroll
for (u32 i = 0; i < 256; i++)
{
S0[i] = tmps[gid].S0[i];
tmps[gid].digest[1] = R0;
}
-__kernel void __attribute__((reqd_work_group_size (8, 1, 1))) m09000_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pwsafe2_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void __attribute__((reqd_work_group_size (8, 1, 1))) m09000_comp (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pwsafe2_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
/**
* base
u32 w2[4];
u32 w3[4];
- w0[0] = swap_workaround (digest[0]);
- w0[1] = swap_workaround (digest[1]);
+ w0[0] = swap32 (digest[0]);
+ w0[1] = swap32 (digest[1]);
w0[2] = 0x00008000;
w0[3] = 0;
w1[0] = 0;