/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"
#include "include/rp_gpu.h"
#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef  VECT_SIZE4
#define VECT_COMPARE_S "check_single_vect4_comp4.c"
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
#endif

static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
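  // Standard SHA-1 compression of one 512-bit block: four 20-round sections
  // using F0o (Ch), F1 (parity), F2o (Maj) and F1 again.  From round 16 on the
  // message words are extended with w[i] = rotl32 (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1).

  // Rounds 0..19: f = F0o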
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);
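  // Rounds 20..39: f = F1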
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);
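  // Rounds 40..59: f = F2o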
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);
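  // Rounds 60..79: f = F1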
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08300_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
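  // Multi-hash ("_m") variant of the rule-based kernel: each work-item applies one
  // rule to its password candidate and the result is checked via VECT_COMPARE_M.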
  const u32 lid = get_local_id (0);

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  const u32 salt_iter = salt_bufs[salt_pos].salt_iter;

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  domain_buf0[0] = salt_bufs[salt_pos].salt_buf_pc[ 0];
  domain_buf0[1] = salt_bufs[salt_pos].salt_buf_pc[ 1];
  domain_buf0[2] = salt_bufs[salt_pos].salt_buf_pc[ 2];
  domain_buf0[3] = salt_bufs[salt_pos].salt_buf_pc[ 3];

  domain_buf1[0] = salt_bufs[salt_pos].salt_buf_pc[ 4];
  domain_buf1[1] = salt_bufs[salt_pos].salt_buf_pc[ 5];
  domain_buf1[2] = salt_bufs[salt_pos].salt_buf_pc[ 6];

  const u32 domain_len = salt_bufs[salt_pos].salt_buf_pc[ 7];

  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);
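    // Assemble the hash input below: one length byte, the rule-mutated password
    // (the leftmost label), the pre-computed domain from salt_buf_pc and finally
    // the salt, NSEC3-style.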
    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, 1);

    w0_t[0] |= pw_len & 0xff;

    s0[0] = salt_buf0[0];
    s0[1] = salt_buf0[1];
    s0[2] = salt_buf0[2];
    s0[3] = salt_buf0[3];

    s1[0] = salt_buf1[0];
    s1[1] = salt_buf1[1];
    s1[2] = salt_buf1[2];
    s1[3] = salt_buf1[3];

    switch_buffer_by_offset (s0, s1, s2, s3, 1 + out_len + domain_len + 1);
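    // salt offset: 1 length byte + out_len password bytes + domain_len + 1 domain bytes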
    d0[0] = domain_buf0[0];
    d0[1] = domain_buf0[1];
    d0[2] = domain_buf0[2];
    d0[3] = domain_buf0[3];

    d1[0] = domain_buf1[0];
    d1[1] = domain_buf1[1];
    d1[2] = domain_buf1[2];

    switch_buffer_by_offset (d0, d1, d2, d3, 1 + out_len);
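    // domain offset: 1 length byte + out_len password bytes

    // Merge password, domain and salt (they occupy disjoint byte ranges), byte-swap
    // each word to big-endian for SHA-1 and store the message length in bits in the
    // last word of the block.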
    w0_t2[0] = swap_workaround (w0_t[0] | d0[0] | s0[0]);
    w0_t2[1] = swap_workaround (w0_t[1] | d0[1] | s0[1]);
    w0_t2[2] = swap_workaround (w0_t[2] | d0[2] | s0[2]);
    w0_t2[3] = swap_workaround (w0_t[3] | d0[3] | s0[3]);

    w1_t2[0] = swap_workaround (w1_t[0] | d1[0] | s1[0]);
    w1_t2[1] = swap_workaround (w1_t[1] | d1[1] | s1[1]);
    w1_t2[2] = swap_workaround (w1_t[2] | d1[2] | s1[2]);
    w1_t2[3] = swap_workaround (w1_t[3] | d1[3] | s1[3]);

    w2_t2[0] = swap_workaround (w2_t[0] | d2[0] | s2[0]);
    w2_t2[1] = swap_workaround (w2_t[1] | d2[1] | s2[1]);
    w2_t2[2] = swap_workaround (w2_t[2] | d2[2] | s2[2]);
    w2_t2[3] = swap_workaround (w2_t[3] | d2[3] | s2[3]);

    w3_t2[0] = swap_workaround (w3_t[0] | d3[0] | s3[0]);
    w3_t2[1] = swap_workaround (w3_t[1] | d3[1] | s3[1]);

    w3_t2[3] = (1 + out_len + domain_len + 1 + salt_len) * 8;

    sha1_transform (w0_t2, w1_t2, w2_t2, w3_t2, digest);
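    // Iterate: rehash (digest || salt) salt_iter times.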
    for (u32 i = 0; i < salt_iter; i++)

      w0_t3[0] = digest[0];
      w0_t3[1] = digest[1];
      w0_t3[2] = digest[2];
      w0_t3[3] = digest[3];

      w1_t3[0] = digest[4];
      w1_t3[1] = swap_workaround (salt_buf0[0]);
      w1_t3[2] = swap_workaround (salt_buf0[1]);
      w1_t3[3] = swap_workaround (salt_buf0[2]);

      w2_t3[0] = swap_workaround (salt_buf0[3]);
      w2_t3[1] = swap_workaround (salt_buf1[0]);
      w2_t3[2] = swap_workaround (salt_buf1[1]);
      w2_t3[3] = swap_workaround (salt_buf1[2]);

      w3_t3[0] = swap_workaround (salt_buf1[3]);

      w3_t3[3] = (20 + salt_len) * 8;
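      // 20-byte digest plus salt_len salt bytes, expressed in bits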
      sha1_transform (w0_t3, w1_t3, w2_t3, w3_t3, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
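    // The included compare code checks r0..r3 against the loaded hashes (multi-hash path).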
    #include VECT_COMPARE_M

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08300_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08300_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08300_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = get_local_id (0);

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  const u32 salt_iter = salt_bufs[salt_pos].salt_iter;

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  domain_buf0[0] = salt_bufs[salt_pos].salt_buf_pc[ 0];
  domain_buf0[1] = salt_bufs[salt_pos].salt_buf_pc[ 1];
  domain_buf0[2] = salt_bufs[salt_pos].salt_buf_pc[ 2];
  domain_buf0[3] = salt_bufs[salt_pos].salt_buf_pc[ 3];

  domain_buf1[0] = salt_bufs[salt_pos].salt_buf_pc[ 4];
  domain_buf1[1] = salt_bufs[salt_pos].salt_buf_pc[ 5];
  domain_buf1[2] = salt_bufs[salt_pos].salt_buf_pc[ 6];

  const u32 domain_len = salt_bufs[salt_pos].salt_buf_pc[ 7];
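  // Target digest words for the single-hash ("_s") comparison path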
  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);

    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, 1);

    w0_t[0] |= pw_len & 0xff;

    s0[0] = salt_buf0[0];
    s0[1] = salt_buf0[1];
    s0[2] = salt_buf0[2];
    s0[3] = salt_buf0[3];

    s1[0] = salt_buf1[0];
    s1[1] = salt_buf1[1];
    s1[2] = salt_buf1[2];
    s1[3] = salt_buf1[3];

    switch_buffer_by_offset (s0, s1, s2, s3, 1 + out_len + domain_len + 1);

    d0[0] = domain_buf0[0];
    d0[1] = domain_buf0[1];
    d0[2] = domain_buf0[2];
    d0[3] = domain_buf0[3];

    d1[0] = domain_buf1[0];
    d1[1] = domain_buf1[1];
    d1[2] = domain_buf1[2];

    switch_buffer_by_offset (d0, d1, d2, d3, 1 + out_len);

    w0_t2[0] = swap_workaround (w0_t[0] | d0[0] | s0[0]);
    w0_t2[1] = swap_workaround (w0_t[1] | d0[1] | s0[1]);
    w0_t2[2] = swap_workaround (w0_t[2] | d0[2] | s0[2]);
    w0_t2[3] = swap_workaround (w0_t[3] | d0[3] | s0[3]);

    w1_t2[0] = swap_workaround (w1_t[0] | d1[0] | s1[0]);
    w1_t2[1] = swap_workaround (w1_t[1] | d1[1] | s1[1]);
    w1_t2[2] = swap_workaround (w1_t[2] | d1[2] | s1[2]);
    w1_t2[3] = swap_workaround (w1_t[3] | d1[3] | s1[3]);

    w2_t2[0] = swap_workaround (w2_t[0] | d2[0] | s2[0]);
    w2_t2[1] = swap_workaround (w2_t[1] | d2[1] | s2[1]);
    w2_t2[2] = swap_workaround (w2_t[2] | d2[2] | s2[2]);
    w2_t2[3] = swap_workaround (w2_t[3] | d2[3] | s2[3]);

    w3_t2[0] = swap_workaround (w3_t[0] | d3[0] | s3[0]);
    w3_t2[1] = swap_workaround (w3_t[1] | d3[1] | s3[1]);

    w3_t2[3] = (1 + out_len + domain_len + 1 + salt_len) * 8;

    sha1_transform (w0_t2, w1_t2, w2_t2, w3_t2, digest);

    for (u32 i = 0; i < salt_iter; i++)

      w0_t3[0] = digest[0];
      w0_t3[1] = digest[1];
      w0_t3[2] = digest[2];
      w0_t3[3] = digest[3];

      w1_t3[0] = digest[4];
      w1_t3[1] = swap_workaround (salt_buf0[0]);
      w1_t3[2] = swap_workaround (salt_buf0[1]);
      w1_t3[3] = swap_workaround (salt_buf0[2]);

      w2_t3[0] = swap_workaround (salt_buf0[3]);
      w2_t3[1] = swap_workaround (salt_buf1[0]);
      w2_t3[2] = swap_workaround (salt_buf1[1]);
      w2_t3[3] = swap_workaround (salt_buf1[2]);

      w3_t3[0] = swap_workaround (salt_buf1[3]);

      w3_t3[3] = (20 + salt_len) * 8;

      sha1_transform (w0_t3, w1_t3, w2_t3, w3_t3, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_S

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08300_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08300_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)