 * Author......: Jens Steube <jens.steube@gmail.com>
#include "include/constants.h"
#include "include/kernel_vendor.h"
#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"
#include "include/rp_gpu.h"
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#define VECT_COMPARE_S "check_single_vect4_comp4.c"
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
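/**
 * sha1_transform: one 64-byte SHA-1 block over a vectorized (u32x) message
 * schedule; 80 rounds in four groups of 20 (F0o, F1, F2o, F1).
 */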
static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);
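  // rounds 20..39: parity round function (SHA1_F1)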
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);
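  // rounds 40..59: majority round function (SHA1_F2o)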
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);
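  // rounds 60..79: parity round function (SHA1_F1) again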
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
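/**
 * hmac_sha1_pad: precompute the HMAC-SHA1 inner (ipad) and outer (opad)
 * states from one 64-byte key block.
 */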
static void hmac_sha1_pad (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[5], u32x opad[5])
  w0[0] = w0[0] ^ 0x36363636;
  w0[1] = w0[1] ^ 0x36363636;
  w0[2] = w0[2] ^ 0x36363636;
  w0[3] = w0[3] ^ 0x36363636;
  w1[0] = w1[0] ^ 0x36363636;
  w1[1] = w1[1] ^ 0x36363636;
  w1[2] = w1[2] ^ 0x36363636;
  w1[3] = w1[3] ^ 0x36363636;
  w2[0] = w2[0] ^ 0x36363636;
  w2[1] = w2[1] ^ 0x36363636;
  w2[2] = w2[2] ^ 0x36363636;
  w2[3] = w2[3] ^ 0x36363636;
  w3[0] = w3[0] ^ 0x36363636;
  w3[1] = w3[1] ^ 0x36363636;
  w3[2] = w3[2] ^ 0x36363636;
  w3[3] = w3[3] ^ 0x36363636;
  sha1_transform (w0, w1, w2, w3, ipad);
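  // w still holds key ^ 0x36; XOR with 0x6a (= 0x36 ^ 0x5c) yields
  // key ^ 0x5c for the opad state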
  w0[0] = w0[0] ^ 0x6a6a6a6a;
  w0[1] = w0[1] ^ 0x6a6a6a6a;
  w0[2] = w0[2] ^ 0x6a6a6a6a;
  w0[3] = w0[3] ^ 0x6a6a6a6a;
  w1[0] = w1[0] ^ 0x6a6a6a6a;
  w1[1] = w1[1] ^ 0x6a6a6a6a;
  w1[2] = w1[2] ^ 0x6a6a6a6a;
  w1[3] = w1[3] ^ 0x6a6a6a6a;
  w2[0] = w2[0] ^ 0x6a6a6a6a;
  w2[1] = w2[1] ^ 0x6a6a6a6a;
  w2[2] = w2[2] ^ 0x6a6a6a6a;
  w2[3] = w2[3] ^ 0x6a6a6a6a;
  w3[0] = w3[0] ^ 0x6a6a6a6a;
  w3[1] = w3[1] ^ 0x6a6a6a6a;
  w3[2] = w3[2] ^ 0x6a6a6a6a;
  w3[3] = w3[3] ^ 0x6a6a6a6a;
  sha1_transform (w0, w1, w2, w3, opad);
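/**
 * hmac_sha1_run: finish the inner hash over the last message block, then
 * run the outer transform over the opad state and the 20-byte inner digest.
 */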
static void hmac_sha1_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[5], u32x opad[5], u32x digest[5])
  sha1_transform (w0, w1, w2, w3, digest);
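  // outer message = 64-byte opad block + 20-byte inner digest,
  // hence the padded bit length (64 + 20) * 8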
  w3[3] = (64 + 20) * 8;
  sha1_transform (w0, w1, w2, w3, digest);
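/**
 * m07300 (IPMI 2.0 RAKP HMAC-SHA1), attack mode a0 (wordlist + rules),
 * multi-hash kernel: each work-item mutates its password with a rule, uses
 * it as the HMAC key and checks the result against the loaded digests.
 */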
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07300_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global rakp_t *rakp_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = get_local_id (0);
  const u32 gid = get_global_id (0);
  if (gid >= gid_max) return;
  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];
  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];
  const u32 pw_len = pws[gid].pw_len;
  const u32 esalt_len = rakp_bufs[salt_pos].salt_len;
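  // the esalt is the RAKP session record; it is the HMAC message, while the
  // (rule-mutated) password serves as the HMAC key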
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);
    w0_t[0] = swap_workaround (w0[0]);
    w0_t[1] = swap_workaround (w0[1]);
    w0_t[2] = swap_workaround (w0[2]);
    w0_t[3] = swap_workaround (w0[3]);
    w1_t[0] = swap_workaround (w1[0]);
    w1_t[1] = swap_workaround (w1[1]);
    w1_t[2] = swap_workaround (w1[2]);
    w1_t[3] = swap_workaround (w1[3]);
    hmac_sha1_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);
    int esalt_size = esalt_len;
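    // absorb full 64-byte esalt blocks into the ipad state; stop once fewer
    // than 56 bytes remain so the final block can carry the length field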
    for (esalt_left = esalt_size, esalt_off = 0; esalt_left >= 56; esalt_left -= 64, esalt_off += 16)
      w0_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 0];
      w0_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 1];
      w0_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 2];
      w0_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 3];
      w1_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 4];
      w1_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 5];
      w1_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 6];
      w1_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 7];
      w2_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 8];
      w2_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 9];
      w2_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 10];
      w2_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 11];
      w3_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 12];
      w3_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 13];
      w3_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 14];
      w3_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 15];
      sha1_transform (w0_t, w1_t, w2_t, w3_t, ipad);
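    // final (partial) block: remaining esalt words plus the total bit length
    // of ipad block + esalt, (64 + esalt_size) * 8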
    w0_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 0];
    w0_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 1];
    w0_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 2];
    w0_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 3];
    w1_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 4];
    w1_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 5];
    w1_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 6];
    w1_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 7];
    w2_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 8];
    w2_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 9];
    w2_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 10];
    w2_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 11];
    w3_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 12];
    w3_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 13];
    w3_t[3] = (64 + esalt_size) * 8;
    hmac_sha1_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest);
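    // digest words reordered to this hash mode's DGST_R0..DGST_R3 layout
    // before the comparison include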
    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
    #include VECT_COMPARE_M
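/**
 * _m08 / _m16 variants cover the longer password length ranges
 * (8 / 16 u32 words per candidate).
 */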
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07300_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global rakp_t *rakp_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07300_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global rakp_t *rakp_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
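/**
 * Single-hash ("s") kernel: same HMAC computation as m07300_m04, but the
 * result is compared against the one target digest loaded into search[].
 */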
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07300_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global rakp_t *rakp_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = get_local_id (0);
  const u32 gid = get_global_id (0);
  if (gid >= gid_max) return;
  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];
  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];
  const u32 pw_len = pws[gid].pw_len;
  const u32 esalt_len = rakp_bufs[salt_pos].salt_len;
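  // target digest words for the single-hash comparison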
  const u32 search[4] =
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);
    w0_t[0] = swap_workaround (w0[0]);
    w0_t[1] = swap_workaround (w0[1]);
    w0_t[2] = swap_workaround (w0[2]);
    w0_t[3] = swap_workaround (w0[3]);
    w1_t[0] = swap_workaround (w1[0]);
    w1_t[1] = swap_workaround (w1[1]);
    w1_t[2] = swap_workaround (w1[2]);
    w1_t[3] = swap_workaround (w1[3]);
    hmac_sha1_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);
    int esalt_size = esalt_len;
    for (esalt_left = esalt_size, esalt_off = 0; esalt_left >= 56; esalt_left -= 64, esalt_off += 16)
      w0_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 0];
      w0_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 1];
      w0_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 2];
      w0_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 3];
      w1_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 4];
      w1_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 5];
      w1_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 6];
      w1_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 7];
      w2_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 8];
      w2_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 9];
      w2_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 10];
      w2_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 11];
      w3_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 12];
      w3_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 13];
      w3_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 14];
      w3_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 15];
      sha1_transform (w0_t, w1_t, w2_t, w3_t, ipad);
    w0_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 0];
    w0_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 1];
    w0_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 2];
    w0_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 3];
    w1_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 4];
    w1_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 5];
    w1_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 6];
    w1_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 7];
    w2_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 8];
    w2_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 9];
    w2_t[2] = rakp_bufs[salt_pos].salt_buf[esalt_off + 10];
    w2_t[3] = rakp_bufs[salt_pos].salt_buf[esalt_off + 11];
    w3_t[0] = rakp_bufs[salt_pos].salt_buf[esalt_off + 12];
    w3_t[1] = rakp_bufs[salt_pos].salt_buf[esalt_off + 13];
    w3_t[3] = (64 + esalt_size) * 8;
    hmac_sha1_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest);
    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
    #include VECT_COMPARE_S
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07300_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global rakp_t *rakp_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07300_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global rakp_t *rakp_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)