/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */

#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"
#include "include/rp_gpu.h"

#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef  VECT_SIZE4
#define VECT_COMPARE_S "check_single_vect4_comp4.c"
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
#endif
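
/* exchange two bytes of the RC4 state; the S-box lives in __local (LDS) memory, one per work-item */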
static void swap (__local RC4_KEY *rc4_key, const u8 i, const u8 j)
{
  const u8 tmp = rc4_key->S[i];

  rc4_key->S[i] = rc4_key->S[j];
  rc4_key->S[j] = tmp;
}
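
/*
 * rc4_init_16: fill S[] with the identity permutation using 32-bit stores,
 * then run the RC4 key schedule with the 16-byte key passed in data[0..3].
 */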
static void rc4_init_16 (__local RC4_KEY *rc4_key, const u32 data[4])

  __local u32 *ptr = (__local u32 *) rc4_key->S;

  for (u32 i = 0; i < 64; i++)

  for (u32 i = 0; i < 16; i++)

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
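
/*
 * rc4_next_16: generate 16 RC4 keystream bytes starting at state (i, j),
 * XOR them against in[0..3] into out[0..3] and return the updated j.
 */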
static u8 rc4_next_16 (__local RC4_KEY *rc4_key, u8 i, u8 j, const u32 in[4], u32 out[4])

  for (u32 k = 0; k < 4; k++)

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] <<  0;

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] <<  8;

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] << 16;

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] << 24;

    out[k] = in[k] ^ xor4;
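
/*
 * Plain SHA-1 compression function: 80 rounds over one 64-byte block held in
 * w0..w3, updating digest[0..4] in place.
 */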
static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
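
  /* rounds  0..19: f = Ch (SHA1_F0o) */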
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);
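
  /* rounds 20..39: f = parity (SHA1_F1) */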
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);
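
  /* rounds 40..59: f = Maj (SHA1_F2o) */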
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);
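
  /* rounds 60..79: f = parity (SHA1_F1) */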
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
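
/*
 * Hash mode 9800: MS Office <= 2003 (SHA1/RC4). Per candidate password:
 *
 *   hash0 = SHA1 (salt || UTF-16LE (password))
 *   hash1 = SHA1 (hash0 || 4-byte block counter)   -> first 16 bytes = RC4 key
 *
 * RC4 keystream bytes 0..15 decrypt encryptedVerifier, keystream bytes 16..31
 * re-encrypt SHA1 (decrypted verifier); the result is matched against the
 * stored encryptedVerifierHash.
 */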
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m09800_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global oldoffice34_t *oldoffice34_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 lid = get_local_id (0);

  __local RC4_KEY rc4_keys[64];

  __local RC4_KEY *rc4_key = &rc4_keys[lid];

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];

  const u32 salt_len = 16;

  const u32 version = oldoffice34_bufs[salt_pos].version;

  u32 encryptedVerifier[4];

  encryptedVerifier[0] = oldoffice34_bufs[salt_pos].encryptedVerifier[0];
  encryptedVerifier[1] = oldoffice34_bufs[salt_pos].encryptedVerifier[1];
  encryptedVerifier[2] = oldoffice34_bufs[salt_pos].encryptedVerifier[2];
  encryptedVerifier[3] = oldoffice34_bufs[salt_pos].encryptedVerifier[3];
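
  /* apply each on-GPU rule in turn; every rule turns the base word into one password candidate */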
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);

    const u32 pw_salt_len = (out_len * 2) + salt_len;

    append_0x80_2 (w0, w1, out_len);

    make_unicode (w0, w0_t, w1_t);
    make_unicode (w1, w2_t, w3_t);

    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, salt_len);

    w0_t[0] = salt_buf[0];
    w0_t[1] = salt_buf[1];
    w0_t[2] = salt_buf[2];
    w0_t[3] = salt_buf[3];
    w1_t[0] = swap_workaround (w1_t[0]);
    w1_t[1] = swap_workaround (w1_t[1]);
    w1_t[2] = swap_workaround (w1_t[2]);
    w1_t[3] = swap_workaround (w1_t[3]);
    w2_t[0] = swap_workaround (w2_t[0]);
    w2_t[1] = swap_workaround (w2_t[1]);
    w2_t[2] = swap_workaround (w2_t[2]);
    w2_t[3] = swap_workaround (w2_t[3]);
    w3_t[0] = swap_workaround (w3_t[0]);
    w3_t[1] = swap_workaround (w3_t[1]);
    w3_t[3] = pw_salt_len * 8;
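
    /* hash0 = SHA1 over a single block holding salt || UTF-16LE (password), bit length in w3_t[3] */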
    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
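
    /* hash1 = SHA1 (hash0 || 4-byte block counter), message re-padded to (20 + 4) bytes; its first 16 bytes become the RC4 key */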
    w1_t[2] = 0x80000000;

    w3_t[3] = (20 + 4) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    key[0] = swap_workaround (digest[0]);
    key[1] = swap_workaround (digest[1]);
    key[2] = swap_workaround (digest[2]);
    key[3] = swap_workaround (digest[3]);

    rc4_init_16 (rc4_key, key);

    u8 j = rc4_next_16 (rc4_key, 0, 0, encryptedVerifier, out);
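
    /* out now holds the RC4-decrypted encryptedVerifier (keystream bytes 0..15); hash it with SHA-1 */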
    w0_t[0] = swap_workaround (out[0]);
    w0_t[1] = swap_workaround (out[1]);
    w0_t[2] = swap_workaround (out[2]);
    w0_t[3] = swap_workaround (out[3]);
    w1_t[0] = 0x80000000;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    digest[0] = swap_workaround (digest[0]);
    digest[1] = swap_workaround (digest[1]);
    digest[2] = swap_workaround (digest[2]);
    digest[3] = swap_workaround (digest[3]);
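
    /* keystream bytes 16..31 re-encrypt the verifier digest; the result must equal the stored encryptedVerifierHash */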
    rc4_next_16 (rc4_key, 16, j, digest, out);

    const u32x r0 = out[0];
    const u32x r1 = out[1];
    const u32x r2 = out[2];
    const u32x r3 = out[3];

    #include VECT_COMPARE_M

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m09800_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global oldoffice34_t *oldoffice34_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m09800_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global oldoffice34_t *oldoffice34_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
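
/*
 * Single-hash ('s') kernels: identical derivation, but the target digest is
 * loaded up front and the final comparison goes through VECT_COMPARE_S.
 */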
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m09800_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global oldoffice34_t *oldoffice34_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 lid = get_local_id (0);

  __local RC4_KEY rc4_keys[64];

  __local RC4_KEY *rc4_key = &rc4_keys[lid];

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;
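
  /* digest to search for (single-hash fast path) */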
  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];

  const u32 salt_len = 16;

  const u32 version = oldoffice34_bufs[salt_pos].version;

  u32 encryptedVerifier[4];

  encryptedVerifier[0] = oldoffice34_bufs[salt_pos].encryptedVerifier[0];
  encryptedVerifier[1] = oldoffice34_bufs[salt_pos].encryptedVerifier[1];
  encryptedVerifier[2] = oldoffice34_bufs[salt_pos].encryptedVerifier[2];
  encryptedVerifier[3] = oldoffice34_bufs[salt_pos].encryptedVerifier[3];

  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);

    const u32 pw_salt_len = (out_len * 2) + salt_len;

    append_0x80_2 (w0, w1, out_len);

    make_unicode (w0, w0_t, w1_t);
    make_unicode (w1, w2_t, w3_t);

    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, salt_len);

    w0_t[0] = salt_buf[0];
    w0_t[1] = salt_buf[1];
    w0_t[2] = salt_buf[2];
    w0_t[3] = salt_buf[3];
    w1_t[0] = swap_workaround (w1_t[0]);
    w1_t[1] = swap_workaround (w1_t[1]);
    w1_t[2] = swap_workaround (w1_t[2]);
    w1_t[3] = swap_workaround (w1_t[3]);
    w2_t[0] = swap_workaround (w2_t[0]);
    w2_t[1] = swap_workaround (w2_t[1]);
    w2_t[2] = swap_workaround (w2_t[2]);
    w2_t[3] = swap_workaround (w2_t[3]);
    w3_t[0] = swap_workaround (w3_t[0]);
    w3_t[1] = swap_workaround (w3_t[1]);
    w3_t[3] = pw_salt_len * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w1_t[2] = 0x80000000;

    w3_t[3] = (20 + 4) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    key[0] = swap_workaround (digest[0]);
    key[1] = swap_workaround (digest[1]);
    key[2] = swap_workaround (digest[2]);
    key[3] = swap_workaround (digest[3]);

    rc4_init_16 (rc4_key, key);

    u8 j = rc4_next_16 (rc4_key, 0, 0, encryptedVerifier, out);

    w0_t[0] = swap_workaround (out[0]);
    w0_t[1] = swap_workaround (out[1]);
    w0_t[2] = swap_workaround (out[2]);
    w0_t[3] = swap_workaround (out[3]);
    w1_t[0] = 0x80000000;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    digest[0] = swap_workaround (digest[0]);
    digest[1] = swap_workaround (digest[1]);
    digest[2] = swap_workaround (digest[2]);
    digest[3] = swap_workaround (digest[3]);

    rc4_next_16 (rc4_key, 16, j, digest, out);

    const u32x r0 = out[0];
    const u32x r1 = out[1];
    const u32x r2 = out[2];
    const u32x r3 = out[3];

    #include VECT_COMPARE_S

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m09800_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global oldoffice34_t *oldoffice34_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m09800_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global oldoffice34_t *oldoffice34_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)