2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
16 #include "include/kernel_functions.c"
17 #include "types_ocl.c"
20 #define COMPARE_S "check_single_comp4.c"
21 #define COMPARE_M "check_multi_comp4.c"
// SHA-512 round constants K[0..79] (FIPS 180-4): the first 64 bits of the
// fractional parts of the cube roots of the first eighty primes. The
// SHA512Cxx macros come from the included constants header; the table lives
// in __constant (read-only) device memory so all work-items share one copy.
// NOTE(review): the surrounding initializer braces are not visible in this
// excerpt of the file.
23 __constant u64 k_sha512[80] =
25 SHA512C00, SHA512C01, SHA512C02, SHA512C03,
26 SHA512C04, SHA512C05, SHA512C06, SHA512C07,
27 SHA512C08, SHA512C09, SHA512C0a, SHA512C0b,
28 SHA512C0c, SHA512C0d, SHA512C0e, SHA512C0f,
29 SHA512C10, SHA512C11, SHA512C12, SHA512C13,
30 SHA512C14, SHA512C15, SHA512C16, SHA512C17,
31 SHA512C18, SHA512C19, SHA512C1a, SHA512C1b,
32 SHA512C1c, SHA512C1d, SHA512C1e, SHA512C1f,
33 SHA512C20, SHA512C21, SHA512C22, SHA512C23,
34 SHA512C24, SHA512C25, SHA512C26, SHA512C27,
35 SHA512C28, SHA512C29, SHA512C2a, SHA512C2b,
36 SHA512C2c, SHA512C2d, SHA512C2e, SHA512C2f,
37 SHA512C30, SHA512C31, SHA512C32, SHA512C33,
38 SHA512C34, SHA512C35, SHA512C36, SHA512C37,
39 SHA512C38, SHA512C39, SHA512C3a, SHA512C3b,
40 SHA512C3c, SHA512C3d, SHA512C3e, SHA512C3f,
41 SHA512C40, SHA512C41, SHA512C42, SHA512C43,
42 SHA512C44, SHA512C45, SHA512C46, SHA512C47,
43 SHA512C48, SHA512C49, SHA512C4a, SHA512C4b,
44 SHA512C4c, SHA512C4d, SHA512C4e, SHA512C4f,
// SHA-512 compression function over one 128-byte message block.
// The sixteen 64-bit message words are assembled from pairs of 32-bit input
// words (high half first via hl32_to_64), then 80 rounds run: rounds 0..15
// consume the raw message words, and each subsequent group of 16 rounds first
// expands the schedule with ROUND_EXPAND and then applies ROUND_STEP, which
// unrolls 16 SHA512_STEP invocations with the working variables a..h rotated
// one position per step. digest[8] is updated in place.
// NOTE(review): the declarations of w7_t..we_t, the working-variable setup,
// the initial ROUND_STEP(0), and the final feed-forward into digest[] are
// not visible in this excerpt — confirm against the full file.
47 static void sha512_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u64 digest[8])
49 u64 w0_t = hl32_to_64 (w0[0], w0[1]);
50 u64 w1_t = hl32_to_64 (w0[2], w0[3]);
51 u64 w2_t = hl32_to_64 (w1[0], w1[1]);
52 u64 w3_t = hl32_to_64 (w1[2], w1[3]);
53 u64 w4_t = hl32_to_64 (w2[0], w2[1]);
54 u64 w5_t = hl32_to_64 (w2[2], w2[3]);
55 u64 w6_t = hl32_to_64 (w3[0], w3[1]);
64 u64 wf_t = hl32_to_64 (w3[2], w3[3]);
// Message-schedule expansion for one 16-round group:
// W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16] (FIPS 180-4).
75 #define ROUND_EXPAND() \
77 w0_t = SHA512_EXPAND (we_t, w9_t, w1_t, w0_t); \
78 w1_t = SHA512_EXPAND (wf_t, wa_t, w2_t, w1_t); \
79 w2_t = SHA512_EXPAND (w0_t, wb_t, w3_t, w2_t); \
80 w3_t = SHA512_EXPAND (w1_t, wc_t, w4_t, w3_t); \
81 w4_t = SHA512_EXPAND (w2_t, wd_t, w5_t, w4_t); \
82 w5_t = SHA512_EXPAND (w3_t, we_t, w6_t, w5_t); \
83 w6_t = SHA512_EXPAND (w4_t, wf_t, w7_t, w6_t); \
84 w7_t = SHA512_EXPAND (w5_t, w0_t, w8_t, w7_t); \
85 w8_t = SHA512_EXPAND (w6_t, w1_t, w9_t, w8_t); \
86 w9_t = SHA512_EXPAND (w7_t, w2_t, wa_t, w9_t); \
87 wa_t = SHA512_EXPAND (w8_t, w3_t, wb_t, wa_t); \
88 wb_t = SHA512_EXPAND (w9_t, w4_t, wc_t, wb_t); \
89 wc_t = SHA512_EXPAND (wa_t, w5_t, wd_t, wc_t); \
90 wd_t = SHA512_EXPAND (wb_t, w6_t, we_t, wd_t); \
91 we_t = SHA512_EXPAND (wc_t, w7_t, wf_t, we_t); \
92 wf_t = SHA512_EXPAND (wd_t, w8_t, w0_t, wf_t); \
95 #define ROUND_STEP(i) \
97 SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha512[i + 0]); \
98 SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha512[i + 1]); \
99 SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha512[i + 2]); \
100 SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha512[i + 3]); \
101 SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha512[i + 4]); \
102 SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha512[i + 5]); \
103 SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha512[i + 6]); \
104 SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, w7_t, k_sha512[i + 7]); \
105 SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha512[i + 8]); \
106 SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha512[i + 9]); \
107 SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha512[i + 10]); \
108 SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha512[i + 11]); \
109 SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha512[i + 12]); \
110 SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha512[i + 13]); \
111 SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, we_t, k_sha512[i + 14]); \
112 SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha512[i + 15]); \
118 for (int i = 16; i < 80; i += 16)
120 ROUND_EXPAND (); ROUND_STEP (i);
// Multi-hash ("m") inner kernel body for hash mode 1740 — presumably
// sha512($salt.utf16le($pass)) per hashcat's mode table; confirm.
// For each brute-force candidate from bfs_buf, the salt is prepended to the
// candidate words, the single padded SHA-512 block is hashed, and the result
// is checked against all loaded digests via the COMPARE_M include (the
// include line is not visible in this excerpt, nor are several declarations
// such as w0l/w0lr, salt_buf2/salt_buf3, w0_t..w3_t and digest[]).
144 static void m01740m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
150 const u32 gid = get_global_id (0);
151 const u32 lid = get_local_id (0);
// Load the first eight 32-bit salt words for the selected salt into
// private buffers so the hot loop does not re-read global memory.
159 salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
160 salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
161 salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
162 salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];
166 salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
167 salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
168 salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
169 salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];
185 const u32 salt_len = salt_bufs[salt_pos].salt_len;
// Total message length in bytes: password plus prepended salt.
187 const u32 pw_salt_len = pw_len + salt_len;
// Brute-force loop: each iteration substitutes one candidate word from
// bfs_buf (the line applying w0r to w0[0] is not visible here).
195 for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
197 const u32 w0r = bfs_buf[il_pos].i;
// Byte-swap every candidate word before the byte-level shift below
// (swap_workaround is the shared bswap32 helper from kernel_functions).
210 w0_t[0] = swap_workaround (w0[0]);
211 w0_t[1] = swap_workaround (w0[1]);
212 w0_t[2] = swap_workaround (w0[2]);
213 w0_t[3] = swap_workaround (w0[3]);
214 w1_t[0] = swap_workaround (w1[0]);
215 w1_t[1] = swap_workaround (w1[1]);
216 w1_t[2] = swap_workaround (w1[2]);
217 w1_t[3] = swap_workaround (w1[3]);
218 w2_t[0] = swap_workaround (w2[0]);
219 w2_t[1] = swap_workaround (w2[1]);
220 w2_t[2] = swap_workaround (w2[2]);
221 w2_t[3] = swap_workaround (w2[3]);
222 w3_t[0] = swap_workaround (w3[0]);
223 w3_t[1] = swap_workaround (w3[1]);
224 w3_t[2] = swap_workaround (w3[2]);
225 w3_t[3] = swap_workaround (w3[3]);
// Shift the candidate right by salt_len bytes to make room for the salt
// at the front of the message block.
227 switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, salt_len);
// OR the salt words into the vacated leading bytes.
229 w0_t[0] |= salt_buf0[0];
230 w0_t[1] |= salt_buf0[1];
231 w0_t[2] |= salt_buf0[2];
232 w0_t[3] |= salt_buf0[3];
233 w1_t[0] |= salt_buf1[0];
234 w1_t[1] |= salt_buf1[1];
235 w1_t[2] |= salt_buf1[2];
236 w1_t[3] |= salt_buf1[3];
237 w2_t[0] |= salt_buf2[0];
238 w2_t[1] |= salt_buf2[1];
239 w2_t[2] |= salt_buf2[2];
240 w2_t[3] |= salt_buf2[3];
241 w3_t[0] |= salt_buf3[0];
242 w3_t[1] |= salt_buf3[1];
// Message length in bits goes into the final 32-bit word of the padded
// block (total length < 2^32 bits, so the upper length words are zero).
244 w3_t[3] = pw_salt_len * 8;
// Swap back to the layout sha512_transform expects; the two length words
// already hold native values, hence they are deliberately not re-swapped.
250 w0_t[0] = swap_workaround (w0_t[0]);
251 w0_t[1] = swap_workaround (w0_t[1]);
252 w0_t[2] = swap_workaround (w0_t[2]);
253 w0_t[3] = swap_workaround (w0_t[3]);
254 w1_t[0] = swap_workaround (w1_t[0]);
255 w1_t[1] = swap_workaround (w1_t[1]);
256 w1_t[2] = swap_workaround (w1_t[2]);
257 w1_t[3] = swap_workaround (w1_t[3]);
258 w2_t[0] = swap_workaround (w2_t[0]);
259 w2_t[1] = swap_workaround (w2_t[1]);
260 w2_t[2] = swap_workaround (w2_t[2]);
261 w2_t[3] = swap_workaround (w2_t[3]);
262 w3_t[0] = swap_workaround (w3_t[0]);
263 w3_t[1] = swap_workaround (w3_t[1]);
264 //w3_t[2] = swap_workaround (w3_t[2]);
265 //w3_t[3] = swap_workaround (w3_t[3]);
// SHA-512 initial hash values H0..H7 (FIPS 180-4).
269 digest[0] = SHA512M_A;
270 digest[1] = SHA512M_B;
271 digest[2] = SHA512M_C;
272 digest[3] = SHA512M_D;
273 digest[4] = SHA512M_E;
274 digest[5] = SHA512M_F;
275 digest[6] = SHA512M_G;
276 digest[7] = SHA512M_H;
278 sha512_transform (w0_t, w1_t, w2_t, w3_t, digest);
// Extract the four 32-bit comparison words in hashcat's DGST_R0..R3
// ordering for this hash type (halves of digest[7] and digest[3]);
// the COMPARE_M include that consumes r0..r3 is not visible here.
281 const u32 r0 = l32_from_64 (digest[7]);
282 const u32 r1 = h32_from_64 (digest[7]);
283 const u32 r2 = l32_from_64 (digest[3]);
284 const u32 r3 = h32_from_64 (digest[3]);
// Single-hash ("s") inner kernel body for hash mode 1740. Identical message
// construction to the multi-hash variant, but the computed digest is checked
// against one known target digest (search[]) via the COMPARE_S include
// (the include line is not visible in this excerpt, nor are several
// declarations such as w0l/w0lr, salt_buf2/salt_buf3, w0_t..w3_t, digest[]).
290 static void m01740s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
296 const u32 gid = get_global_id (0);
297 const u32 lid = get_local_id (0);
// The single target digest's comparison words, hoisted out of the loop.
303 const u32 search[4] =
305 digests_buf[digests_offset].digest_buf[DGST_R0],
306 digests_buf[digests_offset].digest_buf[DGST_R1],
307 digests_buf[digests_offset].digest_buf[DGST_R2],
308 digests_buf[digests_offset].digest_buf[DGST_R3]
// Load the first eight 32-bit salt words into private buffers so the hot
// loop does not re-read global memory.
317 salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
318 salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
319 salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
320 salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];
324 salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
325 salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
326 salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
327 salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];
343 const u32 salt_len = salt_bufs[salt_pos].salt_len;
// Total message length in bytes: password plus prepended salt.
345 const u32 pw_salt_len = pw_len + salt_len;
// Brute-force loop: each iteration substitutes one candidate word from
// bfs_buf (the line applying w0r to w0[0] is not visible here).
353 for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
355 const u32 w0r = bfs_buf[il_pos].i;
// Byte-swap every candidate word before the byte-level shift below.
368 w0_t[0] = swap_workaround (w0[0]);
369 w0_t[1] = swap_workaround (w0[1]);
370 w0_t[2] = swap_workaround (w0[2]);
371 w0_t[3] = swap_workaround (w0[3]);
372 w1_t[0] = swap_workaround (w1[0]);
373 w1_t[1] = swap_workaround (w1[1]);
374 w1_t[2] = swap_workaround (w1[2]);
375 w1_t[3] = swap_workaround (w1[3]);
376 w2_t[0] = swap_workaround (w2[0]);
377 w2_t[1] = swap_workaround (w2[1]);
378 w2_t[2] = swap_workaround (w2[2]);
379 w2_t[3] = swap_workaround (w2[3]);
380 w3_t[0] = swap_workaround (w3[0]);
381 w3_t[1] = swap_workaround (w3[1]);
382 w3_t[2] = swap_workaround (w3[2]);
383 w3_t[3] = swap_workaround (w3[3]);
// Shift the candidate right by salt_len bytes, then OR the salt words
// into the vacated leading bytes.
385 switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, salt_len);
387 w0_t[0] |= salt_buf0[0];
388 w0_t[1] |= salt_buf0[1];
389 w0_t[2] |= salt_buf0[2];
390 w0_t[3] |= salt_buf0[3];
391 w1_t[0] |= salt_buf1[0];
392 w1_t[1] |= salt_buf1[1];
393 w1_t[2] |= salt_buf1[2];
394 w1_t[3] |= salt_buf1[3];
395 w2_t[0] |= salt_buf2[0];
396 w2_t[1] |= salt_buf2[1];
397 w2_t[2] |= salt_buf2[2];
398 w2_t[3] |= salt_buf2[3];
399 w3_t[0] |= salt_buf3[0];
400 w3_t[1] |= salt_buf3[1];
// Message length in bits in the final 32-bit word of the padded block.
402 w3_t[3] = pw_salt_len * 8;
// Swap back to the layout sha512_transform expects; the two length words
// already hold native values, hence they are deliberately not re-swapped.
408 w0_t[0] = swap_workaround (w0_t[0]);
409 w0_t[1] = swap_workaround (w0_t[1]);
410 w0_t[2] = swap_workaround (w0_t[2]);
411 w0_t[3] = swap_workaround (w0_t[3]);
412 w1_t[0] = swap_workaround (w1_t[0]);
413 w1_t[1] = swap_workaround (w1_t[1]);
414 w1_t[2] = swap_workaround (w1_t[2]);
415 w1_t[3] = swap_workaround (w1_t[3]);
416 w2_t[0] = swap_workaround (w2_t[0]);
417 w2_t[1] = swap_workaround (w2_t[1]);
418 w2_t[2] = swap_workaround (w2_t[2]);
419 w2_t[3] = swap_workaround (w2_t[3]);
420 w3_t[0] = swap_workaround (w3_t[0]);
421 w3_t[1] = swap_workaround (w3_t[1]);
422 //w3_t[2] = swap_workaround (w3_t[2]);
423 //w3_t[3] = swap_workaround (w3_t[3]);
// SHA-512 initial hash values H0..H7 (FIPS 180-4).
427 digest[0] = SHA512M_A;
428 digest[1] = SHA512M_B;
429 digest[2] = SHA512M_C;
430 digest[3] = SHA512M_D;
431 digest[4] = SHA512M_E;
432 digest[5] = SHA512M_F;
433 digest[6] = SHA512M_G;
434 digest[7] = SHA512M_H;
436 sha512_transform (w0_t, w1_t, w2_t, w3_t, digest);
// Comparison words in DGST_R0..R3 ordering (halves of digest[7] and
// digest[3]); the COMPARE_S include that consumes r0..r3 against
// search[] is not visible here.
439 const u32 r0 = l32_from_64 (digest[7]);
440 const u32 r1 = h32_from_64 (digest[7]);
441 const u32 r2 = l32_from_64 (digest[3]);
442 const u32 r3 = h32_from_64 (digest[3]);
// Multi-hash entry kernel, short-password variant ("m04" — presumably
// passwords up to 4 u32 words / 16 bytes, per hashcat naming; confirm).
// Loads the candidate words for this work-item and delegates to m01740m.
// NOTE(review): the w0..w3 declarations and the zero-fill of w1..w3 are not
// visible in this excerpt.
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01740_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
454 const u32 gid = get_global_id (0);
// Guard against the padded global range exceeding the candidate count.
456 if (gid >= gid_max) return;
460 w0[0] = pws[gid].i[ 0];
461 w0[1] = pws[gid].i[ 1];
462 w0[2] = pws[gid].i[ 2];
463 w0[3] = pws[gid].i[ 3];
486 const u32 pw_len = pws[gid].pw_len;
492 m01740m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Multi-hash entry kernel, medium-password variant ("m08" — presumably
// passwords up to 8 u32 words / 32 bytes, per hashcat naming; confirm).
// Loads w0 and w1 for this work-item and delegates to m01740m.
// NOTE(review): the w0..w3 declarations and the zero-fill of w2..w3 are not
// visible in this excerpt.
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01740_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
501 const u32 gid = get_global_id (0);
503 if (gid >= gid_max) return;
507 w0[0] = pws[gid].i[ 0];
508 w0[1] = pws[gid].i[ 1];
509 w0[2] = pws[gid].i[ 2];
510 w0[3] = pws[gid].i[ 3];
514 w1[0] = pws[gid].i[ 4];
515 w1[1] = pws[gid].i[ 5];
516 w1[2] = pws[gid].i[ 6];
517 w1[3] = pws[gid].i[ 7];
533 const u32 pw_len = pws[gid].pw_len;
539 m01740m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Multi-hash entry kernel, long-password variant ("m16" — presumably
// passwords up to 16 u32 words / 64 bytes, per hashcat naming; confirm).
// Loads w0..w3 (w3[2..3] reserved for padding/length) and delegates to
// m01740m. NOTE(review): the w0..w3 declarations are not visible here.
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01740_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
548 const u32 gid = get_global_id (0);
550 if (gid >= gid_max) return;
554 w0[0] = pws[gid].i[ 0];
555 w0[1] = pws[gid].i[ 1];
556 w0[2] = pws[gid].i[ 2];
557 w0[3] = pws[gid].i[ 3];
561 w1[0] = pws[gid].i[ 4];
562 w1[1] = pws[gid].i[ 5];
563 w1[2] = pws[gid].i[ 6];
564 w1[3] = pws[gid].i[ 7];
568 w2[0] = pws[gid].i[ 8];
569 w2[1] = pws[gid].i[ 9];
570 w2[2] = pws[gid].i[10];
571 w2[3] = pws[gid].i[11];
575 w3[0] = pws[gid].i[12];
576 w3[1] = pws[gid].i[13];
580 const u32 pw_len = pws[gid].pw_len;
586 m01740m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Single-hash entry kernel, short-password variant ("s04" — presumably
// passwords up to 4 u32 words / 16 bytes, per hashcat naming; confirm).
// Loads the candidate words for this work-item and delegates to m01740s.
// NOTE(review): the w0..w3 declarations and the zero-fill of w1..w3 are not
// visible in this excerpt.
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01740_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
595 const u32 gid = get_global_id (0);
597 if (gid >= gid_max) return;
601 w0[0] = pws[gid].i[ 0];
602 w0[1] = pws[gid].i[ 1];
603 w0[2] = pws[gid].i[ 2];
604 w0[3] = pws[gid].i[ 3];
627 const u32 pw_len = pws[gid].pw_len;
633 m01740s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Single-hash entry kernel, medium-password variant ("s08" — presumably
// passwords up to 8 u32 words / 32 bytes, per hashcat naming; confirm).
// Loads w0 and w1 for this work-item and delegates to m01740s.
// NOTE(review): the w0..w3 declarations and the zero-fill of w2..w3 are not
// visible in this excerpt.
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01740_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
642 const u32 gid = get_global_id (0);
644 if (gid >= gid_max) return;
648 w0[0] = pws[gid].i[ 0];
649 w0[1] = pws[gid].i[ 1];
650 w0[2] = pws[gid].i[ 2];
651 w0[3] = pws[gid].i[ 3];
655 w1[0] = pws[gid].i[ 4];
656 w1[1] = pws[gid].i[ 5];
657 w1[2] = pws[gid].i[ 6];
658 w1[3] = pws[gid].i[ 7];
674 const u32 pw_len = pws[gid].pw_len;
680 m01740s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Single-hash entry kernel, long-password variant ("s16" — presumably
// passwords up to 16 u32 words / 64 bytes, per hashcat naming; confirm).
// Loads w0..w3 (w3[2..3] reserved for padding/length) and delegates to
// m01740s. NOTE(review): the w0..w3 declarations are not visible here.
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01740_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
689 const u32 gid = get_global_id (0);
691 if (gid >= gid_max) return;
695 w0[0] = pws[gid].i[ 0];
696 w0[1] = pws[gid].i[ 1];
697 w0[2] = pws[gid].i[ 2];
698 w0[3] = pws[gid].i[ 3];
702 w1[0] = pws[gid].i[ 4];
703 w1[1] = pws[gid].i[ 5];
704 w1[2] = pws[gid].i[ 6];
705 w1[3] = pws[gid].i[ 7];
709 w2[0] = pws[gid].i[ 8];
710 w2[1] = pws[gid].i[ 9];
711 w2[2] = pws[gid].i[10];
712 w2[3] = pws[gid].i[11];
716 w3[0] = pws[gid].i[12];
717 w3[1] = pws[gid].i[13];
721 const u32 pw_len = pws[gid].pw_len;
727 m01740s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);