2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
28 #include "include/kernel_functions.c"
29 #include "types_amd.c"
30 #include "common_amd.c"
33 #define VECT_COMPARE_S "check_single_vect1_comp4.c"
34 #define VECT_COMPARE_M "check_multi_vect1_comp4.c"
38 #define VECT_COMPARE_S "check_single_vect2_comp4.c"
39 #define VECT_COMPARE_M "check_multi_vect2_comp4.c"
// Keccak-f[1600] round constants (RC), one per round; XORed into lane st[0]
// in the Iota step (see "st[0] ^= keccakf_rndc[round]" below).
// NOTE(review): the braces of the initializer list are elided in this excerpt.
42 __constant u64 keccakf_rndc[24] =
44 0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
45 0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
46 0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
47 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
48 0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
49 0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
50 0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
51 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
// Per-step rotation offsets for the Rho step; indexed in lock-step with
// keccakf_piln below ("u32 k = keccakf_rotc[s]" in the macro fragment).
// NOTE(review): initializer braces are elided in this excerpt.
54 __constant u32 keccakf_rotc[24] =
56 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
57 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
// Lane permutation order for the Pi step: destination lane index for each of
// the 24 Rho/Pi sub-steps ("u32 j = keccakf_piln[s]" in the macro fragment).
// NOTE(review): initializer braces are elided in this excerpt.
60 __constant u32 keccakf_piln[24] =
62 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
63 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
// Number of Keccak-f[1600] permutation rounds.
67 #define KECCAK_ROUNDS 24
// Theta step, part 1: column parity — XOR of the five lanes in column s.
70 #define Theta1(s) (st[0 + s] ^ st[5 + s] ^ st[10 + s] ^ st[15 + s] ^ st[20 + s])
// Fragments of the combined Rho (rotate-by-keccakf_rotc) / Pi (permute-by-
// keccakf_piln) step and of the Chi step (st[i] ^= ~bc[i+1] & bc[i+2]).
// NOTE(review): the enclosing #define lines and the remaining backslash-
// continued lines of these macros are elided in this excerpt; no comments are
// inserted between continuation lines to avoid breaking the macro bodies.
83 u32 j = keccakf_piln[s]; \
84 u32 k = keccakf_rotc[s]; \
86 st[j] = rotl64 (t, k); \
97 st[0 + s] ^= ~bc1 & bc2; \
98 st[1 + s] ^= ~bc2 & bc3; \
99 st[2 + s] ^= ~bc3 & bc4; \
100 st[3 + s] ^= ~bc4 & bc0; \
101 st[4 + s] ^= ~bc0 & bc1; \
// Keccak (hash-mode 5000) inner loop, multi-hash ("m") variant.
// Packs the 16 candidate u32 words w0..w3 into the first eight 64-bit Keccak
// lanes, sets the final padding bit at lane add80w, runs KECCAK_ROUNDS rounds
// of Keccak-f[1600], then hands lanes st[1]/st[2] (split into four u32s) to
// the multi-hash comparison include.
// NOTE(review): interior lines are elided in this excerpt — opening/closing
// braces, the st[25]/t/round declarations, the per-candidate w0 override with
// w0r, zeroing of lanes 8..24, and the Rho-Pi/Chi sections of the round loop.
104 static void m05000m (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
110 const u32 gid = get_global_id (0);
111 const u32 lid = get_local_id (0);
114 * 0x80 keccak, very special
// digest length in bytes, carried with the salt record
117 const u32 mdlen = salt_bufs[salt_pos].keccak_mdlen;
// sponge rate in bytes: state size (200) minus capacity (2 * mdlen)
119 const u32 rsiz = 200 - (2 * mdlen);
// index of the 64-bit lane holding the last rate byte; receives the
// trailing 0x80 padding bit below
121 const u32 add80w = (rsiz - 1) / 8;
// iterate over the brute-force candidate words for this work-item
129 for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
131 const u32 w0r = bfs_buf[il_pos].i;
// absorb: pack u32 word pairs little-endian into lanes 0..7.  The two
// packing variants below presumably sit in VECT_SIZE-specific #ifdef
// branches whose directives are elided here — TODO confirm.
138 st[ 0] = (u64x) (w0[0]) | (u64x) (w0[1]) << 32;
139 st[ 1] = (u64x) (w0[2]) | (u64x) (w0[3]) << 32;
140 st[ 2] = (u64x) (w1[0]) | (u64x) (w1[1]) << 32;
141 st[ 3] = (u64x) (w1[2]) | (u64x) (w1[3]) << 32;
142 st[ 4] = (u64x) (w2[0]) | (u64x) (w2[1]) << 32;
143 st[ 5] = (u64x) (w2[2]) | (u64x) (w2[3]) << 32;
144 st[ 6] = (u64x) (w3[0]) | (u64x) (w3[1]) << 32;
145 st[ 7] = (u64x) (w3[2]) | (u64x) (w3[3]) << 32;
// 2-wide vector variant: build each u64x lane from the .s0/.s1 components
149 st[ 0] = (u64x) (w0[0].s0, w0[0].s1) | (u64x) (w0[1].s0, w0[1].s1) << 32;
150 st[ 1] = (u64x) (w0[2].s0, w0[2].s1) | (u64x) (w0[3].s0, w0[3].s1) << 32;
151 st[ 2] = (u64x) (w1[0].s0, w1[0].s1) | (u64x) (w1[1].s0, w1[1].s1) << 32;
152 st[ 3] = (u64x) (w1[2].s0, w1[2].s1) | (u64x) (w1[3].s0, w1[3].s1) << 32;
153 st[ 4] = (u64x) (w2[0].s0, w2[0].s1) | (u64x) (w2[1].s0, w2[1].s1) << 32;
154 st[ 5] = (u64x) (w2[2].s0, w2[2].s1) | (u64x) (w2[3].s0, w2[3].s1) << 32;
155 st[ 6] = (u64x) (w3[0].s0, w3[0].s1) | (u64x) (w3[1].s0, w3[1].s1) << 32;
156 st[ 7] = (u64x) (w3[2].s0, w3[2].s1) | (u64x) (w3[3].s0, w3[3].s1) << 32;
// set the final padding bit (high bit of the last rate lane)
177 st[add80w] |= 0x8000000000000000;
// Keccak-f permutation ('round' is declared on an elided line)
181 for (round = 0; round < KECCAK_ROUNDS; round++)
// Theta: compute the five column parities
185 u64x bc0 = Theta1 (0);
186 u64x bc1 = Theta1 (1);
187 u64x bc2 = Theta1 (2);
188 u64x bc3 = Theta1 (3);
189 u64x bc4 = Theta1 (4);
// Theta: fold parity(col-1) ^ rotl(parity(col+1), 1) into each column
193 t = bc4 ^ rotl64 (bc1, 1); Theta2 (0);
194 t = bc0 ^ rotl64 (bc2, 1); Theta2 (1);
195 t = bc1 ^ rotl64 (bc3, 1); Theta2 (2);
196 t = bc2 ^ rotl64 (bc4, 1); Theta2 (3);
197 t = bc3 ^ rotl64 (bc0, 1); Theta2 (4);
// Iota: inject the round constant (Rho-Pi/Chi sections elided above)
238 st[0] ^= keccakf_rndc[round];
// digest words used for bitmap lookup / comparison: low and high u32
// halves of lanes st[1] and st[2]
241 const u32x r0 = l32_from_64 (st[1]);
242 const u32x r1 = h32_from_64 (st[1]);
243 const u32x r2 = l32_from_64 (st[2]);
244 const u32x r3 = h32_from_64 (st[2]);
// shared multi-hash comparison code (expects r0..r3 in scope)
246 #include VECT_COMPARE_M
// Keccak (hash-mode 5000) inner loop, single-hash ("s") variant.
// Identical absorb/permute structure to m05000m, but first caches the one
// target digest (search[0..3]) so the comparison include can early-reject.
// NOTE(review): interior lines are elided in this excerpt — braces, the
// st[25]/t/round declarations, the per-candidate w0 override with w0r,
// zeroing of lanes 8..24, and the Rho-Pi/Chi sections of the round loop.
250 static void m05000s (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
256 const u32 gid = get_global_id (0);
257 const u32 lid = get_local_id (0);
// the single target digest, reordered as DGST_R0..R3 for the comparison
263 const u32 search[4] =
265 digests_buf[digests_offset].digest_buf[DGST_R0],
266 digests_buf[digests_offset].digest_buf[DGST_R1],
267 digests_buf[digests_offset].digest_buf[DGST_R2],
268 digests_buf[digests_offset].digest_buf[DGST_R3]
272 * 0x80 keccak, very special
// digest length in bytes, carried with the salt record
275 const u32 mdlen = salt_bufs[salt_pos].keccak_mdlen;
// sponge rate in bytes: state size (200) minus capacity (2 * mdlen)
277 const u32 rsiz = 200 - (2 * mdlen);
// index of the 64-bit lane holding the last rate byte
279 const u32 add80w = (rsiz - 1) / 8;
// iterate over the brute-force candidate words for this work-item
287 for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
289 const u32 w0r = bfs_buf[il_pos].i;
// absorb: pack u32 word pairs little-endian into lanes 0..7.  The two
// packing variants below presumably sit in VECT_SIZE-specific #ifdef
// branches whose directives are elided here — TODO confirm.
296 st[ 0] = (u64x) (w0[0]) | (u64x) (w0[1]) << 32;
297 st[ 1] = (u64x) (w0[2]) | (u64x) (w0[3]) << 32;
298 st[ 2] = (u64x) (w1[0]) | (u64x) (w1[1]) << 32;
299 st[ 3] = (u64x) (w1[2]) | (u64x) (w1[3]) << 32;
300 st[ 4] = (u64x) (w2[0]) | (u64x) (w2[1]) << 32;
301 st[ 5] = (u64x) (w2[2]) | (u64x) (w2[3]) << 32;
302 st[ 6] = (u64x) (w3[0]) | (u64x) (w3[1]) << 32;
303 st[ 7] = (u64x) (w3[2]) | (u64x) (w3[3]) << 32;
// 2-wide vector variant: build each u64x lane from the .s0/.s1 components
307 st[ 0] = (u64x) (w0[0].s0, w0[0].s1) | (u64x) (w0[1].s0, w0[1].s1) << 32;
308 st[ 1] = (u64x) (w0[2].s0, w0[2].s1) | (u64x) (w0[3].s0, w0[3].s1) << 32;
309 st[ 2] = (u64x) (w1[0].s0, w1[0].s1) | (u64x) (w1[1].s0, w1[1].s1) << 32;
310 st[ 3] = (u64x) (w1[2].s0, w1[2].s1) | (u64x) (w1[3].s0, w1[3].s1) << 32;
311 st[ 4] = (u64x) (w2[0].s0, w2[0].s1) | (u64x) (w2[1].s0, w2[1].s1) << 32;
312 st[ 5] = (u64x) (w2[2].s0, w2[2].s1) | (u64x) (w2[3].s0, w2[3].s1) << 32;
313 st[ 6] = (u64x) (w3[0].s0, w3[0].s1) | (u64x) (w3[1].s0, w3[1].s1) << 32;
314 st[ 7] = (u64x) (w3[2].s0, w3[2].s1) | (u64x) (w3[3].s0, w3[3].s1) << 32;
// set the final padding bit (high bit of the last rate lane)
335 st[add80w] |= 0x8000000000000000;
// Keccak-f permutation ('round' is declared on an elided line)
339 for (round = 0; round < KECCAK_ROUNDS; round++)
// Theta: compute the five column parities
343 u64x bc0 = Theta1 (0);
344 u64x bc1 = Theta1 (1);
345 u64x bc2 = Theta1 (2);
346 u64x bc3 = Theta1 (3);
347 u64x bc4 = Theta1 (4);
// Theta: fold parity(col-1) ^ rotl(parity(col+1), 1) into each column
351 t = bc4 ^ rotl64 (bc1, 1); Theta2 (0);
352 t = bc0 ^ rotl64 (bc2, 1); Theta2 (1);
353 t = bc1 ^ rotl64 (bc3, 1); Theta2 (2);
354 t = bc2 ^ rotl64 (bc4, 1); Theta2 (3);
355 t = bc3 ^ rotl64 (bc0, 1); Theta2 (4);
// Iota: inject the round constant (Rho-Pi/Chi sections elided above)
396 st[0] ^= keccakf_rndc[round];
// digest words compared against search[]: low and high u32 halves of
// lanes st[1] and st[2]
399 const u32x r0 = l32_from_64 (st[1]);
400 const u32x r1 = h32_from_64 (st[1]);
401 const u32x r2 = l32_from_64 (st[2]);
402 const u32x r3 = h32_from_64 (st[2]);
// shared single-hash comparison code (expects r0..r3 and search[] in scope)
404 #include VECT_COMPARE_S
// Kernel entry point: brute-force attack, multi-hash comparison, short
// passwords — loads only the first 4 password words (w0) and dispatches to
// m05000m.  Work-group size is fixed at 64x1x1.
// NOTE(review): the w0..w3 declarations and the zeroing of w1..w3 are elided
// in this excerpt.
408 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
414 const u32 gid = get_global_id (0);
// drop excess work-items beyond the candidate range
416 if (gid >= gid_max) return;
// load the candidate password words for this work-item
420 w0[0] = pws[gid].i[ 0];
421 w0[1] = pws[gid].i[ 1];
422 w0[2] = pws[gid].i[ 2];
423 w0[3] = pws[gid].i[ 3];
446 const u32 pw_len = pws[gid].pw_len;
// hand off to the multi-hash inner loop
452 m05000m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Kernel entry point: brute-force attack, multi-hash comparison, medium
// passwords — loads the first 8 password words (w0, w1) and dispatches to
// m05000m.  Work-group size is fixed at 64x1x1.
// NOTE(review): the w0..w3 declarations and the zeroing of w2..w3 are elided
// in this excerpt.
455 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
461 const u32 gid = get_global_id (0);
// drop excess work-items beyond the candidate range
463 if (gid >= gid_max) return;
// load the candidate password words for this work-item
467 w0[0] = pws[gid].i[ 0];
468 w0[1] = pws[gid].i[ 1];
469 w0[2] = pws[gid].i[ 2];
470 w0[3] = pws[gid].i[ 3];
474 w1[0] = pws[gid].i[ 4];
475 w1[1] = pws[gid].i[ 5];
476 w1[2] = pws[gid].i[ 6];
477 w1[3] = pws[gid].i[ 7];
493 const u32 pw_len = pws[gid].pw_len;
// hand off to the multi-hash inner loop
499 m05000m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Kernel entry point: brute-force attack, multi-hash comparison, long
// passwords — loads password words 0..13 (w0..w3[1]) and dispatches to
// m05000m.  Work-group size is fixed at 64x1x1.
// NOTE(review): the w0..w3 declarations and the handling of w3[2]/w3[3]
// (presumably length/zero words) are elided in this excerpt — TODO confirm.
502 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
508 const u32 gid = get_global_id (0);
// drop excess work-items beyond the candidate range
510 if (gid >= gid_max) return;
// load the candidate password words for this work-item
514 w0[0] = pws[gid].i[ 0];
515 w0[1] = pws[gid].i[ 1];
516 w0[2] = pws[gid].i[ 2];
517 w0[3] = pws[gid].i[ 3];
521 w1[0] = pws[gid].i[ 4];
522 w1[1] = pws[gid].i[ 5];
523 w1[2] = pws[gid].i[ 6];
524 w1[3] = pws[gid].i[ 7];
528 w2[0] = pws[gid].i[ 8];
529 w2[1] = pws[gid].i[ 9];
530 w2[2] = pws[gid].i[10];
531 w2[3] = pws[gid].i[11];
535 w3[0] = pws[gid].i[12];
536 w3[1] = pws[gid].i[13];
540 const u32 pw_len = pws[gid].pw_len;
// hand off to the multi-hash inner loop
546 m05000m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Kernel entry point: brute-force attack, single-hash comparison, short
// passwords — loads only the first 4 password words (w0) and dispatches to
// m05000s.  Work-group size is fixed at 64x1x1.
// NOTE(review): the w0..w3 declarations and the zeroing of w1..w3 are elided
// in this excerpt.
549 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
555 const u32 gid = get_global_id (0);
// drop excess work-items beyond the candidate range
557 if (gid >= gid_max) return;
// load the candidate password words for this work-item
561 w0[0] = pws[gid].i[ 0];
562 w0[1] = pws[gid].i[ 1];
563 w0[2] = pws[gid].i[ 2];
564 w0[3] = pws[gid].i[ 3];
587 const u32 pw_len = pws[gid].pw_len;
// hand off to the single-hash inner loop
593 m05000s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Kernel entry point: brute-force attack, single-hash comparison, medium
// passwords — loads the first 8 password words (w0, w1) and dispatches to
// m05000s.  Work-group size is fixed at 64x1x1.
// NOTE(review): the w0..w3 declarations and the zeroing of w2..w3 are elided
// in this excerpt.
596 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
602 const u32 gid = get_global_id (0);
// drop excess work-items beyond the candidate range
604 if (gid >= gid_max) return;
// load the candidate password words for this work-item
608 w0[0] = pws[gid].i[ 0];
609 w0[1] = pws[gid].i[ 1];
610 w0[2] = pws[gid].i[ 2];
611 w0[3] = pws[gid].i[ 3];
615 w1[0] = pws[gid].i[ 4];
616 w1[1] = pws[gid].i[ 5];
617 w1[2] = pws[gid].i[ 6];
618 w1[3] = pws[gid].i[ 7];
634 const u32 pw_len = pws[gid].pw_len;
// hand off to the single-hash inner loop
640 m05000s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
// Kernel entry point: brute-force attack, single-hash comparison, long
// passwords — loads password words 0..13 (w0..w3[1]) and dispatches to
// m05000s.  Work-group size is fixed at 64x1x1.
// NOTE(review): the w0..w3 declarations and the handling of w3[2]/w3[3]
// (presumably length/zero words) are elided in this excerpt — TODO confirm.
643 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m05000_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
649 const u32 gid = get_global_id (0);
// drop excess work-items beyond the candidate range
651 if (gid >= gid_max) return;
// load the candidate password words for this work-item
655 w0[0] = pws[gid].i[ 0];
656 w0[1] = pws[gid].i[ 1];
657 w0[2] = pws[gid].i[ 2];
658 w0[3] = pws[gid].i[ 3];
662 w1[0] = pws[gid].i[ 4];
663 w1[1] = pws[gid].i[ 5];
664 w1[2] = pws[gid].i[ 6];
665 w1[3] = pws[gid].i[ 7];
669 w2[0] = pws[gid].i[ 8];
670 w2[1] = pws[gid].i[ 9];
671 w2[2] = pws[gid].i[10];
672 w2[3] = pws[gid].i[11];
676 w3[0] = pws[gid].i[12];
677 w3[1] = pws[gid].i[13];
681 const u32 pw_len = pws[gid].pw_len;
// hand off to the single-hash inner loop
687 m05000s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);