2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
24 #include "include/kernel_functions.c"
26 #include "common_nv.c"
29 #define VECT_COMPARE_S "check_single_vect1_comp4.c"
30 #define VECT_COMPARE_M "check_multi_vect1_comp4.c"
34 #define VECT_COMPARE_S "check_single_vect2_comp4.c"
35 #define VECT_COMPARE_M "check_multi_vect2_comp4.c"
39 #define VECT_COMPARE_S "check_single_vect4_comp4.c"
40 #define VECT_COMPARE_M "check_multi_vect4_comp4.c"
43 #define INITVAL 0x0101010101010101
// Streebog LPS step for byte position `i`: XORs eight precomputed 64-bit
// lookup tables (presumably the S-box composed with the linear transform,
// folded into one table per byte lane -- confirm against the reference
// implementation), each indexed by byte i of the corresponding state word.
// Expects `t[8]`, a loop index `i`, and `s_sbob_sl64[8][256]` to be in
// scope at every expansion site.
#define SBOG_LPSti64 \
s_sbob_sl64[0][(t[0] >> (i * 8)) & 0xff] ^ \
s_sbob_sl64[1][(t[1] >> (i * 8)) & 0xff] ^ \
s_sbob_sl64[2][(t[2] >> (i * 8)) & 0xff] ^ \
s_sbob_sl64[3][(t[3] >> (i * 8)) & 0xff] ^ \
s_sbob_sl64[4][(t[4] >> (i * 8)) & 0xff] ^ \
s_sbob_sl64[5][(t[5] >> (i * 8)) & 0xff] ^ \
s_sbob_sl64[6][(t[6] >> (i * 8)) & 0xff] ^ \
s_sbob_sl64[7][(t[7] >> (i * 8)) & 0xff]
57 __device__ __constant__ u64 sbob_sl64[8][256] =
2125 __device__ __constant__ u64 sbob_rc64[12][8] =
// Streebog (GOST R 34.11-2012) compression function g: mixes one 512-bit
// message block m[8] into the chaining state h[8], using the shared-memory
// copy of the S/L lookup tables via SBOG_LPSti64.
// NOTE(review): several loop bodies and the local declarations (apparently
// u64 k[8], s[8], t[8]) are elided in this view; comments below only
// describe what the visible lines establish.
__device__ static void streebog_g (u64 h[8], const u64 m[8], u64 s_sbob_sl64[8][256])
// (body elided) -- presumably seeds t[] from h and m; TODO confirm
for (int i = 0; i < 8; i++)
// initial round key: k = LPS(t)
for (int i = 0; i < 8; i++)
k[i] = SBOG_LPSti64;
// (body elided) -- presumably initializes s[] from the message; TODO confirm
for (int i = 0; i < 8; i++)
// 12 rounds of the keyed E transform
for (int r = 0; r < 12; r++)
// (body elided) -- presumably t = s ^ k; TODO confirm
for (int i = 0; i < 8; i++)
// state update: s = LPS(t)
for (int i = 0; i < 8; i++)
s[i] = SBOG_LPSti64;
// key schedule: next k = LPS(k ^ C[r]), sbob_rc64 holds the round constants
for (int i = 0; i < 8; i++)
t[i] = k[i] ^ sbob_rc64[r][i];
for (int i = 0; i < 8; i++)
k[i] = SBOG_LPSti64;
// feed-forward back into the caller's chaining state
for (int i = 0; i < 8; i++)
h[i] ^= s[i] ^ k[i] ^ m[i];
2305 __device__ __constant__ comb_t c_combs[1024];
/**
 * GOST R 34.11-2012 (Streebog-256) combinator-attack kernel, multi-hash
 * comparison path. Requires blockDim.x == 256 (enforced by
 * __launch_bounds__): the shared-table fill below has each thread copy
 * exactly 4 entries of every 256-entry table row.
 */
extern "C" __global__ void __launch_bounds__ (256, 1) m11700_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 lid = threadIdx.x;

  /**
   * shared lookup table
   */

  __shared__ u64 s_sbob_sl64[8][256];

  const u32 lid4 = lid * 4;

  // stage the constant-memory tables into shared memory: constant cache
  // serializes on divergent indices, shared memory does not
  s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0];
  s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1];
  s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2];
  s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3];
  s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0];
  s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1];
  s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2];
  s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3];
  s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0];
  s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1];
  s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2];
  s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3];
  s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0];
  s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1];
  s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2];
  s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3];
  s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0];
  s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1];
  s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2];
  s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3];
  s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0];
  s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1];
  s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2];
  s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3];
  s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0];
  s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1];
  s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2];
  s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3];
  s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0];
  s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1];
  s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2];
  s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3];

  // every thread must finish filling the table before any thread reads it;
  // this must come before the gid guard so no thread skips the barrier
  __syncthreads ();

  /**
   * base
   */

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  u32 wordl0[4];

  wordl0[0] = pws[gid].i[ 0];
  wordl0[1] = pws[gid].i[ 1];
  wordl0[2] = pws[gid].i[ 2];
  wordl0[3] = pws[gid].i[ 3];

  u32 wordl1[4];

  wordl1[0] = pws[gid].i[ 4];
  wordl1[1] = pws[gid].i[ 5];
  wordl1[2] = pws[gid].i[ 6];
  wordl1[3] = pws[gid].i[ 7];

  u32 wordl2[4] = { 0, 0, 0, 0 };
  u32 wordl3[4] = { 0, 0, 0, 0 };

  const u32 pw_l_len = pws[gid].pw_len;

  if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
  {
    // NOTE(review): streebog's padding byte is 0x01 (cf. append_0x01_4 in
    // the loop below); appending 0x80 here looks inconsistent -- confirm
    // against the reference implementation before changing
    append_0x80_2 (wordl0, wordl1, pw_l_len);

    switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, c_combs[0].pw_len);
  }

  /**
   * loop over the right-hand candidate words
   */

  for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
  {
    const u32 pw_r_len = c_combs[il_pos].pw_len;

    const u32 pw_len = pw_l_len + pw_r_len;

    u32 wordr0[4];

    wordr0[0] = c_combs[il_pos].i[0];
    wordr0[1] = c_combs[il_pos].i[1];
    wordr0[2] = c_combs[il_pos].i[2];
    wordr0[3] = c_combs[il_pos].i[3];

    u32 wordr1[4];

    wordr1[0] = c_combs[il_pos].i[4];
    wordr1[1] = c_combs[il_pos].i[5];
    wordr1[2] = c_combs[il_pos].i[6];
    wordr1[3] = c_combs[il_pos].i[7];

    u32 wordr2[4] = { 0, 0, 0, 0 };
    u32 wordr3[4] = { 0, 0, 0, 0 };

    if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
    {
      switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);
    }

    u32 w[16];

    w[ 0] = wordl0[0] | wordr0[0];
    w[ 1] = wordl0[1] | wordr0[1];
    w[ 2] = wordl0[2] | wordr0[2];
    w[ 3] = wordl0[3] | wordr0[3];
    w[ 4] = wordl1[0] | wordr1[0];
    w[ 5] = wordl1[1] | wordr1[1];
    w[ 6] = wordl1[2] | wordr1[2];
    w[ 7] = wordl1[3] | wordr1[3];
    w[ 8] = wordl2[0] | wordr2[0];
    w[ 9] = wordl2[1] | wordr2[1];
    w[10] = wordl2[2] | wordr2[2];
    w[11] = wordl2[3] | wordr2[3];
    w[12] = wordl3[0] | wordr3[0];
    w[13] = wordl3[1] | wordr3[1];
    w[14] = wordl3[2] | wordr3[2]; // BUG FIX: was wordl3[1] | wordr3[1]
    w[15] = wordl3[3] | wordr3[3]; // BUG FIX: was wordl3[1] | wordr3[1]

    // streebog appends a single 0x01 byte after the message
    append_0x01_4 (&w[0], &w[1], &w[2], &w[3], pw_len);

    /**
     * reverse message block: the 32-bit words are recombined high-to-low
     * into 64-bit halves and byte-swapped for streebog's word order
     */

    u64 m[8];

    m[0] = hl32_to_64 (w[15], w[14]);
    m[1] = hl32_to_64 (w[13], w[12]);
    m[2] = hl32_to_64 (w[11], w[10]);
    m[3] = hl32_to_64 (w[ 9], w[ 8]);
    m[4] = hl32_to_64 (w[ 7], w[ 6]);
    m[5] = hl32_to_64 (w[ 5], w[ 4]);
    m[6] = hl32_to_64 (w[ 3], w[ 2]);
    m[7] = hl32_to_64 (w[ 1], w[ 0]);

    m[0] = swap_workaround (m[0]);
    m[1] = swap_workaround (m[1]);
    m[2] = swap_workaround (m[2]);
    m[3] = swap_workaround (m[3]);
    m[4] = swap_workaround (m[4]);
    m[5] = swap_workaround (m[5]);
    m[6] = swap_workaround (m[6]);
    m[7] = swap_workaround (m[7]);

    // state buffer (hash): streebog-256 IV is 64 repetitions of byte 0x01
    u64 h[8];

    h[0] = INITVAL;
    h[1] = INITVAL;
    h[2] = INITVAL;
    h[3] = INITVAL;
    h[4] = INITVAL;
    h[5] = INITVAL;
    h[6] = INITVAL;
    h[7] = INITVAL;

    streebog_g (h, m, s_sbob_sl64);

    // finalization: fold in the message bit length, then the block again
    u64 z[8];

    z[0] = 0;
    z[1] = 0;
    z[2] = 0;
    z[3] = 0;
    z[4] = 0;
    z[5] = 0;
    z[6] = 0;
    z[7] = swap_workaround ((u64) (pw_len * 8));

    streebog_g (h, z, s_sbob_sl64);
    streebog_g (h, m, s_sbob_sl64);

    const u32 r0 = l32_from_64 (h[0]);
    const u32 r1 = h32_from_64 (h[0]);
    const u32 r2 = l32_from_64 (h[1]);
    const u32 r3 = h32_from_64 (h[1]);

    #include VECT_COMPARE_M
  }
}
2532 extern "C" __global__ void __launch_bounds__ (256, 1) m11700_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
2536 extern "C" __global__ void __launch_bounds__ (256, 1) m11700_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
/**
 * GOST R 34.11-2012 (Streebog-256) combinator-attack kernel, single-hash
 * comparison path. Requires blockDim.x == 256 (enforced by
 * __launch_bounds__): the shared-table fill below has each thread copy
 * exactly 4 entries of every 256-entry table row.
 */
extern "C" __global__ void __launch_bounds__ (256, 1) m11700_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 lid = threadIdx.x;

  /**
   * shared lookup table
   */

  __shared__ u64 s_sbob_sl64[8][256];

  const u32 lid4 = lid * 4;

  // stage the constant-memory tables into shared memory: constant cache
  // serializes on divergent indices, shared memory does not
  s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0];
  s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1];
  s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2];
  s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3];
  s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0];
  s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1];
  s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2];
  s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3];
  s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0];
  s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1];
  s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2];
  s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3];
  s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0];
  s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1];
  s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2];
  s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3];
  s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0];
  s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1];
  s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2];
  s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3];
  s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0];
  s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1];
  s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2];
  s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3];
  s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0];
  s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1];
  s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2];
  s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3];
  s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0];
  s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1];
  s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2];
  s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3];

  // every thread must finish filling the table before any thread reads it;
  // this must come before the gid guard so no thread skips the barrier
  __syncthreads ();

  /**
   * digest: the single target hash this kernel searches for
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  /**
   * base
   */

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  u32 wordl0[4];

  wordl0[0] = pws[gid].i[ 0];
  wordl0[1] = pws[gid].i[ 1];
  wordl0[2] = pws[gid].i[ 2];
  wordl0[3] = pws[gid].i[ 3];

  u32 wordl1[4];

  wordl1[0] = pws[gid].i[ 4];
  wordl1[1] = pws[gid].i[ 5];
  wordl1[2] = pws[gid].i[ 6];
  wordl1[3] = pws[gid].i[ 7];

  u32 wordl2[4] = { 0, 0, 0, 0 };
  u32 wordl3[4] = { 0, 0, 0, 0 };

  const u32 pw_l_len = pws[gid].pw_len;

  if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
  {
    // NOTE(review): streebog's padding byte is 0x01 (cf. append_0x01_4 in
    // the loop below); appending 0x80 here looks inconsistent -- confirm
    // against the reference implementation before changing
    append_0x80_2 (wordl0, wordl1, pw_l_len);

    switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, c_combs[0].pw_len);
  }

  /**
   * loop over the right-hand candidate words
   */

  for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
  {
    const u32 pw_r_len = c_combs[il_pos].pw_len;

    const u32 pw_len = pw_l_len + pw_r_len;

    u32 wordr0[4];

    wordr0[0] = c_combs[il_pos].i[0];
    wordr0[1] = c_combs[il_pos].i[1];
    wordr0[2] = c_combs[il_pos].i[2];
    wordr0[3] = c_combs[il_pos].i[3];

    u32 wordr1[4];

    wordr1[0] = c_combs[il_pos].i[4];
    wordr1[1] = c_combs[il_pos].i[5];
    wordr1[2] = c_combs[il_pos].i[6];
    wordr1[3] = c_combs[il_pos].i[7];

    u32 wordr2[4] = { 0, 0, 0, 0 };
    u32 wordr3[4] = { 0, 0, 0, 0 };

    if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
    {
      switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);
    }

    u32 w[16];

    w[ 0] = wordl0[0] | wordr0[0];
    w[ 1] = wordl0[1] | wordr0[1];
    w[ 2] = wordl0[2] | wordr0[2];
    w[ 3] = wordl0[3] | wordr0[3];
    w[ 4] = wordl1[0] | wordr1[0];
    w[ 5] = wordl1[1] | wordr1[1];
    w[ 6] = wordl1[2] | wordr1[2];
    w[ 7] = wordl1[3] | wordr1[3];
    w[ 8] = wordl2[0] | wordr2[0];
    w[ 9] = wordl2[1] | wordr2[1];
    w[10] = wordl2[2] | wordr2[2];
    w[11] = wordl2[3] | wordr2[3];
    w[12] = wordl3[0] | wordr3[0];
    w[13] = wordl3[1] | wordr3[1];
    w[14] = wordl3[2] | wordr3[2]; // BUG FIX: was wordl3[1] | wordr3[1]
    w[15] = wordl3[3] | wordr3[3]; // BUG FIX: was wordl3[1] | wordr3[1]

    // streebog appends a single 0x01 byte after the message
    append_0x01_4 (&w[0], &w[1], &w[2], &w[3], pw_len);

    /**
     * reverse message block: the 32-bit words are recombined high-to-low
     * into 64-bit halves and byte-swapped for streebog's word order
     */

    u64 m[8];

    m[0] = hl32_to_64 (w[15], w[14]);
    m[1] = hl32_to_64 (w[13], w[12]);
    m[2] = hl32_to_64 (w[11], w[10]);
    m[3] = hl32_to_64 (w[ 9], w[ 8]);
    m[4] = hl32_to_64 (w[ 7], w[ 6]);
    m[5] = hl32_to_64 (w[ 5], w[ 4]);
    m[6] = hl32_to_64 (w[ 3], w[ 2]);
    m[7] = hl32_to_64 (w[ 1], w[ 0]);

    m[0] = swap_workaround (m[0]);
    m[1] = swap_workaround (m[1]);
    m[2] = swap_workaround (m[2]);
    m[3] = swap_workaround (m[3]);
    m[4] = swap_workaround (m[4]);
    m[5] = swap_workaround (m[5]);
    m[6] = swap_workaround (m[6]);
    m[7] = swap_workaround (m[7]);

    // state buffer (hash): streebog-256 IV is 64 repetitions of byte 0x01
    u64 h[8];

    h[0] = INITVAL;
    h[1] = INITVAL;
    h[2] = INITVAL;
    h[3] = INITVAL;
    h[4] = INITVAL;
    h[5] = INITVAL;
    h[6] = INITVAL;
    h[7] = INITVAL;

    streebog_g (h, m, s_sbob_sl64);

    // finalization: fold in the message bit length, then the block again
    u64 z[8];

    z[0] = 0;
    z[1] = 0;
    z[2] = 0;
    z[3] = 0;
    z[4] = 0;
    z[5] = 0;
    z[6] = 0;
    z[7] = swap_workaround ((u64) (pw_len * 8));

    streebog_g (h, z, s_sbob_sl64);
    streebog_g (h, m, s_sbob_sl64);

    const u32 r0 = l32_from_64 (h[0]);
    const u32 r1 = h32_from_64 (h[0]);
    const u32 r2 = l32_from_64 (h[1]);
    const u32 r3 = h32_from_64 (h[1]);

    #include VECT_COMPARE_S
  }
}
2777 extern "C" __global__ void __launch_bounds__ (256, 1) m11700_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
2781 extern "C" __global__ void __launch_bounds__ (256, 1) m11700_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)