2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
24 #include "include/kernel_functions.c"
26 #include "common_nv.c"
29 #define VECT_COMPARE_S "check_single_vect1_comp4.c"
30 #define VECT_COMPARE_M "check_multi_vect1_comp4.c"
34 #define VECT_COMPARE_S "check_single_vect2_comp4.c"
35 #define VECT_COMPARE_M "check_multi_vect2_comp4.c"
39 #define VECT_COMPARE_S "check_single_vect4_comp4.c"
40 #define VECT_COMPARE_M "check_multi_vect4_comp4.c"
// Streebog L∘P∘S step for byte position i: XOR-combines one entry from each of
// the eight 256-entry lookup rows, selecting row j's entry by byte i of state
// word t[j]. The tables (s_sbob_sl64, loaded into shared memory by the
// kernels below) precompute the combined substitution/permutation/linear
// transform, so twelve rounds reduce to table lookups and XORs.
// Expects `t` (u64[8]), `i` (byte index 0..7) and `s_sbob_sl64` in scope.
#define SBOG_LPSti64 \
  s_sbob_sl64[0][(t[0] >> (i * 8)) & 0xff] ^ \
  s_sbob_sl64[1][(t[1] >> (i * 8)) & 0xff] ^ \
  s_sbob_sl64[2][(t[2] >> (i * 8)) & 0xff] ^ \
  s_sbob_sl64[3][(t[3] >> (i * 8)) & 0xff] ^ \
  s_sbob_sl64[4][(t[4] >> (i * 8)) & 0xff] ^ \
  s_sbob_sl64[5][(t[5] >> (i * 8)) & 0xff] ^ \
  s_sbob_sl64[6][(t[6] >> (i * 8)) & 0xff] ^ \
  s_sbob_sl64[7][(t[7] >> (i * 8)) & 0xff]
58 __device__ __constant__ u64 sbob_sl64[8][256] =
2126 __device__ __constant__ u64 sbob_rc64[12][8] =
// Streebog (GOST R 34.11-2012) compression function g:
//   h <- g_N(h, m) = E(LPS(h ^ N), m) ^ h ^ m
// h: 512-bit chaining value (8x u64, in/out); m: 512-bit message block;
// s_sbob_sl64: shared-memory copy of the 8x256 combined-transform tables.
// NOTE(review): the locals k/s/t, braces, and several statements between the
// loops below are elided from this view of the file — comments describe the
// visible structure only.
__device__ static void streebog_g (u64 h[8], const u64 m[8], u64 s_sbob_sl64[8][256])
  // (elided) t = h ^ N-dependent input
  for (int i = 0; i < 8; i++)
  // derive the first round key k = LPS(t)
  for (int i = 0; i < 8; i++)
    k[i] = SBOG_LPSti64;
  // (elided) load message block into t for the E() cipher pass
  for (int i = 0; i < 8; i++)
  // 12 rounds of the internal cipher E(k, m)
  for (int r = 0; r < 12; r++)
    // s = LPS(state) — round transformation of the data path
    for (int i = 0; i < 8; i++)
    for (int i = 0; i < 8; i++)
      s[i] = SBOG_LPSti64;
    // key schedule: mix in round constant C[r], then k = LPS(k ^ C[r])
    for (int i = 0; i < 8; i++)
      t[i] = k[i] ^ sbob_rc64[r][i];
    for (int i = 0; i < 8; i++)
      k[i] = SBOG_LPSti64;
  // Miyaguchi–Preneel-style feed-forward: h ^= E(...) ^ h_in ^ m
  for (int i = 0; i < 8; i++)
    h[i] ^= s[i] ^ k[i] ^ m[i];
2306 __device__ __constant__ comb_t c_combs[1024];
/**
 * m11800_m04: GOST R 34.11-2012 (Streebog-512) combinator-attack kernel,
 * multi-hash comparison path. One password candidate (left word | right word)
 * per thread; 1D launch, 256 threads per block (see __launch_bounds__).
 *
 * Fix: w[14]/w[15] previously duplicated the w[13] assignment (copy-paste),
 * corrupting bytes 56..63 of the message block for long candidates; they now
 * use wordl3[2]/wordr3[2] and wordl3[3]/wordr3[3] as the w[0..13] pattern
 * requires.
 */
extern "C" __global__ void __launch_bounds__ (256, 1) m11800_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 lid = threadIdx.x;

  /**
   * shared lookup table
   */

  __shared__ u64 s_sbob_sl64[8][256];

  // Each participating thread copies 4 consecutive entries per table row from
  // constant to shared memory.
  // NOTE(review): with 256 threads/block, lid4 + 3 reaches 1023, past the
  // 256-entry rows — presumably a "lid < 64" guard plus a __syncthreads()
  // live in the lines elided from this view; confirm against the full file.
  const u32 lid4 = lid * 4;

  s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0];
  s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1];
  s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2];
  s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3];
  s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0];
  s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1];
  s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2];
  s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3];
  s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0];
  s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1];
  s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2];
  s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3];
  s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0];
  s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1];
  s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2];
  s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3];
  s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0];
  s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1];
  s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2];
  s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3];
  s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0];
  s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1];
  s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2];
  s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3];
  s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0];
  s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1];
  s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2];
  s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3];
  s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0];
  s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1];
  s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2];
  s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3];

  // flat 1D global thread id; one base (left-side) password per thread
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  // left-side (base) word, 32 bytes max
  wordl0[0] = pws[gid].i[ 0];
  wordl0[1] = pws[gid].i[ 1];
  wordl0[2] = pws[gid].i[ 2];
  wordl0[3] = pws[gid].i[ 3];

  wordl1[0] = pws[gid].i[ 4];
  wordl1[1] = pws[gid].i[ 5];
  wordl1[2] = pws[gid].i[ 6];
  wordl1[3] = pws[gid].i[ 7];

  const u32 pw_l_len = pws[gid].pw_len;

  // BASE_RIGHT: base word goes on the right, so shift it past the combinator
  // word and terminate it first
  if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)

    append_0x80_2 (wordl0, wordl1, pw_l_len);

    switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, c_combs[0].pw_len);

  /**
   * loop over combinator (right-side) candidates
   */

  for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)

    const u32 pw_r_len = c_combs[il_pos].pw_len;

    const u32 pw_len = pw_l_len + pw_r_len;

    wordr0[0] = c_combs[il_pos].i[0];
    wordr0[1] = c_combs[il_pos].i[1];
    wordr0[2] = c_combs[il_pos].i[2];
    wordr0[3] = c_combs[il_pos].i[3];

    wordr1[0] = c_combs[il_pos].i[4];
    wordr1[1] = c_combs[il_pos].i[5];
    wordr1[2] = c_combs[il_pos].i[6];
    wordr1[3] = c_combs[il_pos].i[7];

    // BASE_LEFT: combinator word goes on the right, shift it past the base
    if (combs_mode == COMBINATOR_MODE_BASE_LEFT)

      switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);

    // merge left and right halves into the 64-byte message buffer
    w[ 0] = wordl0[0] | wordr0[0];
    w[ 1] = wordl0[1] | wordr0[1];
    w[ 2] = wordl0[2] | wordr0[2];
    w[ 3] = wordl0[3] | wordr0[3];
    w[ 4] = wordl1[0] | wordr1[0];
    w[ 5] = wordl1[1] | wordr1[1];
    w[ 6] = wordl1[2] | wordr1[2];
    w[ 7] = wordl1[3] | wordr1[3];
    w[ 8] = wordl2[0] | wordr2[0];
    w[ 9] = wordl2[1] | wordr2[1];
    w[10] = wordl2[2] | wordr2[2];
    w[11] = wordl2[3] | wordr2[3];
    w[12] = wordl3[0] | wordr3[0];
    w[13] = wordl3[1] | wordr3[1];
    w[14] = wordl3[2] | wordr3[2]; // fixed: was wordl3[1] | wordr3[1] (copy-paste)
    w[15] = wordl3[3] | wordr3[3]; // fixed: was wordl3[1] | wordr3[1] (copy-paste)

    // Streebog message padding starts with a 0x01 byte after the password
    append_0x01_4 (&w[0], &w[1], &w[2], &w[3], pw_len);

    /**
     * reverse message block: Streebog processes the block with the byte
     * order inverted relative to the packed w[] layout
     */

    m[0] = hl32_to_64 (w[15], w[14]);
    m[1] = hl32_to_64 (w[13], w[12]);
    m[2] = hl32_to_64 (w[11], w[10]);
    m[3] = hl32_to_64 (w[ 9], w[ 8]);
    m[4] = hl32_to_64 (w[ 7], w[ 6]);
    m[5] = hl32_to_64 (w[ 5], w[ 4]);
    m[6] = hl32_to_64 (w[ 3], w[ 2]);
    m[7] = hl32_to_64 (w[ 1], w[ 0]);

    m[0] = swap_workaround (m[0]);
    m[1] = swap_workaround (m[1]);
    m[2] = swap_workaround (m[2]);
    m[3] = swap_workaround (m[3]);
    m[4] = swap_workaround (m[4]);
    m[5] = swap_workaround (m[5]);
    m[6] = swap_workaround (m[6]);
    m[7] = swap_workaround (m[7]);

    // state buffer (hash)

    // compress the (single) message block
    streebog_g (h, m, s_sbob_sl64);

    // finalization: fold in the bit length, then the checksum block
    z[7] = swap_workaround ((u64) (pw_len * 8));

    streebog_g (h, z, s_sbob_sl64);
    streebog_g (h, m, s_sbob_sl64);

    // first 128 bits of the digest, split for the comparison include
    const u32 r0 = l32_from_64 (h[0]);
    const u32 r1 = h32_from_64 (h[0]);
    const u32 r2 = l32_from_64 (h[1]);
    const u32 r3 = h32_from_64 (h[1]);

    #include VECT_COMPARE_M
2533 extern "C" __global__ void __launch_bounds__ (256, 1) m11800_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
2537 extern "C" __global__ void __launch_bounds__ (256, 1) m11800_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
/**
 * m11800_s04: GOST R 34.11-2012 (Streebog-512) combinator-attack kernel,
 * single-hash comparison path. Identical pipeline to m11800_m04, but loads
 * the one target digest into search[] and compares via VECT_COMPARE_S.
 *
 * Fix: w[14]/w[15] previously duplicated the w[13] assignment (copy-paste),
 * corrupting bytes 56..63 of the message block for long candidates; they now
 * use wordl3[2]/wordr3[2] and wordl3[3]/wordr3[3] as the w[0..13] pattern
 * requires.
 */
extern "C" __global__ void __launch_bounds__ (256, 1) m11800_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 lid = threadIdx.x;

  /**
   * shared lookup table
   */

  __shared__ u64 s_sbob_sl64[8][256];

  // Each participating thread copies 4 consecutive entries per table row from
  // constant to shared memory.
  // NOTE(review): with 256 threads/block, lid4 + 3 reaches 1023, past the
  // 256-entry rows — presumably a "lid < 64" guard plus a __syncthreads()
  // live in the lines elided from this view; confirm against the full file.
  const u32 lid4 = lid * 4;

  s_sbob_sl64[0][lid4 + 0] = sbob_sl64[0][lid4 + 0];
  s_sbob_sl64[0][lid4 + 1] = sbob_sl64[0][lid4 + 1];
  s_sbob_sl64[0][lid4 + 2] = sbob_sl64[0][lid4 + 2];
  s_sbob_sl64[0][lid4 + 3] = sbob_sl64[0][lid4 + 3];
  s_sbob_sl64[1][lid4 + 0] = sbob_sl64[1][lid4 + 0];
  s_sbob_sl64[1][lid4 + 1] = sbob_sl64[1][lid4 + 1];
  s_sbob_sl64[1][lid4 + 2] = sbob_sl64[1][lid4 + 2];
  s_sbob_sl64[1][lid4 + 3] = sbob_sl64[1][lid4 + 3];
  s_sbob_sl64[2][lid4 + 0] = sbob_sl64[2][lid4 + 0];
  s_sbob_sl64[2][lid4 + 1] = sbob_sl64[2][lid4 + 1];
  s_sbob_sl64[2][lid4 + 2] = sbob_sl64[2][lid4 + 2];
  s_sbob_sl64[2][lid4 + 3] = sbob_sl64[2][lid4 + 3];
  s_sbob_sl64[3][lid4 + 0] = sbob_sl64[3][lid4 + 0];
  s_sbob_sl64[3][lid4 + 1] = sbob_sl64[3][lid4 + 1];
  s_sbob_sl64[3][lid4 + 2] = sbob_sl64[3][lid4 + 2];
  s_sbob_sl64[3][lid4 + 3] = sbob_sl64[3][lid4 + 3];
  s_sbob_sl64[4][lid4 + 0] = sbob_sl64[4][lid4 + 0];
  s_sbob_sl64[4][lid4 + 1] = sbob_sl64[4][lid4 + 1];
  s_sbob_sl64[4][lid4 + 2] = sbob_sl64[4][lid4 + 2];
  s_sbob_sl64[4][lid4 + 3] = sbob_sl64[4][lid4 + 3];
  s_sbob_sl64[5][lid4 + 0] = sbob_sl64[5][lid4 + 0];
  s_sbob_sl64[5][lid4 + 1] = sbob_sl64[5][lid4 + 1];
  s_sbob_sl64[5][lid4 + 2] = sbob_sl64[5][lid4 + 2];
  s_sbob_sl64[5][lid4 + 3] = sbob_sl64[5][lid4 + 3];
  s_sbob_sl64[6][lid4 + 0] = sbob_sl64[6][lid4 + 0];
  s_sbob_sl64[6][lid4 + 1] = sbob_sl64[6][lid4 + 1];
  s_sbob_sl64[6][lid4 + 2] = sbob_sl64[6][lid4 + 2];
  s_sbob_sl64[6][lid4 + 3] = sbob_sl64[6][lid4 + 3];
  s_sbob_sl64[7][lid4 + 0] = sbob_sl64[7][lid4 + 0];
  s_sbob_sl64[7][lid4 + 1] = sbob_sl64[7][lid4 + 1];
  s_sbob_sl64[7][lid4 + 2] = sbob_sl64[7][lid4 + 2];
  s_sbob_sl64[7][lid4 + 3] = sbob_sl64[7][lid4 + 3];

  /**
   * digest: the single target hash words selected by the DGST_R* mapping
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  // flat 1D global thread id; one base (left-side) password per thread
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  // left-side (base) word, 32 bytes max
  wordl0[0] = pws[gid].i[ 0];
  wordl0[1] = pws[gid].i[ 1];
  wordl0[2] = pws[gid].i[ 2];
  wordl0[3] = pws[gid].i[ 3];

  wordl1[0] = pws[gid].i[ 4];
  wordl1[1] = pws[gid].i[ 5];
  wordl1[2] = pws[gid].i[ 6];
  wordl1[3] = pws[gid].i[ 7];

  const u32 pw_l_len = pws[gid].pw_len;

  // BASE_RIGHT: base word goes on the right, so shift it past the combinator
  // word and terminate it first
  if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)

    append_0x80_2 (wordl0, wordl1, pw_l_len);

    switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, c_combs[0].pw_len);

  /**
   * loop over combinator (right-side) candidates
   */

  for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)

    const u32 pw_r_len = c_combs[il_pos].pw_len;

    const u32 pw_len = pw_l_len + pw_r_len;

    wordr0[0] = c_combs[il_pos].i[0];
    wordr0[1] = c_combs[il_pos].i[1];
    wordr0[2] = c_combs[il_pos].i[2];
    wordr0[3] = c_combs[il_pos].i[3];

    wordr1[0] = c_combs[il_pos].i[4];
    wordr1[1] = c_combs[il_pos].i[5];
    wordr1[2] = c_combs[il_pos].i[6];
    wordr1[3] = c_combs[il_pos].i[7];

    // BASE_LEFT: combinator word goes on the right, shift it past the base
    if (combs_mode == COMBINATOR_MODE_BASE_LEFT)

      switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);

    // merge left and right halves into the 64-byte message buffer
    w[ 0] = wordl0[0] | wordr0[0];
    w[ 1] = wordl0[1] | wordr0[1];
    w[ 2] = wordl0[2] | wordr0[2];
    w[ 3] = wordl0[3] | wordr0[3];
    w[ 4] = wordl1[0] | wordr1[0];
    w[ 5] = wordl1[1] | wordr1[1];
    w[ 6] = wordl1[2] | wordr1[2];
    w[ 7] = wordl1[3] | wordr1[3];
    w[ 8] = wordl2[0] | wordr2[0];
    w[ 9] = wordl2[1] | wordr2[1];
    w[10] = wordl2[2] | wordr2[2];
    w[11] = wordl2[3] | wordr2[3];
    w[12] = wordl3[0] | wordr3[0];
    w[13] = wordl3[1] | wordr3[1];
    w[14] = wordl3[2] | wordr3[2]; // fixed: was wordl3[1] | wordr3[1] (copy-paste)
    w[15] = wordl3[3] | wordr3[3]; // fixed: was wordl3[1] | wordr3[1] (copy-paste)

    // Streebog message padding starts with a 0x01 byte after the password
    append_0x01_4 (&w[0], &w[1], &w[2], &w[3], pw_len);

    /**
     * reverse message block: Streebog processes the block with the byte
     * order inverted relative to the packed w[] layout
     */

    m[0] = hl32_to_64 (w[15], w[14]);
    m[1] = hl32_to_64 (w[13], w[12]);
    m[2] = hl32_to_64 (w[11], w[10]);
    m[3] = hl32_to_64 (w[ 9], w[ 8]);
    m[4] = hl32_to_64 (w[ 7], w[ 6]);
    m[5] = hl32_to_64 (w[ 5], w[ 4]);
    m[6] = hl32_to_64 (w[ 3], w[ 2]);
    m[7] = hl32_to_64 (w[ 1], w[ 0]);

    m[0] = swap_workaround (m[0]);
    m[1] = swap_workaround (m[1]);
    m[2] = swap_workaround (m[2]);
    m[3] = swap_workaround (m[3]);
    m[4] = swap_workaround (m[4]);
    m[5] = swap_workaround (m[5]);
    m[6] = swap_workaround (m[6]);
    m[7] = swap_workaround (m[7]);

    // state buffer (hash)

    // compress the (single) message block
    streebog_g (h, m, s_sbob_sl64);

    // finalization: fold in the bit length, then the checksum block
    z[7] = swap_workaround ((u64) (pw_len * 8));

    streebog_g (h, z, s_sbob_sl64);
    streebog_g (h, m, s_sbob_sl64);

    // first 128 bits of the digest, split for the comparison include
    const u32 r0 = l32_from_64 (h[0]);
    const u32 r1 = h32_from_64 (h[0]);
    const u32 r2 = l32_from_64 (h[1]);
    const u32 r3 = h32_from_64 (h[1]);

    #include VECT_COMPARE_S
2779 extern "C" __global__ void __launch_bounds__ (256, 1) m11800_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
2783 extern "C" __global__ void __launch_bounds__ (256, 1) m11800_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)