/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "types_ocl.c"

#define COMPARE_M "check_multi_vect1_comp4.c"
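
/*
 * m07400 is sha256crypt, the SHA-256 based crypt(3) scheme ($5$...). The
 * work is split across three kernels: m07400_init builds the starting
 * digest state and the P/S byte sequences, m07400_loop runs the iterated
 * rounds, and m07400_comp compares the final digest.
 */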
__constant u32 k_sha256[64] =
{
  SHA256C00, SHA256C01, SHA256C02, SHA256C03,
  SHA256C04, SHA256C05, SHA256C06, SHA256C07,
  SHA256C08, SHA256C09, SHA256C0a, SHA256C0b,
  SHA256C0c, SHA256C0d, SHA256C0e, SHA256C0f,
  SHA256C10, SHA256C11, SHA256C12, SHA256C13,
  SHA256C14, SHA256C15, SHA256C16, SHA256C17,
  SHA256C18, SHA256C19, SHA256C1a, SHA256C1b,
  SHA256C1c, SHA256C1d, SHA256C1e, SHA256C1f,
  SHA256C20, SHA256C21, SHA256C22, SHA256C23,
  SHA256C24, SHA256C25, SHA256C26, SHA256C27,
  SHA256C28, SHA256C29, SHA256C2a, SHA256C2b,
  SHA256C2c, SHA256C2d, SHA256C2e, SHA256C2f,
  SHA256C30, SHA256C31, SHA256C32, SHA256C33,
  SHA256C34, SHA256C35, SHA256C36, SHA256C37,
  SHA256C38, SHA256C39, SHA256C3a, SHA256C3b,
  SHA256C3c, SHA256C3d, SHA256C3e, SHA256C3f,
};
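
// Single-block SHA-256 compression function. Message words are kept
// little-endian in registers, so each word is byte-swapped on load.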
static void sha256_transform (const u32 w[16], u32 digest[8])
{
  u32 a = digest[0];
  u32 b = digest[1];
  u32 c = digest[2];
  u32 d = digest[3];
  u32 e = digest[4];
  u32 f = digest[5];
  u32 g = digest[6];
  u32 h = digest[7];

  u32 w0_t = swap_workaround (w[ 0]);
  u32 w1_t = swap_workaround (w[ 1]);
  u32 w2_t = swap_workaround (w[ 2]);
  u32 w3_t = swap_workaround (w[ 3]);
  u32 w4_t = swap_workaround (w[ 4]);
  u32 w5_t = swap_workaround (w[ 5]);
  u32 w6_t = swap_workaround (w[ 6]);
  u32 w7_t = swap_workaround (w[ 7]);
  u32 w8_t = swap_workaround (w[ 8]);
  u32 w9_t = swap_workaround (w[ 9]);
  u32 wa_t = swap_workaround (w[10]);
  u32 wb_t = swap_workaround (w[11]);
  u32 wc_t = swap_workaround (w[12]);
  u32 wd_t = swap_workaround (w[13]);
  u32 we_t = swap_workaround (w[14]);
  u32 wf_t = swap_workaround (w[15]);
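
  // Message schedule: one ROUND_EXPAND() derives the next 16 words as
  // w[t] = sigma1(w[t-2]) + w[t-7] + sigma0(w[t-15]) + w[t-16].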
  #define ROUND_EXPAND() \
  { \
    w0_t = SHA256_EXPAND (we_t, w9_t, w1_t, w0_t); \
    w1_t = SHA256_EXPAND (wf_t, wa_t, w2_t, w1_t); \
    w2_t = SHA256_EXPAND (w0_t, wb_t, w3_t, w2_t); \
    w3_t = SHA256_EXPAND (w1_t, wc_t, w4_t, w3_t); \
    w4_t = SHA256_EXPAND (w2_t, wd_t, w5_t, w4_t); \
    w5_t = SHA256_EXPAND (w3_t, we_t, w6_t, w5_t); \
    w6_t = SHA256_EXPAND (w4_t, wf_t, w7_t, w6_t); \
    w7_t = SHA256_EXPAND (w5_t, w0_t, w8_t, w7_t); \
    w8_t = SHA256_EXPAND (w6_t, w1_t, w9_t, w8_t); \
    w9_t = SHA256_EXPAND (w7_t, w2_t, wa_t, w9_t); \
    wa_t = SHA256_EXPAND (w8_t, w3_t, wb_t, wa_t); \
    wb_t = SHA256_EXPAND (w9_t, w4_t, wc_t, wb_t); \
    wc_t = SHA256_EXPAND (wa_t, w5_t, wd_t, wc_t); \
    wd_t = SHA256_EXPAND (wb_t, w6_t, we_t, wd_t); \
    we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); \
    wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); \
  }
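
  // Sixteen rounds per ROUND_STEP(); the a..h working variables are
  // "rotated" by shuffling the argument order instead of moving data.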
  #define ROUND_STEP(i) \
  { \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha256[i + 0]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha256[i + 1]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha256[i + 2]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha256[i + 3]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha256[i + 4]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha256[i + 5]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha256[i + 6]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, k_sha256[i + 7]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha256[i + 8]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha256[i + 9]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha256[i + 10]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha256[i + 11]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha256[i + 12]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha256[i + 13]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, k_sha256[i + 14]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha256[i + 15]); \
  }

  ROUND_STEP (0);

  for (int i = 16; i < 64; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  }

  digest[0] += a;
  digest[1] += b;
  digest[2] += c;
  digest[3] += d;
  digest[4] += e;
  digest[5] += f;
  digest[6] += g;
  digest[7] += h;
}
// Variant for final blocks whose word 14 (the upper half of the 64-bit
// message bit-length) is known to be zero: w[14] is forced to 0 so the
// caller does not have to clear leftover data in that word.
static void sha256_transform_no14 (const u32 w[16], u32 digest[8])
{
  u32 w_t[16];

  w_t[ 0] = w[ 0];
  w_t[ 1] = w[ 1];
  w_t[ 2] = w[ 2];
  w_t[ 3] = w[ 3];
  w_t[ 4] = w[ 4];
  w_t[ 5] = w[ 5];
  w_t[ 6] = w[ 6];
  w_t[ 7] = w[ 7];
  w_t[ 8] = w[ 8];
  w_t[ 9] = w[ 9];
  w_t[10] = w[10];
  w_t[11] = w[11];
  w_t[12] = w[12];
  w_t[13] = w[13];
  w_t[14] = 0;
  w_t[15] = w[15];

  sha256_transform (w_t, digest);
}
// Load the standard SHA-256 initial hash values.
static void init_ctx (u32 digest[8])
{
  digest[0] = SHA256M_A;
  digest[1] = SHA256M_B;
  digest[2] = SHA256M_C;
  digest[3] = SHA256M_D;
  digest[4] = SHA256M_E;
  digest[5] = SHA256M_F;
  digest[6] = SHA256M_G;
  digest[7] = SHA256M_H;
}
static void bzero16 (u32 block[16])
{
  block[ 0] = 0;
  block[ 1] = 0;
  block[ 2] = 0;
  block[ 3] = 0;
  block[ 4] = 0;
  block[ 5] = 0;
  block[ 6] = 0;
  block[ 7] = 0;
  block[ 8] = 0;
  block[ 9] = 0;
  block[10] = 0;
  block[11] = 0;
  block[12] = 0;
  block[13] = 0;
  block[14] = 0;
  block[15] = 0;
}
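
// Byte-swaps only the first eight words, i.e. one 32-byte SHA-256 digest
// stored at the start of a block.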
static void bswap8 (u32 block[16])
{
  block[ 0] = swap_workaround (block[ 0]);
  block[ 1] = swap_workaround (block[ 1]);
  block[ 2] = swap_workaround (block[ 2]);
  block[ 3] = swap_workaround (block[ 3]);
  block[ 4] = swap_workaround (block[ 4]);
  block[ 5] = swap_workaround (block[ 5]);
  block[ 6] = swap_workaround (block[ 6]);
  block[ 7] = swap_workaround (block[ 7]);
}
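
/*
 * The memcat* helpers append up to 16 bytes (append[4]) into a block at an
 * arbitrary byte offset block_len. On AMD devices amd_bytealign (hi, lo, s)
 * returns four bytes of the 64-bit value (hi:lo) shifted right by (s & 3)
 * bytes, so offset_minus_4 = 4 - block_len acts as 4 - mod. Worked example
 * with block_len = 5 (mod = 1, div = 1): the shift is 3 bytes, tmp0 becomes
 * append[0] << 8, and "case 1" ORs it into block[1], whose byte 0 is still
 * occupied; tmp1..tmp4 carry the bytes shifted out of each previous word.
 */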
static u32 memcat16 (u32 block[16], const u32 block_len, const u32 append[4], const u32 append_len)
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32 tmp0;
  u32 tmp1;
  u32 tmp2;
  u32 tmp3;
  u32 tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0],         0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign (        0, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0;
  }

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; break;
    case 15: block[15] |= tmp0; break;
  }

  u32 new_len = block_len + append_len;

  return new_len;
}
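
// Same append, but flushing: when the block fills past 64 bytes it is run
// through sha256_transform into digest, and the overflow bytes collected in
// carry[] become the start of the next block.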
static u32 memcat16c (u32 block[16], const u32 block_len, const u32 append[4], const u32 append_len, u32 digest[8])
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32 tmp0;
  u32 tmp1;
  u32 tmp2;
  u32 tmp3;
  u32 tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0],         0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign (        0, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0;
  }

  u32 carry[4] = { 0, 0, 0, 0 };

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; carry[0] = tmp4; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; carry[0] = tmp3; carry[1] = tmp4; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; carry[0] = tmp2; carry[1] = tmp3; carry[2] = tmp4; break;
    case 15: block[15] |= tmp0; carry[0] = tmp1; carry[1] = tmp2; carry[2] = tmp3; carry[3] = tmp4; break;
  }

  u32 new_len = block_len + append_len;

  if (new_len >= 64)
  {
    new_len -= 64;

    sha256_transform (block, digest);

    bzero16 (block);

    block[0] = carry[0];
    block[1] = carry[1];
    block[2] = carry[2];
    block[3] = carry[3];
  }

  return new_len;
}
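
// 80-byte (20-word) block variant used by the loop kernel; no automatic
// flush, the caller handles messages longer than one 64-byte block itself.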
static u32 memcat20 (u32 block[20], const u32 block_len, const u32 append[4], const u32 append_len)
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32 tmp0;
  u32 tmp1;
  u32 tmp2;
  u32 tmp3;
  u32 tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0],         0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign (        0, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0;
  }

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; block[16] = tmp4; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; block[16] = tmp3; block[17] = tmp4; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; block[16] = tmp2; block[17] = tmp3; block[18] = tmp4; break;
    case 15: block[15] |= tmp0; block[16] = tmp1; block[17] = tmp2; block[18] = tmp3; block[19] = tmp4; break;
  }

  return block_len + append_len;
}
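
// As memcat20, but also writes the 0x80 padding terminator directly after
// the appended data (note the 0x80 seeded into tmp4 and the mod == 0 case).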
static u32 memcat20_x80 (u32 block[20], const u32 block_len, const u32 append[4], const u32 append_len)
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32 tmp0;
  u32 tmp1;
  u32 tmp2;
  u32 tmp3;
  u32 tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0],         0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign (     0x80, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0x80;
  }

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; block[16] = tmp4; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; block[16] = tmp3; block[17] = tmp4; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; block[16] = tmp2; block[17] = tmp3; block[18] = tmp4; break;
    case 15: block[15] |= tmp0; block[16] = tmp1; block[17] = tmp2; block[18] = tmp3; block[19] = tmp4; break;
  }

  return block_len + append_len;
}
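
/*
 * init kernel: computes the sha256crypt starting state. Following the
 * reference implementation it derives the alternate sum
 * SHA256(key . salt . key), the initial digest A (alt_result), and the P
 * and S byte sequences that the loop kernel substitutes for the key and
 * the salt.
 */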
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07400_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32 w0[4];

  w0[0] = pws[gid].i[0];
  w0[1] = pws[gid].i[1];
  w0[2] = pws[gid].i[2];
  w0[3] = pws[gid].i[3];

  const u32 pw_len = pws[gid].pw_len;

  /**
   * salt
   */

  u32 salt_buf[4];

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];

  u32 salt_len = salt_bufs[salt_pos].salt_len;

  /**
   * init
   */

  u32 block_len;     // never reaches > 64
  u32 transform_len; // required for w[15] = len * 8

  u32 block[16];

  u32 alt_result[8];
  u32 p_bytes[8];
  u32 s_bytes[8];

  /* Prepare for the real work. */

  block_len = 0;

  bzero16 (block);

  block_len = memcat16 (block, block_len, w0, pw_len);

  block_len = memcat16 (block, block_len, salt_buf, salt_len);

  block_len = memcat16 (block, block_len, w0, pw_len);

  append_0x80_4 (block, block_len);

  block[15] = swap_workaround (block_len * 8);

  init_ctx (alt_result);

  sha256_transform (block, alt_result);
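
  /* alt_result now holds the alternate sum SHA256(key . salt . key). */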

  block_len = 0;

  bzero16 (block);

  u32 alt_result_tmp[8];

  alt_result_tmp[0] = alt_result[0];
  alt_result_tmp[1] = alt_result[1];
  alt_result_tmp[2] = alt_result[2];
  alt_result_tmp[3] = alt_result[3];
  alt_result_tmp[4] = 0;
  alt_result_tmp[5] = 0;
  alt_result_tmp[6] = 0;
  alt_result_tmp[7] = 0;

  truncate_block (alt_result_tmp, pw_len);

  /* Add the key string. */

  block_len = memcat16 (block, block_len, w0, pw_len);

  /* The last part is the salt string. This must be at most 8
     characters and it ends at the first `$' character (for
     compatibility with existing implementations). */

  block_len = memcat16 (block, block_len, salt_buf, salt_len);

  /* Now get result of this (32 bytes) and add it to the other
     context. */

  block_len = memcat16 (block, block_len, alt_result_tmp, pw_len);

  transform_len = block_len;

  /* Take the binary representation of the length of the key and for every
     1 add the alternate sum, for every 0 the key. */
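
  // Bits are consumed least-significant first: for pw_len = 4 (100b) the
  // sequence is key, key, alt; for pw_len = 7 (111b) it is alt, alt, alt.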

  alt_result_tmp[0] = alt_result[0];
  alt_result_tmp[1] = alt_result[1];
  alt_result_tmp[2] = alt_result[2];
  alt_result_tmp[3] = alt_result[3];
  alt_result_tmp[4] = alt_result[4];
  alt_result_tmp[5] = alt_result[5];
  alt_result_tmp[6] = alt_result[6];
  alt_result_tmp[7] = alt_result[7];

  init_ctx (alt_result);

  for (u32 j = pw_len; j; j >>= 1)
  {
    if (j & 1)
    {
      block_len = memcat16c (block, block_len, &alt_result_tmp[0], 16, alt_result);
      block_len = memcat16c (block, block_len, &alt_result_tmp[4], 16, alt_result);

      transform_len += 32;
    }
    else
    {
      block_len = memcat16c (block, block_len, w0, pw_len, alt_result);

      transform_len += pw_len;
    }
  }

  append_0x80_4 (block, block_len);

  if (block_len >= 56)
  {
    sha256_transform (block, alt_result);

    bzero16 (block);
  }

  block[15] = swap_workaround (transform_len * 8);

  sha256_transform (block, alt_result);

  bswap8 (alt_result);

  tmps[gid].alt_result[0] = alt_result[0];
  tmps[gid].alt_result[1] = alt_result[1];
  tmps[gid].alt_result[2] = alt_result[2];
  tmps[gid].alt_result[3] = alt_result[3];
  tmps[gid].alt_result[4] = alt_result[4];
  tmps[gid].alt_result[5] = alt_result[5];
  tmps[gid].alt_result[6] = alt_result[6];
  tmps[gid].alt_result[7] = alt_result[7];

  /* Start computation of P byte sequence. */
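
  // P is the digest of the password repeated pw_len times; its first
  // pw_len bytes later stand in for the password itself.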

  block_len = 0;

  transform_len = 0;

  bzero16 (block);

  /* For every character in the password add the entire password. */

  init_ctx (p_bytes);

  for (u32 j = 0; j < pw_len; j++)
  {
    block_len = memcat16c (block, block_len, w0, pw_len, p_bytes);

    transform_len += pw_len;
  }

  /* Finish the digest. */

  append_0x80_4 (block, block_len);

  if (block_len >= 56)
  {
    sha256_transform (block, p_bytes);

    bzero16 (block);
  }

  block[15] = swap_workaround (transform_len * 8);

  sha256_transform (block, p_bytes);

  bswap8 (p_bytes);

  truncate_block (p_bytes, pw_len);

  tmps[gid].p_bytes[0] = p_bytes[0];
  tmps[gid].p_bytes[1] = p_bytes[1];
  tmps[gid].p_bytes[2] = p_bytes[2];
  tmps[gid].p_bytes[3] = p_bytes[3];

  /* Start computation of S byte sequence. */
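
  // S is the digest of the salt repeated 16 + (first byte of digest A)
  // times; its first salt_len bytes later stand in for the salt.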

  block_len = 0;

  transform_len = 0;

  bzero16 (block);

  /* Repeatedly add the salt. */

  init_ctx (s_bytes);

  for (u32 j = 0; j < 16 + (alt_result[0] & 0xff); j++)
  {
    block_len = memcat16c (block, block_len, salt_buf, salt_len, s_bytes);

    transform_len += salt_len;
  }

  /* Finish the digest. */

  append_0x80_4 (block, block_len);

  if (block_len >= 56)
  {
    sha256_transform (block, s_bytes);

    bzero16 (block);
  }

  block[15] = swap_workaround (transform_len * 8);

  sha256_transform (block, s_bytes);

  bswap8 (s_bytes);

  truncate_block (s_bytes, salt_len);

  tmps[gid].s_bytes[0] = s_bytes[0];
  tmps[gid].s_bytes[1] = s_bytes[1];
  tmps[gid].s_bytes[2] = s_bytes[2];
  tmps[gid].s_bytes[3] = s_bytes[3];
}
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07400_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 pw_len = pws[gid].pw_len;

  u32 p_bytes[4];

  p_bytes[0] = tmps[gid].p_bytes[0];
  p_bytes[1] = tmps[gid].p_bytes[1];
  p_bytes[2] = tmps[gid].p_bytes[2];
  p_bytes[3] = tmps[gid].p_bytes[3];

  // copy of P with the final 0x80 padding byte already appended
  u32 p_bytes_x80[4];

  p_bytes_x80[0] = tmps[gid].p_bytes[0];
  p_bytes_x80[1] = tmps[gid].p_bytes[1];
  p_bytes_x80[2] = tmps[gid].p_bytes[2];
  p_bytes_x80[3] = tmps[gid].p_bytes[3];

  append_0x80_1 (p_bytes_x80, pw_len);

  u32 s_bytes[4];

  s_bytes[0] = tmps[gid].s_bytes[0];
  s_bytes[1] = tmps[gid].s_bytes[1];
  s_bytes[2] = tmps[gid].s_bytes[2];
  s_bytes[3] = tmps[gid].s_bytes[3];

  u32 alt_result[8];

  alt_result[0] = tmps[gid].alt_result[0];
  alt_result[1] = tmps[gid].alt_result[1];
  alt_result[2] = tmps[gid].alt_result[2];
  alt_result[3] = tmps[gid].alt_result[3];
  alt_result[4] = tmps[gid].alt_result[4];
  alt_result[5] = tmps[gid].alt_result[5];
  alt_result[6] = tmps[gid].alt_result[6];
  alt_result[7] = tmps[gid].alt_result[7];

  u32 salt_len = salt_bufs[salt_pos].salt_len;

  /* Repeatedly run the collected hash value through SHA256 to burn
     CPU cycles. */

  for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++)
  {
    u32 tmp[8];

    init_ctx (tmp);

    u32 block[32];

    bzero16 (&block[ 0]);
    bzero16 (&block[16]);

    u32 block_len = 0;

    const u32 j1 = (j & 1) ? 1 : 0;
    const u32 j3 = (j % 3) ? 1 : 0;
    const u32 j7 = (j % 7) ? 1 : 0;
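
    // Round recipe from the sha256crypt spec: start from the key (odd
    // rounds) or the previous digest (even rounds), add the salt unless j
    // is divisible by 3, add the key unless j is divisible by 7, then end
    // with whichever of digest/key did not start the message.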

    if (j1)
    {
      block[0] = p_bytes[0];
      block[1] = p_bytes[1];
      block[2] = p_bytes[2];
      block[3] = p_bytes[3];

      block_len = pw_len;
    }
    else
    {
      block[0] = alt_result[0];
      block[1] = alt_result[1];
      block[2] = alt_result[2];
      block[3] = alt_result[3];
      block[4] = alt_result[4];
      block[5] = alt_result[5];
      block[6] = alt_result[6];
      block[7] = alt_result[7];

      block_len = 32;
    }

    if (j3)
    {
      block_len = memcat20 (block, block_len, s_bytes, salt_len);
    }

    if (j7)
    {
      block_len = memcat20 (block, block_len, p_bytes, pw_len);
    }

    if (j1)
    {
      block_len = memcat20     (block, block_len, &alt_result[0], 16);
      block_len = memcat20_x80 (block, block_len, &alt_result[4], 16);
    }
    else
    {
      block_len = memcat20 (block, block_len, p_bytes_x80, pw_len);
    }
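
    // The message can spill past one 64-byte block; if so, compress the
    // first block now and move the tail down before the final padding.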

    if (block_len >= 56)
    {
      sha256_transform (block, tmp);

      block[ 0] = block[16];
      block[ 1] = block[17];
      block[ 2] = block[18];
      block[ 3] = block[19];

      block[ 4] = 0;
      block[ 5] = 0;
      block[ 6] = 0;
      block[ 7] = 0;
      block[ 8] = 0;
      block[ 9] = 0;
      block[10] = 0;
      block[11] = 0;
      block[12] = 0;
      block[13] = 0;
      block[14] = 0;
    }

    block[15] = swap_workaround (block_len * 8);

    sha256_transform_no14 (block, tmp);

    bswap8 (tmp);

    alt_result[0] = tmp[0];
    alt_result[1] = tmp[1];
    alt_result[2] = tmp[2];
    alt_result[3] = tmp[3];
    alt_result[4] = tmp[4];
    alt_result[5] = tmp[5];
    alt_result[6] = tmp[6];
    alt_result[7] = tmp[7];
  }

  tmps[gid].alt_result[0] = alt_result[0];
  tmps[gid].alt_result[1] = alt_result[1];
  tmps[gid].alt_result[2] = alt_result[2];
  tmps[gid].alt_result[3] = alt_result[3];
  tmps[gid].alt_result[4] = alt_result[4];
  tmps[gid].alt_result[5] = alt_result[5];
  tmps[gid].alt_result[6] = alt_result[6];
  tmps[gid].alt_result[7] = alt_result[7];
}
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07400_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 lid = get_local_id (0);

  const u32 r0 = tmps[gid].alt_result[0];
  const u32 r1 = tmps[gid].alt_result[1];
  const u32 r2 = tmps[gid].alt_result[2];
  const u32 r3 = tmps[gid].alt_result[3];

  // compare r0..r3 against the target digests via the shared include
  #define il_pos 0

  #include COMPARE_M
}