/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"
#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"

#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
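
/**
 * the 64 SHA-256 round constants (fractional parts of the cube roots of the
 * first 64 primes)
 */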
__constant u32 k_sha256[64] =
{
  SHA256C00, SHA256C01, SHA256C02, SHA256C03,
  SHA256C04, SHA256C05, SHA256C06, SHA256C07,
  SHA256C08, SHA256C09, SHA256C0a, SHA256C0b,
  SHA256C0c, SHA256C0d, SHA256C0e, SHA256C0f,
  SHA256C10, SHA256C11, SHA256C12, SHA256C13,
  SHA256C14, SHA256C15, SHA256C16, SHA256C17,
  SHA256C18, SHA256C19, SHA256C1a, SHA256C1b,
  SHA256C1c, SHA256C1d, SHA256C1e, SHA256C1f,
  SHA256C20, SHA256C21, SHA256C22, SHA256C23,
  SHA256C24, SHA256C25, SHA256C26, SHA256C27,
  SHA256C28, SHA256C29, SHA256C2a, SHA256C2b,
  SHA256C2c, SHA256C2d, SHA256C2e, SHA256C2f,
  SHA256C30, SHA256C31, SHA256C32, SHA256C33,
  SHA256C34, SHA256C35, SHA256C36, SHA256C37,
  SHA256C38, SHA256C39, SHA256C3a, SHA256C3b,
  SHA256C3c, SHA256C3d, SHA256C3e, SHA256C3f,
};
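
/**
 * sha256_transform: one SHA-256 compression of a 64-byte block into
 * digest[8]; the message words are byte-swapped to big-endian on load via
 * swap_workaround ()
 */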
static void sha256_transform (const u32x w[16], u32x digest[8])
{
  u32x a = digest[0];
  u32x b = digest[1];
  u32x c = digest[2];
  u32x d = digest[3];
  u32x e = digest[4];
  u32x f = digest[5];
  u32x g = digest[6];
  u32x h = digest[7];

  u32x w0_t = swap_workaround (w[ 0]);
  u32x w1_t = swap_workaround (w[ 1]);
  u32x w2_t = swap_workaround (w[ 2]);
  u32x w3_t = swap_workaround (w[ 3]);
  u32x w4_t = swap_workaround (w[ 4]);
  u32x w5_t = swap_workaround (w[ 5]);
  u32x w6_t = swap_workaround (w[ 6]);
  u32x w7_t = swap_workaround (w[ 7]);
  u32x w8_t = swap_workaround (w[ 8]);
  u32x w9_t = swap_workaround (w[ 9]);
  u32x wa_t = swap_workaround (w[10]);
  u32x wb_t = swap_workaround (w[11]);
  u32x wc_t = swap_workaround (w[12]);
  u32x wd_t = swap_workaround (w[13]);
  u32x we_t = swap_workaround (w[14]);
  u32x wf_t = swap_workaround (w[15]);
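
  // ROUND_EXPAND computes the next 16 words of the message schedule in place:
  // w[t] = sigma1 (w[t-2]) + w[t-7] + sigma0 (w[t-15]) + w[t-16]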
  #define ROUND_EXPAND()                            \
  {                                                 \
    w0_t = SHA256_EXPAND (we_t, w9_t, w1_t, w0_t);  \
    w1_t = SHA256_EXPAND (wf_t, wa_t, w2_t, w1_t);  \
    w2_t = SHA256_EXPAND (w0_t, wb_t, w3_t, w2_t);  \
    w3_t = SHA256_EXPAND (w1_t, wc_t, w4_t, w3_t);  \
    w4_t = SHA256_EXPAND (w2_t, wd_t, w5_t, w4_t);  \
    w5_t = SHA256_EXPAND (w3_t, we_t, w6_t, w5_t);  \
    w6_t = SHA256_EXPAND (w4_t, wf_t, w7_t, w6_t);  \
    w7_t = SHA256_EXPAND (w5_t, w0_t, w8_t, w7_t);  \
    w8_t = SHA256_EXPAND (w6_t, w1_t, w9_t, w8_t);  \
    w9_t = SHA256_EXPAND (w7_t, w2_t, wa_t, w9_t);  \
    wa_t = SHA256_EXPAND (w8_t, w3_t, wb_t, wa_t);  \
    wb_t = SHA256_EXPAND (w9_t, w4_t, wc_t, wb_t);  \
    wc_t = SHA256_EXPAND (wa_t, w5_t, wd_t, wc_t);  \
    wd_t = SHA256_EXPAND (wb_t, w6_t, we_t, wd_t);  \
    we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t);  \
    wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t);  \
  }
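
  // ROUND_STEP performs 16 rounds; instead of shuffling the working variables
  // at the end of every round, each step rotates a..h through its argument list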
  #define ROUND_STEP(i)                                                                    \
  {                                                                                        \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha256[i +  0]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha256[i +  1]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha256[i +  2]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha256[i +  3]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha256[i +  4]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha256[i +  5]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha256[i +  6]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, k_sha256[i +  7]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha256[i +  8]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha256[i +  9]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha256[i + 10]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha256[i + 11]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha256[i + 12]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha256[i + 13]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, k_sha256[i + 14]);  \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha256[i + 15]);  \
  }
  for (int i = 16; i < 64; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  }

  digest[0] += a;
  digest[1] += b;
  digest[2] += c;
  digest[3] += d;
  digest[4] += e;
  digest[5] += f;
  digest[6] += g;
  digest[7] += h;
}
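
/**
 * sha256_transform_no14: same compression with w[14] hardwired to zero; the
 * upper word of the 64-bit message length is always zero here, so the load
 * can be skipped
 */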
static void sha256_transform_no14 (const u32x w[16], u32x digest[8])
{
  u32x w_t[16];

  w_t[ 0] = w[ 0];
  w_t[ 1] = w[ 1];
  w_t[ 2] = w[ 2];
  w_t[ 3] = w[ 3];
  w_t[ 4] = w[ 4];
  w_t[ 5] = w[ 5];
  w_t[ 6] = w[ 6];
  w_t[ 7] = w[ 7];
  w_t[ 8] = w[ 8];
  w_t[ 9] = w[ 9];
  w_t[10] = w[10];
  w_t[11] = w[11];
  w_t[12] = w[12];
  w_t[13] = w[13];
  w_t[14] = 0;
  w_t[15] = w[15];

  sha256_transform (w_t, digest);
}
static void init_ctx (u32x digest[8])
{
  digest[0] = SHA256M_A;
  digest[1] = SHA256M_B;
  digest[2] = SHA256M_C;
  digest[3] = SHA256M_D;
  digest[4] = SHA256M_E;
  digest[5] = SHA256M_F;
  digest[6] = SHA256M_G;
  digest[7] = SHA256M_H;
}
static void bzero16 (u32x block[16])
{
  block[ 0] = 0;
  block[ 1] = 0;
  block[ 2] = 0;
  block[ 3] = 0;
  block[ 4] = 0;
  block[ 5] = 0;
  block[ 6] = 0;
  block[ 7] = 0;
  block[ 8] = 0;
  block[ 9] = 0;
  block[10] = 0;
  block[11] = 0;
  block[12] = 0;
  block[13] = 0;
  block[14] = 0;
  block[15] = 0;
}
static void bswap8 (u32x block[16])
{
  block[ 0] = swap_workaround (block[ 0]);
  block[ 1] = swap_workaround (block[ 1]);
  block[ 2] = swap_workaround (block[ 2]);
  block[ 3] = swap_workaround (block[ 3]);
  block[ 4] = swap_workaround (block[ 4]);
  block[ 5] = swap_workaround (block[ 5]);
  block[ 6] = swap_workaround (block[ 6]);
  block[ 7] = swap_workaround (block[ 7]);
}
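
/**
 * memcat16: append append_len bytes (at most 16) of append[] to block[] at
 * byte offset block_len. amd_bytealign (hi, lo, n) selects 4 bytes out of the
 * 64-bit window hi:lo shifted right by (n & 3) * 8 bits; e.g. for
 * block_len == 5 the shift is (4 - 5) & 3 == 3 bytes, so tmp0 == append[0] << 8
 * and case 1 below ORs it in behind the byte already held in block[1]. For
 * word-aligned offsets ((4 - block_len) & 3 == 0) bytealign would pick the
 * wrong operand, which is why mod == 0 takes the append words unshifted.
 */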
static u32 memcat16 (u32x block[16], const u32 block_len, const u32x append[4], const u32 append_len)
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32x tmp0;
  u32x tmp1;
  u32x tmp2;
  u32x tmp3;
  u32x tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign ( 0, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0;
  }

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; break;
    case 15: block[15] |= tmp0; break;
  }

  u32 new_len = block_len + append_len;

  return new_len;
}
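
/**
 * memcat16c: same as memcat16, but any bytes that spill past the 64-byte
 * block boundary are collected in carry[]; on overflow the full block is
 * compressed with sha256_transform () and the carry words restart the block
 */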
static u32 memcat16c (u32x block[16], const u32 block_len, const u32x append[4], const u32 append_len, u32x digest[8])
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32x tmp0;
  u32x tmp1;
  u32x tmp2;
  u32x tmp3;
  u32x tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign ( 0, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0;
  }

  u32x carry[4] = { 0, 0, 0, 0 };

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; carry[0] = tmp4; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; carry[0] = tmp3; carry[1] = tmp4; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; carry[0] = tmp2; carry[1] = tmp3; carry[2] = tmp4; break;
    case 15: block[15] |= tmp0; carry[0] = tmp1; carry[1] = tmp2; carry[2] = tmp3; carry[3] = tmp4; break;
  }

  u32 new_len = block_len + append_len;

  if (new_len >= 64)
  {
    new_len -= 64;

    sha256_transform (block, digest);

    bzero16 (block);

    block[0] = carry[0];
    block[1] = carry[1];
    block[2] = carry[2];
    block[3] = carry[3];
  }

  return new_len;
}
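
/**
 * memcat20: same append into a 20-word buffer, so up to 16 spilled bytes
 * stay in block[16..19] instead of being carried; the caller decides when
 * to transform
 */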
static u32 memcat20 (u32x block[20], const u32 block_len, const u32x append[4], const u32 append_len)
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32x tmp0;
  u32x tmp1;
  u32x tmp2;
  u32x tmp3;
  u32x tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign ( 0, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0;
  }

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; block[16] = tmp4; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; block[16] = tmp3; block[17] = tmp4; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; block[16] = tmp2; block[17] = tmp3; block[18] = tmp4; break;
    case 15: block[15] |= tmp0; block[16] = tmp1; block[17] = tmp2; block[18] = tmp3; block[19] = tmp4; break;
  }

  return block_len + append_len;
}
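
/**
 * memcat20_x80: memcat20 variant that shifts a 0x80 padding byte in right
 * behind the appended data
 */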
static u32 memcat20_x80 (u32x block[20], const u32 block_len, const u32x append[4], const u32 append_len)
{
  const u32 mod = block_len & 3;
  const u32 div = block_len / 4;

  u32x tmp0;
  u32x tmp1;
  u32x tmp2;
  u32x tmp3;
  u32x tmp4;

  const int offset_minus_4 = 4 - block_len;

  tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign ( 0x80, append[3], offset_minus_4);

  if (mod == 0)
  {
    tmp0 = append[0];
    tmp1 = append[1];
    tmp2 = append[2];
    tmp3 = append[3];
    tmp4 = 0x80;
  }

  switch (div)
  {
    case  0: block[ 0] |= tmp0; block[ 1] = tmp1; block[ 2] = tmp2; block[ 3] = tmp3; block[ 4] = tmp4; break;
    case  1: block[ 1] |= tmp0; block[ 2] = tmp1; block[ 3] = tmp2; block[ 4] = tmp3; block[ 5] = tmp4; break;
    case  2: block[ 2] |= tmp0; block[ 3] = tmp1; block[ 4] = tmp2; block[ 5] = tmp3; block[ 6] = tmp4; break;
    case  3: block[ 3] |= tmp0; block[ 4] = tmp1; block[ 5] = tmp2; block[ 6] = tmp3; block[ 7] = tmp4; break;
    case  4: block[ 4] |= tmp0; block[ 5] = tmp1; block[ 6] = tmp2; block[ 7] = tmp3; block[ 8] = tmp4; break;
    case  5: block[ 5] |= tmp0; block[ 6] = tmp1; block[ 7] = tmp2; block[ 8] = tmp3; block[ 9] = tmp4; break;
    case  6: block[ 6] |= tmp0; block[ 7] = tmp1; block[ 8] = tmp2; block[ 9] = tmp3; block[10] = tmp4; break;
    case  7: block[ 7] |= tmp0; block[ 8] = tmp1; block[ 9] = tmp2; block[10] = tmp3; block[11] = tmp4; break;
    case  8: block[ 8] |= tmp0; block[ 9] = tmp1; block[10] = tmp2; block[11] = tmp3; block[12] = tmp4; break;
    case  9: block[ 9] |= tmp0; block[10] = tmp1; block[11] = tmp2; block[12] = tmp3; block[13] = tmp4; break;
    case 10: block[10] |= tmp0; block[11] = tmp1; block[12] = tmp2; block[13] = tmp3; block[14] = tmp4; break;
    case 11: block[11] |= tmp0; block[12] = tmp1; block[13] = tmp2; block[14] = tmp3; block[15] = tmp4; break;
    case 12: block[12] |= tmp0; block[13] = tmp1; block[14] = tmp2; block[15] = tmp3; block[16] = tmp4; break;
    case 13: block[13] |= tmp0; block[14] = tmp1; block[15] = tmp2; block[16] = tmp3; block[17] = tmp4; break;
    case 14: block[14] |= tmp0; block[15] = tmp1; block[16] = tmp2; block[17] = tmp3; block[18] = tmp4; break;
    case 15: block[15] |= tmp0; block[16] = tmp1; block[17] = tmp2; block[18] = tmp3; block[19] = tmp4; break;
  }

  return block_len + append_len;
}
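
/**
 * m07400_init: sha256crypt ($5$) setup; computes the alternate sum, the
 * starting digest, and the P (password) and S (salt) byte sequences that
 * the loop kernel consumes
 */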
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07400_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[0];
  w0[1] = pws[gid].i[1];
  w0[2] = pws[gid].i[2];
  w0[3] = pws[gid].i[3];

  const u32 pw_len = pws[gid].pw_len;
  u32 salt_buf[4];

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];

  u32 salt_len = salt_bufs[salt_pos].salt_len;

  u32x block[16];

  u32 block_len;     // never reaches > 64
  u32 transform_len; // required for w[15] = len * 8

  u32x alt_result[8];
  /* Prepare for the real work. */

  block_len = 0;

  bzero16 (block);

  block_len = memcat16 (block, block_len, w0, pw_len);

  block_len = memcat16 (block, block_len, salt_buf, salt_len);

  block_len = memcat16 (block, block_len, w0, pw_len);

  append_0x80_4 (block, block_len);

  block[15] = swap_workaround (block_len * 8);

  init_ctx (alt_result);

  sha256_transform (block, alt_result);
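
  // alt_result now holds the alternate sum SHA256 (key . salt . key)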
  block_len = 0;

  bzero16 (block);

  u32x alt_result_tmp[8];

  alt_result_tmp[0] = alt_result[0];
  alt_result_tmp[1] = alt_result[1];
  alt_result_tmp[2] = alt_result[2];
  alt_result_tmp[3] = alt_result[3];
  alt_result_tmp[4] = 0;
  alt_result_tmp[5] = 0;
  alt_result_tmp[6] = 0;
  alt_result_tmp[7] = 0;

  truncate_block (alt_result_tmp, pw_len);
  /* Add the key string. */

  block_len = memcat16 (block, block_len, w0, pw_len);

  /* The last part is the salt string. This must be at most 8
     characters and it ends at the first `$' character (for
     compatibility with existing implementations). */

  block_len = memcat16 (block, block_len, salt_buf, salt_len);

  /* Now get result of this (32 bytes) and add it to the other
     buffer. */

  block_len = memcat16 (block, block_len, alt_result_tmp, pw_len);

  transform_len = block_len;
  /* Take the binary representation of the length of the key and for every
     1 add the alternate sum, for every 0 the key. */

  alt_result_tmp[0] = alt_result[0];
  alt_result_tmp[1] = alt_result[1];
  alt_result_tmp[2] = alt_result[2];
  alt_result_tmp[3] = alt_result[3];
  alt_result_tmp[4] = alt_result[4];
  alt_result_tmp[5] = alt_result[5];
  alt_result_tmp[6] = alt_result[6];
  alt_result_tmp[7] = alt_result[7];

  init_ctx (alt_result);
  for (u32 j = pw_len; j; j >>= 1)
  {
    if (j & 1)
    {
      block_len = memcat16c (block, block_len, &alt_result_tmp[0], 16, alt_result);
      block_len = memcat16c (block, block_len, &alt_result_tmp[4], 16, alt_result);

      transform_len += 32;
    }
    else
    {
      block_len = memcat16c (block, block_len, w0, pw_len, alt_result);

      transform_len += pw_len;
    }
  }
  append_0x80_4 (block, block_len);

  if (block_len >= 56)
  {
    sha256_transform (block, alt_result);

    bzero16 (block);
  }

  block[15] = swap_workaround (transform_len * 8);

  sha256_transform (block, alt_result);
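
  // alt_result now holds digest A, the starting point for the main rounds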
  tmps[gid].alt_result[0] = alt_result[0];
  tmps[gid].alt_result[1] = alt_result[1];
  tmps[gid].alt_result[2] = alt_result[2];
  tmps[gid].alt_result[3] = alt_result[3];
  tmps[gid].alt_result[4] = alt_result[4];
  tmps[gid].alt_result[5] = alt_result[5];
  tmps[gid].alt_result[6] = alt_result[6];
  tmps[gid].alt_result[7] = alt_result[7];
  /* Start computation of P byte sequence. */

  block_len = 0;

  transform_len = 0;

  bzero16 (block);

  u32x p_bytes[8];

  init_ctx (p_bytes);

  /* For every character in the password add the entire password. */

  for (u32 j = 0; j < pw_len; j++)
  {
    block_len = memcat16c (block, block_len, w0, pw_len, p_bytes);

    transform_len += pw_len;
  }
  /* Finish the digest. */

  append_0x80_4 (block, block_len);

  if (block_len >= 56)
  {
    sha256_transform (block, p_bytes);

    bzero16 (block);
  }

  block[15] = swap_workaround (transform_len * 8);

  sha256_transform (block, p_bytes);

  truncate_block (p_bytes, pw_len);
  tmps[gid].p_bytes[0] = p_bytes[0];
  tmps[gid].p_bytes[1] = p_bytes[1];
  tmps[gid].p_bytes[2] = p_bytes[2];
  tmps[gid].p_bytes[3] = p_bytes[3];
  /* Start computation of S byte sequence. */

  block_len = 0;

  transform_len = 0;

  bzero16 (block);

  u32x s_bytes[8];

  init_ctx (s_bytes);

  /* For every character in the password add the entire password. */

  for (u32 j = 0; j < 16 + (alt_result[0] & 0xff); j++)
  {
    block_len = memcat16c (block, block_len, salt_buf, salt_len, s_bytes);

    transform_len += salt_len;
  }
  /* Finish the digest. */

  append_0x80_4 (block, block_len);

  if (block_len >= 56)
  {
    sha256_transform (block, s_bytes);

    bzero16 (block);
  }

  block[15] = swap_workaround (transform_len * 8);

  sha256_transform (block, s_bytes);

  truncate_block (s_bytes, salt_len);
  tmps[gid].s_bytes[0] = s_bytes[0];
  tmps[gid].s_bytes[1] = s_bytes[1];
  tmps[gid].s_bytes[2] = s_bytes[2];
  tmps[gid].s_bytes[3] = s_bytes[3];
}
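
/**
 * m07400_loop: executes loop_cnt iterations of the sha256crypt main loop
 * (5000 rounds by default), rebuilding the message for each round from
 * p_bytes, s_bytes and the previous digest
 */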
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07400_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 pw_len = pws[gid].pw_len;

  u32x p_bytes[4];

  p_bytes[0] = tmps[gid].p_bytes[0];
  p_bytes[1] = tmps[gid].p_bytes[1];
  p_bytes[2] = tmps[gid].p_bytes[2];
  p_bytes[3] = tmps[gid].p_bytes[3];
  u32x p_bytes_x80[4];

  p_bytes_x80[0] = tmps[gid].p_bytes[0];
  p_bytes_x80[1] = tmps[gid].p_bytes[1];
  p_bytes_x80[2] = tmps[gid].p_bytes[2];
  p_bytes_x80[3] = tmps[gid].p_bytes[3];

  append_0x80_1 (p_bytes_x80, pw_len);
  u32x s_bytes[4];

  s_bytes[0] = tmps[gid].s_bytes[0];
  s_bytes[1] = tmps[gid].s_bytes[1];
  s_bytes[2] = tmps[gid].s_bytes[2];
  s_bytes[3] = tmps[gid].s_bytes[3];
  u32x alt_result[8];

  alt_result[0] = tmps[gid].alt_result[0];
  alt_result[1] = tmps[gid].alt_result[1];
  alt_result[2] = tmps[gid].alt_result[2];
  alt_result[3] = tmps[gid].alt_result[3];
  alt_result[4] = tmps[gid].alt_result[4];
  alt_result[5] = tmps[gid].alt_result[5];
  alt_result[6] = tmps[gid].alt_result[6];
  alt_result[7] = tmps[gid].alt_result[7];

  u32 salt_len = salt_bufs[salt_pos].salt_len;
  /* Repeatedly run the collected hash value through SHA256 to burn
     CPU cycles. */

  for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++)
  {
    u32x tmp[8];

    init_ctx (tmp);

    u32x block[32];

    u32 block_len;

    bzero16 (&block[ 0]);
    bzero16 (&block[16]);
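
    // sha256crypt varies each round's input on j mod 2, j mod 3 and j mod 7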
    const u32 j1 = (j & 1) ? 1 : 0;
    const u32 j3 = (j % 3) ? 1 : 0;
    const u32 j7 = (j % 7) ? 1 : 0;

    if (j1)
    {
      block_len = pw_len;

      block[0] = p_bytes[0];
      block[1] = p_bytes[1];
      block[2] = p_bytes[2];
      block[3] = p_bytes[3];
    }
    else
    {
      block_len = 32;

      block[0] = alt_result[0];
      block[1] = alt_result[1];
      block[2] = alt_result[2];
      block[3] = alt_result[3];
      block[4] = alt_result[4];
      block[5] = alt_result[5];
      block[6] = alt_result[6];
      block[7] = alt_result[7];
    }
    if (j3)
    {
      block_len = memcat20 (block, block_len, s_bytes, salt_len);
    }

    if (j7)
    {
      block_len = memcat20 (block, block_len, p_bytes, pw_len);
    }

    if (j1)
    {
      block_len = memcat20 (block, block_len, &alt_result[0], 16);
      block_len = memcat20_x80 (block, block_len, &alt_result[4], 16);
    }
    else
    {
      block_len = memcat20 (block, block_len, p_bytes_x80, pw_len);
    }
    if (block_len >= 56)
    {
      sha256_transform (block, tmp);

      block[ 0] = block[16];
      block[ 1] = block[17];
      block[ 2] = block[18];
      block[ 3] = block[19];

      block[ 4] = 0;
      block[ 5] = 0;
      block[ 6] = 0;
      block[ 7] = 0;
      block[ 8] = 0;
      block[ 9] = 0;
      block[10] = 0;
      block[11] = 0;
      block[12] = 0;
      block[13] = 0;
      block[14] = 0;
      block[15] = 0;
    }
    block[15] = swap_workaround (block_len * 8);

    sha256_transform_no14 (block, tmp);
    alt_result[0] = tmp[0];
    alt_result[1] = tmp[1];
    alt_result[2] = tmp[2];
    alt_result[3] = tmp[3];
    alt_result[4] = tmp[4];
    alt_result[5] = tmp[5];
    alt_result[6] = tmp[6];
    alt_result[7] = tmp[7];
  }
  tmps[gid].alt_result[0] = alt_result[0];
  tmps[gid].alt_result[1] = alt_result[1];
  tmps[gid].alt_result[2] = alt_result[2];
  tmps[gid].alt_result[3] = alt_result[3];
  tmps[gid].alt_result[4] = alt_result[4];
  tmps[gid].alt_result[5] = alt_result[5];
  tmps[gid].alt_result[6] = alt_result[6];
  tmps[gid].alt_result[7] = alt_result[7];
}
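
/**
 * m07400_comp: final comparison; hands the first four digest words to the
 * shared comparison include selected by VECT_COMPARE_M
 */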
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m07400_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 lid = get_local_id (0);

  const u32x r0 = tmps[gid].alt_result[0];
  const u32x r1 = tmps[gid].alt_result[1];
  const u32x r2 = tmps[gid].alt_result[2];
  const u32x r3 = tmps[gid].alt_result[3];

  #include VECT_COMPARE_M
}