 * Author......: Jens Steube <jens.steube@gmail.com>

#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"

#include "types_amd.c"
#include "common_amd.c"

#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
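
/**
 * SHA-1 compression function on vectorized u32x words. All 80 rounds are
 * unrolled: rounds 0-19 use SHA1_F0o, 20-39 and 60-79 use SHA1_F1, and
 * 40-59 use SHA1_F2o.
 */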
static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
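
  /*
   * Rounds 16-79 expand the message schedule on the fly:
   * w[t] = rotl32 (w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1).
   */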
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
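
/**
 * Precompute the HMAC-SHA1 pads: the key block is XORed with 0x36 (ipad) and
 * hashed, then flipped to the 0x5c (opad) pattern and hashed again, so later
 * HMAC calls can start from these two fixed states.
 */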
static void hmac_sha1_pad (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[5], u32x opad[5])

  w0[0] = w0[0] ^ 0x36363636;
  w0[1] = w0[1] ^ 0x36363636;
  w0[2] = w0[2] ^ 0x36363636;
  w0[3] = w0[3] ^ 0x36363636;
  w1[0] = w1[0] ^ 0x36363636;
  w1[1] = w1[1] ^ 0x36363636;
  w1[2] = w1[2] ^ 0x36363636;
  w1[3] = w1[3] ^ 0x36363636;
  w2[0] = w2[0] ^ 0x36363636;
  w2[1] = w2[1] ^ 0x36363636;
  w2[2] = w2[2] ^ 0x36363636;
  w2[3] = w2[3] ^ 0x36363636;
  w3[0] = w3[0] ^ 0x36363636;
  w3[1] = w3[1] ^ 0x36363636;
  w3[2] = w3[2] ^ 0x36363636;
  w3[3] = w3[3] ^ 0x36363636;

  sha1_transform (w0, w1, w2, w3, ipad);
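
  /* 0x36 ^ 0x5c = 0x6a, so a single XOR turns the ipad-masked key block into
     the opad-masked key block. */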
  w0[0] = w0[0] ^ 0x6a6a6a6a;
  w0[1] = w0[1] ^ 0x6a6a6a6a;
  w0[2] = w0[2] ^ 0x6a6a6a6a;
  w0[3] = w0[3] ^ 0x6a6a6a6a;
  w1[0] = w1[0] ^ 0x6a6a6a6a;
  w1[1] = w1[1] ^ 0x6a6a6a6a;
  w1[2] = w1[2] ^ 0x6a6a6a6a;
  w1[3] = w1[3] ^ 0x6a6a6a6a;
  w2[0] = w2[0] ^ 0x6a6a6a6a;
  w2[1] = w2[1] ^ 0x6a6a6a6a;
  w2[2] = w2[2] ^ 0x6a6a6a6a;
  w2[3] = w2[3] ^ 0x6a6a6a6a;
  w3[0] = w3[0] ^ 0x6a6a6a6a;
  w3[1] = w3[1] ^ 0x6a6a6a6a;
  w3[2] = w3[2] ^ 0x6a6a6a6a;
  w3[3] = w3[3] ^ 0x6a6a6a6a;

  sha1_transform (w0, w1, w2, w3, opad);
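
/**
 * One HMAC-SHA1 pass using the precomputed pads: hash the message block on
 * top of the ipad state, then hash the 20-byte inner digest on top of the
 * opad state (outer message length 64 + 20 bytes).
 */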
static void hmac_sha1_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[5], u32x opad[5], u32x digest[5])

  sha1_transform (w0, w1, w2, w3, digest);

  w3[3] = (64 + 20) * 8;

  sha1_transform (w0, w1, w2, w3, digest);
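
/**
 * PBKDF2-HMAC-SHA1 init kernel: derive the per-password ipad/opad states and
 * compute the first iteration U_1 = HMAC (password, salt || INT (1)), which
 * seeds the running digest and the output accumulator in tmps[].
 */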
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m12000_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pbkdf2_sha1_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global pbkdf2_sha1_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  w0[0] = swap_workaround (pws[gid].i[ 0]);
  w0[1] = swap_workaround (pws[gid].i[ 1]);
  w0[2] = swap_workaround (pws[gid].i[ 2]);
  w0[3] = swap_workaround (pws[gid].i[ 3]);

  w1[0] = swap_workaround (pws[gid].i[ 4]);
  w1[1] = swap_workaround (pws[gid].i[ 5]);
  w1[2] = swap_workaround (pws[gid].i[ 6]);
  w1[3] = swap_workaround (pws[gid].i[ 7]);

  w2[0] = swap_workaround (pws[gid].i[ 8]);
  w2[1] = swap_workaround (pws[gid].i[ 9]);
  w2[2] = swap_workaround (pws[gid].i[10]);
  w2[3] = swap_workaround (pws[gid].i[11]);

  w3[0] = swap_workaround (pws[gid].i[12]);
  w3[1] = swap_workaround (pws[gid].i[13]);
  w3[2] = swap_workaround (pws[gid].i[14]);
  w3[3] = swap_workaround (pws[gid].i[15]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
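
  /* First HMAC message: the salt, byte-swapped to big endian, with the final
     word holding the total bit length (64-byte ipad block + salt + 4-byte
     big-endian block index). */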
  esalt_buf0[0] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 0]);
  esalt_buf0[1] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 1]);
  esalt_buf0[2] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 2]);
  esalt_buf0[3] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 3]);
  esalt_buf1[0] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 4]);
  esalt_buf1[1] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 5]);
  esalt_buf1[2] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 6]);
  esalt_buf1[3] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 7]);
  esalt_buf2[0] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 8]);
  esalt_buf2[1] = swap_workaround (esalt_bufs[salt_pos].salt_buf[ 9]);
  esalt_buf2[2] = swap_workaround (esalt_bufs[salt_pos].salt_buf[10]);
  esalt_buf2[3] = swap_workaround (esalt_bufs[salt_pos].salt_buf[11]);
  esalt_buf3[0] = swap_workaround (esalt_bufs[salt_pos].salt_buf[12]);
  esalt_buf3[1] = swap_workaround (esalt_bufs[salt_pos].salt_buf[13]);
  esalt_buf3[3] = (64 + salt_len + 4) * 8;

  hmac_sha1_pad (w0, w1, w2, w3, ipad, opad);
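
  /* Persist the precomputed pads so the loop kernel can resume per-password
     work without rehashing the key block. */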
  tmps[gid].ipad[0] = ipad[0];
  tmps[gid].ipad[1] = ipad[1];
  tmps[gid].ipad[2] = ipad[2];
  tmps[gid].ipad[3] = ipad[3];
  tmps[gid].ipad[4] = ipad[4];

  tmps[gid].opad[0] = opad[0];
  tmps[gid].opad[1] = opad[1];
  tmps[gid].opad[2] = opad[2];
  tmps[gid].opad[3] = opad[3];
  tmps[gid].opad[4] = opad[4];
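
  /* First PBKDF2 iteration: U_1 = HMAC (password, salt || INT (1)) seeds both
     the running digest (dgst) and the XOR accumulator (out). */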
  for (u32 i = 0, j = 1; i < 5; i += 5, j += 1)

    hmac_sha1_run (esalt_buf0, esalt_buf1, esalt_buf2, esalt_buf3, ipad, opad, dgst);

    tmps[gid].dgst[i + 0] = dgst[0];
    tmps[gid].dgst[i + 1] = dgst[1];
    tmps[gid].dgst[i + 2] = dgst[2];
    tmps[gid].dgst[i + 3] = dgst[3];
    tmps[gid].dgst[i + 4] = dgst[4];

    tmps[gid].out[i + 0] = dgst[0];
    tmps[gid].out[i + 1] = dgst[1];
    tmps[gid].out[i + 2] = dgst[2];
    tmps[gid].out[i + 3] = dgst[3];
    tmps[gid].out[i + 4] = dgst[4];
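
/**
 * PBKDF2-HMAC-SHA1 loop kernel: run loop_cnt iterations per invocation,
 * feeding each U_j back through HMAC and XOR-accumulating into out, then
 * write the updated state back to tmps[].
 */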
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m12000_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pbkdf2_sha1_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global pbkdf2_sha1_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  ipad[0] = tmps[gid].ipad[0];
  ipad[1] = tmps[gid].ipad[1];
  ipad[2] = tmps[gid].ipad[2];
  ipad[3] = tmps[gid].ipad[3];
  ipad[4] = tmps[gid].ipad[4];

  opad[0] = tmps[gid].opad[0];
  opad[1] = tmps[gid].opad[1];
  opad[2] = tmps[gid].opad[2];
  opad[3] = tmps[gid].opad[3];
  opad[4] = tmps[gid].opad[4];
  for (u32 i = 0; i < 5; i += 5)

    dgst[0] = tmps[gid].dgst[i + 0];
    dgst[1] = tmps[gid].dgst[i + 1];
    dgst[2] = tmps[gid].dgst[i + 2];
    dgst[3] = tmps[gid].dgst[i + 3];
    dgst[4] = tmps[gid].dgst[i + 4];

    out[0] = tmps[gid].out[i + 0];
    out[1] = tmps[gid].out[i + 1];
    out[2] = tmps[gid].out[i + 2];
    out[3] = tmps[gid].out[i + 3];
    out[4] = tmps[gid].out[i + 4];
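
    /* Standard PBKDF2 inner loop: each iteration feeds the previous 20-byte
       digest back through HMAC and XOR-accumulates the result into out. */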
    for (u32 j = 0; j < loop_cnt; j++)

      w3[3] = (64 + 20) * 8;

      hmac_sha1_run (w0, w1, w2, w3, ipad, opad, dgst);

    tmps[gid].dgst[i + 0] = dgst[0];
    tmps[gid].dgst[i + 1] = dgst[1];
    tmps[gid].dgst[i + 2] = dgst[2];
    tmps[gid].dgst[i + 3] = dgst[3];
    tmps[gid].dgst[i + 4] = dgst[4];

    tmps[gid].out[i + 0] = out[0];
    tmps[gid].out[i + 1] = out[1];
    tmps[gid].out[i + 2] = out[2];
    tmps[gid].out[i + 3] = out[3];
    tmps[gid].out[i + 4] = out[4];
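
/**
 * Comparison kernel: load the final PBKDF2 output words for this work-item
 * and hand them to the shared multi-hash comparison code (VECT_COMPARE_M).
 */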
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m12000_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global pbkdf2_sha1_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global pbkdf2_sha1_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 lid = get_local_id (0);
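
  /* The output words selected by DGST_R0..DGST_R3 are checked against the
     loaded digests by the included comparison routine. */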
  const u32x r0 = tmps[gid].out[DGST_R0];
  const u32x r1 = tmps[gid].out[DGST_R1];
  const u32x r2 = tmps[gid].out[DGST_R2];
  const u32x r3 = tmps[gid].out[DGST_R3];

  #include VECT_COMPARE_M