2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
28 #include "include/kernel_functions.c"
29 #include "types_amd.c"
30 #include "common_amd.c"
33 #define VECT_COMPARE_M "check_multi_vect1_comp4.c"
37 #define VECT_COMPARE_M "check_multi_vect2_comp4.c"
41 #define VECT_COMPARE_M "check_multi_vect4_comp4.c"
// SHA-1 compression function: 80 rounds over one 512-bit message block,
// updating the five 32-bit state words in digest[].
// NOTE(review): this extract is fragmentary -- the original file's line
// numbers are embedded at the start of each line and several runs of
// lines are missing. The declarations of the round variables
// (A..E, w0_t..wf_t, loaded from w0..w3 and digest) and the trailing
// "digest[i] += ..." accumulation are not visible here; confirm against
// the full file before editing.
44 static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])

// Rounds 0..15: round function F0o applied to the raw message words.
72   SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
73   SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
74   SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
75   SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
76   SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
77   SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
78   SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
79   SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
80   SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
81   SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
82   SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
83   SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
84   SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
85   SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
86   SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
87   SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);

// Rounds 16..19: message schedule expansion (w[t] = rotl1(w[t-3] ^
// w[t-8] ^ w[t-14] ^ w[t-16])) computed in place, still under F0o.
88   w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
89   w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
90   w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
91   w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

// Rounds 20..39: round function F1 (the K-constant redefinition between
// phases is among the lines missing from this extract).
96   w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
97   w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
98   w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
99   w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
100   w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
101   w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
102   wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
103   wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
104   wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
105   wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
106   we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
107   wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
108   w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
109   w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
110   w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
111   w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
112   w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
113   w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
114   w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
115   w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

// Rounds 40..59: round function F2o (majority).
120   w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
121   w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
122   wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
123   wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
124   wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
125   wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
126   we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
127   wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
128   w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
129   w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
130   w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
131   w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
132   w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
133   w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
134   w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
135   w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
136   w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
137   w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
138   wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
139   wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

// Rounds 60..79: round function F1 again (parity).
144   wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
145   wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
146   we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
147   wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
148   w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
149   w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
150   w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
151   w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
152   w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
153   w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
154   w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
155   w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
156   w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
157   w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
158   wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
159   wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
160   wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
161   wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
162   we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
163   wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
// Precompute the HMAC-SHA1 inner (ipad) and outer (opad) states from the
// key material already loaded into w0..w3, so per-iteration HMACs can
// resume from these states instead of rehashing the padded key.
// NOTE(review): fragmentary extract -- the initialization of ipad[]/opad[]
// to the SHA-1 IV is among the missing lines; confirm against the full file.
172 static void hmac_sha1_pad (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[5], u32x opad[5])
// XOR the key block with the HMAC inner pad byte 0x36.
174   w0[0] = w0[0] ^ 0x36363636;
175   w0[1] = w0[1] ^ 0x36363636;
176   w0[2] = w0[2] ^ 0x36363636;
177   w0[3] = w0[3] ^ 0x36363636;
178   w1[0] = w1[0] ^ 0x36363636;
179   w1[1] = w1[1] ^ 0x36363636;
180   w1[2] = w1[2] ^ 0x36363636;
181   w1[3] = w1[3] ^ 0x36363636;
182   w2[0] = w2[0] ^ 0x36363636;
183   w2[1] = w2[1] ^ 0x36363636;
184   w2[2] = w2[2] ^ 0x36363636;
185   w2[3] = w2[3] ^ 0x36363636;
186   w3[0] = w3[0] ^ 0x36363636;
187   w3[1] = w3[1] ^ 0x36363636;
188   w3[2] = w3[2] ^ 0x36363636;
189   w3[3] = w3[3] ^ 0x36363636;

// Absorb the inner-padded key block into the ipad state.
197   sha1_transform (w0, w1, w2, w3, ipad);

// 0x6a == 0x36 ^ 0x5c: XORing the already 0x36-masked words with 0x6a
// converts them in place to the outer-pad form (key ^ 0x5c).
199   w0[0] = w0[0] ^ 0x6a6a6a6a;
200   w0[1] = w0[1] ^ 0x6a6a6a6a;
201   w0[2] = w0[2] ^ 0x6a6a6a6a;
202   w0[3] = w0[3] ^ 0x6a6a6a6a;
203   w1[0] = w1[0] ^ 0x6a6a6a6a;
204   w1[1] = w1[1] ^ 0x6a6a6a6a;
205   w1[2] = w1[2] ^ 0x6a6a6a6a;
206   w1[3] = w1[3] ^ 0x6a6a6a6a;
207   w2[0] = w2[0] ^ 0x6a6a6a6a;
208   w2[1] = w2[1] ^ 0x6a6a6a6a;
209   w2[2] = w2[2] ^ 0x6a6a6a6a;
210   w2[3] = w2[3] ^ 0x6a6a6a6a;
211   w3[0] = w3[0] ^ 0x6a6a6a6a;
212   w3[1] = w3[1] ^ 0x6a6a6a6a;
213   w3[2] = w3[2] ^ 0x6a6a6a6a;
214   w3[3] = w3[3] ^ 0x6a6a6a6a;

// Absorb the outer-padded key block into the opad state.
222   sha1_transform (w0, w1, w2, w3, opad);
// One HMAC-SHA1 over a single message block, resuming from the precomputed
// ipad/opad states: inner transform on the message, then outer transform
// over the 20-byte inner digest.
// NOTE(review): fragmentary extract -- the copies of ipad/opad into digest,
// the repacking of the inner digest into w0/w1, and the 0x80 padding of the
// outer block are among the missing lines.
225 static void hmac_sha1_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[5], u32x opad[5], u32x digest[5])
// Inner hash: process the message block with the ipad-derived state.
233   sha1_transform (w0, w1, w2, w3, digest);
// Outer block bit length: 64-byte key block + 20-byte inner digest.
250   w3[3] = (64 + 20) * 8;
// Outer hash over the inner digest with the opad-derived state.
258   sha1_transform (w0, w1, w2, w3, digest);
// PBKDF2-HMAC-SHA1 init kernel (hash-mode 06700, AIX {ssha1}): one work-item
// per candidate password. Derives the HMAC ipad/opad states from the password,
// computes the first PBKDF2 iteration U1 = HMAC(password, salt || counter),
// and stores ipad/opad/dgst/out into the per-candidate tmps[] buffer for the
// loop kernel to continue.
// NOTE(review): fragmentary extract -- local declarations (w0..w3,
// salt_buf0..salt_buf3, ipad, opad, dgst) and the salt_buf3 loads are among
// the missing lines; the embedded numeric prefixes are the original file's
// line numbers.
261 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06700_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha1aix_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global wpa_t *wpa_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
267   const u32 gid = get_global_id (0);

// Guard against the padded tail of the dispatch.
269   if (gid >= gid_max) return;

// Load the candidate password (up to 64 bytes) into w0..w3.
273   w0[0] = pws[gid].i[ 0];
274   w0[1] = pws[gid].i[ 1];
275   w0[2] = pws[gid].i[ 2];
276   w0[3] = pws[gid].i[ 3];
280   w1[0] = pws[gid].i[ 4];
281   w1[1] = pws[gid].i[ 5];
282   w1[2] = pws[gid].i[ 6];
283   w1[3] = pws[gid].i[ 7];
287   w2[0] = pws[gid].i[ 8];
288   w2[1] = pws[gid].i[ 9];
289   w2[2] = pws[gid].i[10];
290   w2[3] = pws[gid].i[11];
294   w3[0] = pws[gid].i[12];
295   w3[1] = pws[gid].i[13];
296   w3[2] = pws[gid].i[14];
297   w3[3] = pws[gid].i[15];

// Load the salt for this salt position.
303   u32 salt_len = salt_bufs[salt_pos].salt_len;
307   salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
308   salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
309   salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
310   salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];
314   salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
315   salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
316   salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
317   salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];
321   salt_buf2[0] = salt_bufs[salt_pos].salt_buf[ 8];
322   salt_buf2[1] = salt_bufs[salt_pos].salt_buf[ 9];
323   salt_buf2[2] = salt_bufs[salt_pos].salt_buf[10];
324   salt_buf2[3] = salt_bufs[salt_pos].salt_buf[11];

// Append the PBKDF2 material after the salt: the 0x01 at salt_len + 3
// appears to be the low byte of the big-endian 32-bit block counter
// (0x00000001), followed by the 0x80 SHA-1 padding byte -- TODO confirm
// against append_0x01_4 / append_0x80_4 in kernel_functions.c.
333   append_0x01_4 (salt_buf0, salt_buf1, salt_buf2, salt_buf3, salt_len + 3);
335   append_0x80_4 (salt_buf0, salt_buf1, salt_buf2, salt_buf3, salt_len + 4);

// Byte-swap the password words to SHA-1's big-endian word order.
341   w0[0] = swap_workaround (w0[0]);
342   w0[1] = swap_workaround (w0[1]);
343   w0[2] = swap_workaround (w0[2]);
344   w0[3] = swap_workaround (w0[3]);
345   w1[0] = swap_workaround (w1[0]);
346   w1[1] = swap_workaround (w1[1]);
347   w1[2] = swap_workaround (w1[2]);
348   w1[3] = swap_workaround (w1[3]);
349   w2[0] = swap_workaround (w2[0]);
350   w2[1] = swap_workaround (w2[1]);
351   w2[2] = swap_workaround (w2[2]);
352   w2[3] = swap_workaround (w2[3]);
353   w3[0] = swap_workaround (w3[0]);
354   w3[1] = swap_workaround (w3[1]);
355   w3[2] = swap_workaround (w3[2]);
356   w3[3] = swap_workaround (w3[3]);

// Derive the HMAC inner/outer states from the password.
361   hmac_sha1_pad (w0, w1, w2, w3, ipad, opad);

// Persist ipad/opad so the loop kernel can resume HMACs cheaply.
363   tmps[gid].ipad[0] = ipad[0];
364   tmps[gid].ipad[1] = ipad[1];
365   tmps[gid].ipad[2] = ipad[2];
366   tmps[gid].ipad[3] = ipad[3];
367   tmps[gid].ipad[4] = ipad[4];

369   tmps[gid].opad[0] = opad[0];
370   tmps[gid].opad[1] = opad[1];
371   tmps[gid].opad[2] = opad[2];
372   tmps[gid].opad[3] = opad[3];
373   tmps[gid].opad[4] = opad[4];

// Stage the salt || counter || padding block as the HMAC message.
375   w0[0] = salt_buf0[0];
376   w0[1] = salt_buf0[1];
377   w0[2] = salt_buf0[2];
378   w0[3] = salt_buf0[3];
379   w1[0] = salt_buf1[0];
380   w1[1] = salt_buf1[1];
381   w1[2] = salt_buf1[2];
382   w1[3] = salt_buf1[3];
383   w2[0] = salt_buf2[0];
384   w2[1] = salt_buf2[1];
385   w2[2] = salt_buf2[2];
386   w2[3] = salt_buf2[3];
387   w3[0] = salt_buf3[0];
388   w3[1] = salt_buf3[1];
389   w3[2] = salt_buf3[2];
390   //w3[3] = salt_buf3[3];

// Byte-swap the message words; the last word carries the total bit
// length instead (64-byte key block + salt + 4-byte counter).
392   w0[0] = swap_workaround (w0[0]);
393   w0[1] = swap_workaround (w0[1]);
394   w0[2] = swap_workaround (w0[2]);
395   w0[3] = swap_workaround (w0[3]);
396   w1[0] = swap_workaround (w1[0]);
397   w1[1] = swap_workaround (w1[1]);
398   w1[2] = swap_workaround (w1[2]);
399   w1[3] = swap_workaround (w1[3]);
400   w2[0] = swap_workaround (w2[0]);
401   w2[1] = swap_workaround (w2[1]);
402   w2[2] = swap_workaround (w2[2]);
403   w2[3] = swap_workaround (w2[3]);
404   w3[0] = swap_workaround (w3[0]);
405   w3[1] = swap_workaround (w3[1]);
406   w3[2] = swap_workaround (w3[2]);
407   w3[3] = (64 + salt_len + 4) * 8;

// U1 = HMAC-SHA1(password, salt || counter).
411   hmac_sha1_run (w0, w1, w2, w3, ipad, opad, dgst);

// dgst is the running U value; out accumulates the XOR of all U's and
// starts equal to U1.
413   tmps[gid].dgst[0] = dgst[0];
414   tmps[gid].dgst[1] = dgst[1];
415   tmps[gid].dgst[2] = dgst[2];
416   tmps[gid].dgst[3] = dgst[3];
417   tmps[gid].dgst[4] = dgst[4];

419   tmps[gid].out[0] = dgst[0];
420   tmps[gid].out[1] = dgst[1];
421   tmps[gid].out[2] = dgst[2];
422   tmps[gid].out[3] = dgst[3];
423   tmps[gid].out[4] = dgst[4];
// PBKDF2-HMAC-SHA1 loop kernel: resumes per-candidate state from tmps[],
// runs loop_cnt further PBKDF2 iterations (U_{i+1} = HMAC(password, U_i),
// folded into out), and writes the updated state back.
// NOTE(review): fragmentary extract -- the local declarations, the loop
// body's repacking of dgst into w0..w3 and the "out ^= dgst" accumulation
// are among the missing lines; confirm against the full file.
426 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06700_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha1aix_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global wpa_t *wpa_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
428   const u32 gid = get_global_id (0);

430   if (gid >= gid_max) return;

// Restore the HMAC states computed by the init kernel.
435   ipad[0] = tmps[gid].ipad[0];
436   ipad[1] = tmps[gid].ipad[1];
437   ipad[2] = tmps[gid].ipad[2];
438   ipad[3] = tmps[gid].ipad[3];
439   ipad[4] = tmps[gid].ipad[4];

441   opad[0] = tmps[gid].opad[0];
442   opad[1] = tmps[gid].opad[1];
443   opad[2] = tmps[gid].opad[2];
444   opad[3] = tmps[gid].opad[3];
445   opad[4] = tmps[gid].opad[4];

// Restore the running U value and the XOR accumulator.
450   dgst[0] = tmps[gid].dgst[0];
451   dgst[1] = tmps[gid].dgst[1];
452   dgst[2] = tmps[gid].dgst[2];
453   dgst[3] = tmps[gid].dgst[3];
454   dgst[4] = tmps[gid].dgst[4];

456   out[0] = tmps[gid].out[0];
457   out[1] = tmps[gid].out[1];
458   out[2] = tmps[gid].out[2];
459   out[3] = tmps[gid].out[3];
460   out[4] = tmps[gid].out[4];

// One PBKDF2 iteration per pass; the body (not fully visible here)
// feeds the previous dgst back through HMAC-SHA1.
462   for (u32 j = 0; j < loop_cnt; j++)
// Message bit length: 64-byte key block + 20-byte previous digest.
484     w3[3] = (64 + 20) * 8;

486     hmac_sha1_run (w0, w1, w2, w3, ipad, opad, dgst);

// Persist the updated state for the next loop invocation / comp kernel.
495   tmps[gid].dgst[0] = dgst[0];
496   tmps[gid].dgst[1] = dgst[1];
497   tmps[gid].dgst[2] = dgst[2];
498   tmps[gid].dgst[3] = dgst[3];
499   tmps[gid].dgst[4] = dgst[4];

501   tmps[gid].out[0] = out[0];
502   tmps[gid].out[1] = out[1];
503   tmps[gid].out[2] = out[2];
504   tmps[gid].out[3] = out[3];
505   tmps[gid].out[4] = out[4];
// Comparison kernel: reads the finished PBKDF2 output for this candidate
// and hands the four digest registers to the shared multi-hash comparison
// code pulled in via VECT_COMPARE_M.
// NOTE(review): fragmentary extract -- intermediate lines between the
// a..e loads and the r0..r3 loads are missing.
508 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m06700_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global sha1aix_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global wpa_t *wpa_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
514   const u32 gid = get_global_id (0);

516   if (gid >= gid_max) return;

// lid is used by the included comparison code.
518   const u32 lid = get_local_id (0);

// Load the derived key words; the 0xffff03ff mask on the last word looks
// like a format-specific truncation of the stored AIX {ssha1} digest --
// TODO confirm against the host-side parser for mode 06700.
525   u32x a = tmps[gid].out[0];
526   u32x b = tmps[gid].out[1];
527   u32x c = tmps[gid].out[2];
528   u32x d = tmps[gid].out[3];
529   u32x e = tmps[gid].out[4] & 0xffff03ff;

// Registers the included comparison code matches against digests_buf.
532   const u32x r0 = tmps[gid].out[DGST_R0];
533   const u32x r1 = tmps[gid].out[DGST_R1];
534   const u32x r2 = tmps[gid].out[DGST_R2];
535   const u32x r3 = tmps[gid].out[DGST_R3];

// Shared vectorized multi-hash comparison (file chosen by VECT_SIZE).
539   #include VECT_COMPARE_M