 * Author......: Jens Steube <jens.steube@gmail.com>
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"
#ifdef VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef VECT_SIZE4
#define VECT_COMPARE_S "check_single_vect4_comp4.c"
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
#endif
#ifdef VECT_SIZE1
#define uint_to_hex_lower8_le(i) l_bin2asc[(i)]
#endif

#ifdef VECT_SIZE2
#define uint_to_hex_lower8_le(i) u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#endif

#ifdef VECT_SIZE4
#define uint_to_hex_lower8_le(i) u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#endif
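/*
 * l_bin2asc maps a byte to its two lowercase hex digits, packed into the
 * low 16 bits of a u32: the digit of the low nibble sits in bits 0..7,
 * the digit of the high nibble in bits 8..15. The table is built
 * cooperatively per work-group in the kernels below. Illustrative
 * example of the resulting contents:
 *
 *   l_bin2asc[0xab] == 'b' | ('a' << 8)
 *
 * uint_to_hex_lower8_le() is then just a per-lane lookup into the table.
 */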
static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
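  // rounds 0..19 use f = (b & c) | (~b & d) (SHA1_F0o); the first 16
  // steps consume the 16 message words w0_t..wf_t directly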
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
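  // from step 16 on, the message schedule is expanded on the fly:
  // w[t] = rotl32 (w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1)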
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);
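  // rounds 20..39 use the parity function b ^ c ^ d (SHA1_F1)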
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);
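  // rounds 40..59 use the majority function (SHA1_F2o)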
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);
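  // rounds 60..79 use the parity function again (SHA1_F1)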
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
static void m08400m (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 l_bin2asc[256])
  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  // byte-swap each salt word into the big-endian form SHA-1 consumes

  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)

    const u32 w0r = bfs_buf[il_pos].i; // candidate word from the brute-force buffer

    w3_t[3] = pw_len * 8; // length in bits of the password block

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
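    // digest now holds SHA-1 of the password candidate; each outer hash
    // below is computed over the salt (the 10 words loaded above) followed
    // by the 40-char lowercase hex rendering of the previous digest a..e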
    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    // each pair of words below carries the 8 lowercase hex chars of one
    // 32-bit state word
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    // second block: hex of d and e, then the padding terminator
    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000; // 0x80 terminator bit directly after the message bytes
    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    // outermost SHA-1: same salt . hex(digest) layout over the new digest

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
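    // reorder the digest words into the DGST_R0..DGST_R3 layout that the
    // include-based comparators expect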
    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_M
static void m08400s (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 l_bin2asc[256])
  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
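  // single-hash search: preload the target digest once, outside the loop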
  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };
  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
  // same triple-SHA-1 construction as in m08400m

  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)

    const u32 w0r = bfs_buf[il_pos].i;

    w3_t[3] = pw_len * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;

    w3_t[3] = (salt_len + 40) * 8; // message length in bits: salt plus 40 hex chars

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;

    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_S
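/**
 * kernel entry points: the _m04/_m08/_m16 variants drive the multi-hash
 * path (m08400m), the _s04/_s08/_s16 variants the single-hash path
 * (m08400s); the numeric suffix reflects how many password words each
 * variant loads from pws (w0 only, w0..w1, or w0..w3)
 */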
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  const u32 pw_len = pws[gid].pw_len;
  __local u32 l_bin2asc[256];

  // build the shared byte-to-hex table cooperatively: each of the 64
  // work-items fills four of the 256 entries

  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;
  barrier (CLK_LOCAL_MEM_FENCE); // the table must be complete before any work-item reads it

  if (gid >= gid_max) return; // bounds check after the barrier, so every work-item reaches it

  m08400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  __local u32 l_bin2asc[256];

  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  m08400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];

  const u32 pw_len = pws[gid].pw_len;

  __local u32 l_bin2asc[256];

  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  m08400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  const u32 pw_len = pws[gid].pw_len;

  __local u32 l_bin2asc[256];

  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  m08400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  __local u32 l_bin2asc[256];

  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  m08400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];

  const u32 pw_len = pws[gid].pw_len;

  __local u32 l_bin2asc[256];

  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  m08400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);