/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */

#define _SHA1_

#include "include/constants.h"
#include "include/kernel_vendor.h"

#define DGST_R0 3
#define DGST_R1 4
#define DGST_R2 2
#define DGST_R3 1

#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"

#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef  VECT_SIZE4
#define VECT_COMPARE_S "check_single_vect4_comp4.c"
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
#endif

#ifdef  VECT_SIZE1
#define uint_to_hex_lower8_le(i) l_bin2asc[(i)]
#endif

#ifdef  VECT_SIZE2
#define uint_to_hex_lower8_le(i) u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#endif

#ifdef  VECT_SIZE4
#define uint_to_hex_lower8_le(i) u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#endif
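
// The uint_to_hex_lower8_le() macros above turn one input byte into its two
// lowercase hex ASCII characters by indexing the work-group-local l_bin2asc
// table built by the kernels below; each table entry packs both characters
// into the low 16 bits of a u32, and the vector variants do one lookup per lane.
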
static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
{
  u32x A = digest[0];
  u32x B = digest[1];
  u32x C = digest[2];
  u32x D = digest[3];
  u32x E = digest[4];

  u32x w0_t = w0[0];
  u32x w1_t = w0[1];
  u32x w2_t = w0[2];
  u32x w3_t = w0[3];
  u32x w4_t = w1[0];
  u32x w5_t = w1[1];
  u32x w6_t = w1[2];
  u32x w7_t = w1[3];
  u32x w8_t = w2[0];
  u32x w9_t = w2[1];
  u32x wa_t = w2[2];
  u32x wb_t = w2[3];
  u32x wc_t = w3[0];
  u32x wd_t = w3[1];
  u32x we_t = w3[2];
  u32x wf_t = w3[3];

  #undef K
  #define K SHA1C00

  // rounds 0..19: F0o (Ch) with constant SHA1C00
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  #undef K
  #define K SHA1C01

  // rounds 20..39: F1 (parity) with constant SHA1C01
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  #undef K
  #define K SHA1C02

  // rounds 40..59: F2o (Maj) with constant SHA1C02
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  #undef K
  #define K SHA1C03

  // rounds 60..79: F1 (parity) with constant SHA1C03
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);

  digest[0] += A;
  digest[1] += B;
  digest[2] += C;
  digest[3] += D;
  digest[4] += E;
}

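/**
 * m08400 computes a WBB3-style triple hash, sha1 ($salt . sha1 ($salt . sha1 ($pass))).
 * Each inner digest is re-expanded to its 40-character lowercase hex form
 * before being hashed again, which is what the uint_to_hex_lower8_le()
 * blocks in the kernels below implement.
 */
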
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 lid = get_local_id (0);

  const u32 gid = get_global_id (0);

  u32 wordl0[4];

  wordl0[0] = pws[gid].i[ 0];
  wordl0[1] = pws[gid].i[ 1];
  wordl0[2] = pws[gid].i[ 2];
  wordl0[3] = pws[gid].i[ 3];

  u32 wordl1[4];

  wordl1[0] = pws[gid].i[ 4];
  wordl1[1] = pws[gid].i[ 5];
  wordl1[2] = pws[gid].i[ 6];
  wordl1[3] = pws[gid].i[ 7];

  u32 wordl2[4];

  wordl2[0] = 0;
  wordl2[1] = 0;
  wordl2[2] = 0;
  wordl2[3] = 0;

  u32 wordl3[4];

  wordl3[0] = 0;
  wordl3[1] = 0;
  wordl3[2] = 0;
  wordl3[3] = 0;

  const u32 pw_l_len = pws[gid].pw_len;

  if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
  {
    append_0x80_2 (wordl0, wordl1, pw_l_len);

    switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len);
  }

  __local u32 l_bin2asc[256];

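  // The 256-entry hex lookup table is filled cooperatively: with the required
  // work-group size of 64, each work-item initializes exactly four entries.
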
  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

  barrier (CLK_LOCAL_MEM_FENCE);

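  // The bounds check is deliberately placed after the barrier: every
  // work-item, including out-of-range ones, must participate in building the
  // table and reaching the barrier.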
  if (gid >= gid_max) return;

  /**
   * salt
   */

  u32 salt_buf0[4];

  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  u32 salt_buf1[4];

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  u32 salt_buf2[2];

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

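  // The salt words are byte-swapped to big-endian once here, outside the main
  // loop, so the block assembly below can copy them into the SHA-1 message
  // words directly.
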
  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
  {
    const u32 pw_r_len = combs_buf[il_pos].pw_len;

    const u32 pw_len = pw_l_len + pw_r_len;

    u32 wordr0[4];

    wordr0[0] = combs_buf[il_pos].i[0];
    wordr0[1] = combs_buf[il_pos].i[1];
    wordr0[2] = combs_buf[il_pos].i[2];
    wordr0[3] = combs_buf[il_pos].i[3];

    u32 wordr1[4];

    wordr1[0] = combs_buf[il_pos].i[4];
    wordr1[1] = combs_buf[il_pos].i[5];
    wordr1[2] = combs_buf[il_pos].i[6];
    wordr1[3] = combs_buf[il_pos].i[7];

    u32 wordr2[4];

    wordr2[0] = 0;
    wordr2[1] = 0;
    wordr2[2] = 0;
    wordr2[3] = 0;

    u32 wordr3[4];

    wordr3[0] = 0;
    wordr3[1] = 0;
    wordr3[2] = 0;
    wordr3[3] = 0;

    if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
    {
      append_0x80_2 (wordr0, wordr1, pw_r_len);

      switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);
    }

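    // One side was shifted by the other side's length (switch_buffer_by_offset),
    // so OR-merging the two halves below yields the concatenated candidate.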
    u32 w0[4];

    w0[0] = wordl0[0] | wordr0[0];
    w0[1] = wordl0[1] | wordr0[1];
    w0[2] = wordl0[2] | wordr0[2];
    w0[3] = wordl0[3] | wordr0[3];

    u32 w1[4];

    w1[0] = wordl1[0] | wordr1[0];
    w1[1] = wordl1[1] | wordr1[1];
    w1[2] = wordl1[2] | wordr1[2];
    w1[3] = wordl1[3] | wordr1[3];

    u32 w2[4];

    w2[0] = wordl2[0] | wordr2[0];
    w2[1] = wordl2[1] | wordr2[1];
    w2[2] = wordl2[2] | wordr2[2];
    w2[3] = wordl2[3] | wordr2[3];

    u32 w3[4];

    w3[0] = wordl3[0] | wordr3[0];
    w3[1] = wordl3[1] | wordr3[1];
    w3[2] = wordl3[2] | wordr3[2];
    w3[3] = wordl3[3] | wordr3[3];

    u32x w0_t[4];

    w0_t[0] = swap_workaround (w0[0]);
    w0_t[1] = swap_workaround (w0[1]);
    w0_t[2] = swap_workaround (w0[2]);
    w0_t[3] = swap_workaround (w0[3]);

    u32x w1_t[4];

    w1_t[0] = swap_workaround (w1[0]);
    w1_t[1] = swap_workaround (w1[1]);
    w1_t[2] = swap_workaround (w1[2]);
    w1_t[3] = swap_workaround (w1[3]);

    u32x w2_t[4];

    w2_t[0] = swap_workaround (w2[0]);
    w2_t[1] = swap_workaround (w2[1]);
    w2_t[2] = swap_workaround (w2[2]);
    w2_t[3] = swap_workaround (w2[3]);

    u32x w3_t[4];

    w3_t[0] = swap_workaround (w3[0]);
    w3_t[1] = swap_workaround (w3[1]);
    w3_t[2] = 0;
    w3_t[3] = pw_len * 8;

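    // First message block of sha1 ($pass): the 0x80 terminator was appended by
    // append_0x80_2 above, and w3_t[3] holds the message length in bits.
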
    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    u32x a;
    u32x b;
    u32x c;
    u32x d;
    u32x e;

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

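    // First block of the salted hash: 40 bytes of salt followed by the first
    // 24 hex characters of the previous digest (a, b, c); d and e spill over
    // into the next block.
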
    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;
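    // The hashed message is salt_len salt bytes plus the 40-character hex
    // digest, hence the (salt_len + 40) * 8 bit length above.
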
    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
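    // The compare registers follow the DGST_R0..DGST_R3 remapping
    // (digest[3], digest[4], digest[2], digest[1]) rather than digest order.
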
    #include VECT_COMPARE_M
  }
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 lid = get_local_id (0);

  const u32 gid = get_global_id (0);

  u32 wordl0[4];

  wordl0[0] = pws[gid].i[ 0];
  wordl0[1] = pws[gid].i[ 1];
  wordl0[2] = pws[gid].i[ 2];
  wordl0[3] = pws[gid].i[ 3];

  u32 wordl1[4];

  wordl1[0] = pws[gid].i[ 4];
  wordl1[1] = pws[gid].i[ 5];
  wordl1[2] = pws[gid].i[ 6];
  wordl1[3] = pws[gid].i[ 7];

  u32 wordl2[4];

  wordl2[0] = 0;
  wordl2[1] = 0;
  wordl2[2] = 0;
  wordl2[3] = 0;

  u32 wordl3[4];

  wordl3[0] = 0;
  wordl3[1] = 0;
  wordl3[2] = 0;
  wordl3[3] = 0;

  const u32 pw_l_len = pws[gid].pw_len;

  if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
  {
    append_0x80_2 (wordl0, wordl1, pw_l_len);

    switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len);
  }

  __local u32 l_bin2asc[256];

  const u32 lid4 = lid * 4;

  const u32 lid40 = lid4 + 0;
  const u32 lid41 = lid4 + 1;
  const u32 lid42 = lid4 + 2;
  const u32 lid43 = lid4 + 3;

  const u32 v400 = (lid40 >> 0) & 15;
  const u32 v401 = (lid40 >> 4) & 15;
  const u32 v410 = (lid41 >> 0) & 15;
  const u32 v411 = (lid41 >> 4) & 15;
  const u32 v420 = (lid42 >> 0) & 15;
  const u32 v421 = (lid42 >> 4) & 15;
  const u32 v430 = (lid43 >> 0) & 15;
  const u32 v431 = (lid43 >> 4) & 15;

  l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
                   | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
  l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
                   | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
  l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
                   | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
  l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
                   | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * salt
   */

  u32 salt_buf0[4];

  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  u32 salt_buf1[4];

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  u32 salt_buf2[2];

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };
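  // Single-hash fast path: the target digest is loaded once per kernel so the
  // comparison code pulled in via VECT_COMPARE_S can test candidates directly.
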
  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
  {
    const u32 pw_r_len = combs_buf[il_pos].pw_len;

    const u32 pw_len = pw_l_len + pw_r_len;

    u32 wordr0[4];

    wordr0[0] = combs_buf[il_pos].i[0];
    wordr0[1] = combs_buf[il_pos].i[1];
    wordr0[2] = combs_buf[il_pos].i[2];
    wordr0[3] = combs_buf[il_pos].i[3];

    u32 wordr1[4];

    wordr1[0] = combs_buf[il_pos].i[4];
    wordr1[1] = combs_buf[il_pos].i[5];
    wordr1[2] = combs_buf[il_pos].i[6];
    wordr1[3] = combs_buf[il_pos].i[7];

    u32 wordr2[4];

    wordr2[0] = 0;
    wordr2[1] = 0;
    wordr2[2] = 0;
    wordr2[3] = 0;

    u32 wordr3[4];

    wordr3[0] = 0;
    wordr3[1] = 0;
    wordr3[2] = 0;
    wordr3[3] = 0;

    if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
    {
      append_0x80_2 (wordr0, wordr1, pw_r_len);

      switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);
    }

    u32 w0[4];

    w0[0] = wordl0[0] | wordr0[0];
    w0[1] = wordl0[1] | wordr0[1];
    w0[2] = wordl0[2] | wordr0[2];
    w0[3] = wordl0[3] | wordr0[3];

    u32 w1[4];

    w1[0] = wordl1[0] | wordr1[0];
    w1[1] = wordl1[1] | wordr1[1];
    w1[2] = wordl1[2] | wordr1[2];
    w1[3] = wordl1[3] | wordr1[3];

    u32 w2[4];

    w2[0] = wordl2[0] | wordr2[0];
    w2[1] = wordl2[1] | wordr2[1];
    w2[2] = wordl2[2] | wordr2[2];
    w2[3] = wordl2[3] | wordr2[3];

    u32 w3[4];

    w3[0] = wordl3[0] | wordr3[0];
    w3[1] = wordl3[1] | wordr3[1];
    w3[2] = wordl3[2] | wordr3[2];
    w3[3] = wordl3[3] | wordr3[3];

    u32x w0_t[4];

    w0_t[0] = swap_workaround (w0[0]);
    w0_t[1] = swap_workaround (w0[1]);
    w0_t[2] = swap_workaround (w0[2]);
    w0_t[3] = swap_workaround (w0[3]);

    u32x w1_t[4];

    w1_t[0] = swap_workaround (w1[0]);
    w1_t[1] = swap_workaround (w1[1]);
    w1_t[2] = swap_workaround (w1[2]);
    w1_t[3] = swap_workaround (w1[3]);

    u32x w2_t[4];

    w2_t[0] = swap_workaround (w2[0]);
    w2_t[1] = swap_workaround (w2[1]);
    w2_t[2] = swap_workaround (w2[2]);
    w2_t[3] = swap_workaround (w2[3]);

    u32x w3_t[4];

    w3_t[0] = swap_workaround (w3[0]);
    w3_t[1] = swap_workaround (w3[1]);
    w3_t[2] = 0;
    w3_t[3] = pw_len * 8;

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    u32x a;
    u32x b;
    u32x c;
    u32x d;
    u32x e;

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_S
  }
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}