2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
16 #include "include/kernel_functions.c"
17 #include "types_ocl.c"
20 #define COMPARE_S "check_single_comp4.c"
21 #define COMPARE_M "check_multi_comp4.c"
23 #define uint_to_hex_lower8_le(i) l_bin2asc[(i)]
// sha1_transform: one application of the SHA-1 compression function to a
// single 64-byte message block (w0..w3 = 16 big-endian u32 words), updating
// the 5-word running digest in place.
//
// NOTE(review): this chunk shows only the 80 round steps. The prologue
// (A..E initialisation from digest[], the w0_t..wf_t working copies) and the
// epilogue (digest[0..4] += A..E) fall outside the visible span — confirm
// against the full file. SHA1_STEP / SHA1_F0o / SHA1_F1 / SHA1_F2o come from
// the included kernel_functions.c (not visible here).
25 static void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[5])

// Rounds 0..15: round function SHA1_F0o (the SHA-1 "Ch" round per FIPS
// 180-4, judging by its use in rounds 0..19) over the original message
// words. The A..E argument order rotates each step instead of shuffling
// the state variables themselves.
53 SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
54 SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
55 SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
56 SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
57 SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
58 SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
59 SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
60 SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
61 SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
62 SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
63 SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
64 SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
65 SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
66 SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
67 SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
68 SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);

// Rounds 16..19: message-schedule expansion begins —
// w[t] = rotl32(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1) — still with F0o.
69 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
70 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
71 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
72 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

// Rounds 20..39: round function SHA1_F1 (the XOR/parity round).
77 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
78 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
79 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
80 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
81 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
82 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
83 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
84 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
85 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
86 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
87 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
88 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
89 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
90 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
91 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
92 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
93 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
94 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
95 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
96 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

// Rounds 40..59: round function SHA1_F2o (the SHA-1 "Maj" round, per its
// position in FIPS 180-4's round layout).
101 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
102 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
103 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
104 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
105 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
106 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
107 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
108 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
109 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
110 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
111 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
112 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
113 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
114 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
115 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
116 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
117 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
118 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
119 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
120 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

// Rounds 60..79: back to the parity round SHA1_F1.
125 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
126 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
127 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
128 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
129 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
130 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
131 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
132 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
133 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
134 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
135 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
136 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
137 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
138 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
139 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
140 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
141 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
142 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
143 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
144 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
// m08400_m04: combinator-attack kernel, multi-hash ("m") variant, passwords
// up to 4 u32 words per side. Visible structure of the algorithm:
//   digest = sha1(left_pw | right_pw)
//   digest = sha1(salt . hex(digest))   (spread over two transform calls)
//   digest = sha1(salt . hex(digest))   (again)
// i.e. a salted, double-nested SHA-1 over lowercase-hex intermediate digests.
//
// NOTE(review): many declaration/assignment lines (w0..w3, w0_t..w3_t,
// wordl2/3, wordr2/3, digest[], the a..e snapshots of digest[0..4], and the
// final COMPARE_M include) fall outside the visible span of this chunk —
// confirm against the full file.
153 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

// Work-item indices: lid within the 64-wide work-group, gid global.
159 const u32 lid = get_local_id (0);
165 const u32 gid = get_global_id (0);

// Left-hand password half for this work-item (8 u32 words = up to 32 bytes).
169 wordl0[0] = pws[gid].i[ 0];
170 wordl0[1] = pws[gid].i[ 1];
171 wordl0[2] = pws[gid].i[ 2];
172 wordl0[3] = pws[gid].i[ 3];
176 wordl1[0] = pws[gid].i[ 4];
177 wordl1[1] = pws[gid].i[ 5];
178 wordl1[2] = pws[gid].i[ 6];
179 wordl1[3] = pws[gid].i[ 7];
195 const u32 pw_l_len = pws[gid].pw_len;

// BASE_RIGHT mode: the left word is the appended part, so pad it with 0x80
// and shift it past the (fixed-length) right word once, outside the loop.
197 if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
199 append_0x80_2x4 (wordl0, wordl1, pw_l_len);
201 switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len);

// Build the shared byte -> lowercase-ASCII-hex lookup table used by
// uint_to_hex_lower8_le. Each of the 64 work-items fills 4 of the 256
// entries; entry b packs b's two hex digit characters into the low 16 bits
// (low-nibble char at bits 0..7, high-nibble char at bits 8..15).
208 __local u32 l_bin2asc[256];
210 const u32 lid4 = lid * 4;
212 const u32 lid40 = lid4 + 0;
213 const u32 lid41 = lid4 + 1;
214 const u32 lid42 = lid4 + 2;
215 const u32 lid43 = lid4 + 3;
217 const u32 v400 = (lid40 >> 0) & 15;
218 const u32 v401 = (lid40 >> 4) & 15;
219 const u32 v410 = (lid41 >> 0) & 15;
220 const u32 v411 = (lid41 >> 4) & 15;
221 const u32 v420 = (lid42 >> 0) & 15;
222 const u32 v421 = (lid42 >> 4) & 15;
223 const u32 v430 = (lid43 >> 0) & 15;
224 const u32 v431 = (lid43 >> 4) & 15;
226 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
227 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
228 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
229 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
230 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
231 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
232 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
233 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

// All 256 table entries must be written before any work-item reads them.
235 barrier (CLK_LOCAL_MEM_FENCE);

// Out-of-range check comes AFTER the barrier so that every work-item in the
// group participates in it (returning before a barrier is undefined).
237 if (gid >= gid_max) return;

// Salt words, byte-swapped to the big-endian order SHA-1 expects.
245 salt_buf0[0] = swap32 (salt_bufs[salt_pos].salt_buf[ 0]);
246 salt_buf0[1] = swap32 (salt_bufs[salt_pos].salt_buf[ 1]);
247 salt_buf0[2] = swap32 (salt_bufs[salt_pos].salt_buf[ 2]);
248 salt_buf0[3] = swap32 (salt_bufs[salt_pos].salt_buf[ 3]);
252 salt_buf1[0] = swap32 (salt_bufs[salt_pos].salt_buf[ 4]);
253 salt_buf1[1] = swap32 (salt_bufs[salt_pos].salt_buf[ 5]);
254 salt_buf1[2] = swap32 (salt_bufs[salt_pos].salt_buf[ 6]);
255 salt_buf1[3] = swap32 (salt_bufs[salt_pos].salt_buf[ 7]);
259 salt_buf2[0] = swap32 (salt_bufs[salt_pos].salt_buf[ 8]);
260 salt_buf2[1] = swap32 (salt_bufs[salt_pos].salt_buf[ 9]);
264 const u32 salt_len = salt_bufs[salt_pos].salt_len;

// Main loop: one candidate per right-hand combinator word.
270 for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
272 const u32 pw_r_len = combs_buf[il_pos].pw_len;
274 const u32 pw_len = pw_l_len + pw_r_len;

// Right-hand password half for this iteration.
278 wordr0[0] = combs_buf[il_pos].i[0];
279 wordr0[1] = combs_buf[il_pos].i[1];
280 wordr0[2] = combs_buf[il_pos].i[2];
281 wordr0[3] = combs_buf[il_pos].i[3];
285 wordr1[0] = combs_buf[il_pos].i[4];
286 wordr1[1] = combs_buf[il_pos].i[5];
287 wordr1[2] = combs_buf[il_pos].i[6];
288 wordr1[3] = combs_buf[il_pos].i[7];

// BASE_LEFT mode: right word is the appended part — pad and shift it by the
// left word's length so the two halves can be OR-merged below.
304 if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
306 append_0x80_2x4 (wordr0, wordr1, pw_r_len);
308 switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);

// Merge left and right halves into the 64-byte message buffer.
313 w0[0] = wordl0[0] | wordr0[0];
314 w0[1] = wordl0[1] | wordr0[1];
315 w0[2] = wordl0[2] | wordr0[2];
316 w0[3] = wordl0[3] | wordr0[3];
320 w1[0] = wordl1[0] | wordr1[0];
321 w1[1] = wordl1[1] | wordr1[1];
322 w1[2] = wordl1[2] | wordr1[2];
323 w1[3] = wordl1[3] | wordr1[3];
327 w2[0] = wordl2[0] | wordr2[0];
328 w2[1] = wordl2[1] | wordr2[1];
329 w2[2] = wordl2[2] | wordr2[2];
330 w2[3] = wordl2[3] | wordr2[3];
334 w3[0] = wordl3[0] | wordr3[0];
335 w3[1] = wordl3[1] | wordr3[1];
336 w3[2] = wordl3[2] | wordr3[2];
337 w3[3] = wordl3[3] | wordr3[3];

// Byte-swap candidate to big-endian for SHA-1.
341 w0_t[0] = swap32 (w0[0]);
342 w0_t[1] = swap32 (w0[1]);
343 w0_t[2] = swap32 (w0[2]);
344 w0_t[3] = swap32 (w0[3]);
348 w1_t[0] = swap32 (w1[0]);
349 w1_t[1] = swap32 (w1[1]);
350 w1_t[2] = swap32 (w1[2]);
351 w1_t[3] = swap32 (w1[3]);
355 w2_t[0] = swap32 (w2[0]);
356 w2_t[1] = swap32 (w2[1]);
357 w2_t[2] = swap32 (w2[2]);
358 w2_t[3] = swap32 (w2[3]);
362 w3_t[0] = swap32 (w3[0]);
363 w3_t[1] = swap32 (w3[1]);
// SHA-1 padding length field: message length in bits. (The upper length
// word w3_t[2] is set outside the visible span — presumably 0.)
365 w3_t[3] = pw_len * 8;

// digest = sha1(password)
375 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Block 1 of sha1(salt . hex(digest)): salt words followed by the lowercase
// hex of a..c. (a..e are presumably snapshots of digest[0..4], assigned in
// lines not visible here — confirm.) uint_to_hex_lower8_le maps one byte to
// its two hex chars via l_bin2asc; each u32 output word holds 4 hex chars.
389 w0_t[0] = salt_buf0[0];
390 w0_t[1] = salt_buf0[1];
391 w0_t[2] = salt_buf0[2];
392 w0_t[3] = salt_buf0[3];
393 w1_t[0] = salt_buf1[0];
394 w1_t[1] = salt_buf1[1];
395 w1_t[2] = salt_buf1[2];
396 w1_t[3] = salt_buf1[3];
397 w2_t[0] = salt_buf2[0];
398 w2_t[1] = salt_buf2[1];
399 w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
400 | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
401 w2_t[3] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
402 | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
403 w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
404 | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
405 w3_t[1] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
406 | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
407 w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
408 | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
409 w3_t[3] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
410 | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
418 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Block 2: remaining hex chars (d, e), then 0x80 terminator; the length
// field assignment for this block is outside the visible span.
420 w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
421 | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
422 w0_t[1] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
423 | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
424 w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
425 | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
426 w0_t[3] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
427 | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
428 w1_t[0] = 0x80000000;
441 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Second salt+hex round: identical layout to the first, using the refreshed
// a..e snapshots of the previous digest.
449 w0_t[0] = salt_buf0[0];
450 w0_t[1] = salt_buf0[1];
451 w0_t[2] = salt_buf0[2];
452 w0_t[3] = salt_buf0[3];
453 w1_t[0] = salt_buf1[0];
454 w1_t[1] = salt_buf1[1];
455 w1_t[2] = salt_buf1[2];
456 w1_t[3] = salt_buf1[3];
457 w2_t[0] = salt_buf2[0];
458 w2_t[1] = salt_buf2[1];
459 w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
460 | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
461 w2_t[3] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
462 | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
463 w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
464 | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
465 w3_t[1] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
466 | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
467 w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
468 | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
469 w3_t[3] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
470 | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
478 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
480 w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
481 | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
482 w0_t[1] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
483 | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
484 w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
485 | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
486 w0_t[3] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
487 | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
488 w1_t[0] = 0x80000000;
501 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Digest words in hashcat's comparison order (r0..r3), consumed by the
// COMPARE_M include (not visible in this span).
503 const u32 r0 = digest[3];
504 const u32 r1 = digest[4];
505 const u32 r2 = digest[2];
506 const u32 r3 = digest[1];
512 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
516 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
// m08400_s04: combinator-attack kernel, single-hash ("s") variant. Same
// hash construction as m08400_m04 — sha1(salt . hex(sha1(salt .
// hex(sha1(pw))))) — but additionally preloads the one target digest into
// search[] for the early-out comparison (COMPARE_S).
//
// NOTE(review): as in the m04 variant, several declaration/assignment lines
// (w0..w3, w0_t..w3_t, wordl2/3, wordr2/3, digest[], the a..e snapshots,
// the COMPARE_S include and closing braces) fall outside the visible span.
520 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

// Work-item indices.
526 const u32 lid = get_local_id (0);
532 const u32 gid = get_global_id (0);

// Left-hand password half (up to 32 bytes).
536 wordl0[0] = pws[gid].i[ 0];
537 wordl0[1] = pws[gid].i[ 1];
538 wordl0[2] = pws[gid].i[ 2];
539 wordl0[3] = pws[gid].i[ 3];
543 wordl1[0] = pws[gid].i[ 4];
544 wordl1[1] = pws[gid].i[ 5];
545 wordl1[2] = pws[gid].i[ 6];
546 wordl1[3] = pws[gid].i[ 7];
562 const u32 pw_l_len = pws[gid].pw_len;

// BASE_RIGHT mode: pad/shift the left word once, outside the loop.
564 if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
566 append_0x80_2x4 (wordl0, wordl1, pw_l_len);
568 switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, combs_buf[0].pw_len);

// Shared byte -> lowercase-hex lookup table; each of the 64 work-items
// fills 4 of the 256 entries (low-nibble char in bits 0..7, high-nibble
// char in bits 8..15).
575 __local u32 l_bin2asc[256];
577 const u32 lid4 = lid * 4;
579 const u32 lid40 = lid4 + 0;
580 const u32 lid41 = lid4 + 1;
581 const u32 lid42 = lid4 + 2;
582 const u32 lid43 = lid4 + 3;
584 const u32 v400 = (lid40 >> 0) & 15;
585 const u32 v401 = (lid40 >> 4) & 15;
586 const u32 v410 = (lid41 >> 0) & 15;
587 const u32 v411 = (lid41 >> 4) & 15;
588 const u32 v420 = (lid42 >> 0) & 15;
589 const u32 v421 = (lid42 >> 4) & 15;
590 const u32 v430 = (lid43 >> 0) & 15;
591 const u32 v431 = (lid43 >> 4) & 15;
593 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 0
594 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 8;
595 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 0
596 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 8;
597 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 0
598 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 8;
599 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 0
600 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 8;

// Table must be complete before any read; every work-item hits the barrier.
602 barrier (CLK_LOCAL_MEM_FENCE);

// Out-of-range return only AFTER the barrier (barrier divergence is UB).
604 if (gid >= gid_max) return;

// Salt words, byte-swapped to big-endian for SHA-1.
612 salt_buf0[0] = swap32 (salt_bufs[salt_pos].salt_buf[ 0]);
613 salt_buf0[1] = swap32 (salt_bufs[salt_pos].salt_buf[ 1]);
614 salt_buf0[2] = swap32 (salt_bufs[salt_pos].salt_buf[ 2]);
615 salt_buf0[3] = swap32 (salt_bufs[salt_pos].salt_buf[ 3]);
619 salt_buf1[0] = swap32 (salt_bufs[salt_pos].salt_buf[ 4]);
620 salt_buf1[1] = swap32 (salt_bufs[salt_pos].salt_buf[ 5]);
621 salt_buf1[2] = swap32 (salt_bufs[salt_pos].salt_buf[ 6]);
622 salt_buf1[3] = swap32 (salt_bufs[salt_pos].salt_buf[ 7]);
626 salt_buf2[0] = swap32 (salt_bufs[salt_pos].salt_buf[ 8]);
627 salt_buf2[1] = swap32 (salt_bufs[salt_pos].salt_buf[ 9]);
631 const u32 salt_len = salt_bufs[salt_pos].salt_len;

// Single-hash mode: preload the target digest words (in r0..r3 order) for
// the COMPARE_S early comparison.
637 const u32 search[4] =
639 digests_buf[digests_offset].digest_buf[DGST_R0],
640 digests_buf[digests_offset].digest_buf[DGST_R1],
641 digests_buf[digests_offset].digest_buf[DGST_R2],
642 digests_buf[digests_offset].digest_buf[DGST_R3]

// Main loop: one candidate per right-hand combinator word.
649 for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
651 const u32 pw_r_len = combs_buf[il_pos].pw_len;
653 const u32 pw_len = pw_l_len + pw_r_len;

// Right-hand password half for this iteration.
657 wordr0[0] = combs_buf[il_pos].i[0];
658 wordr0[1] = combs_buf[il_pos].i[1];
659 wordr0[2] = combs_buf[il_pos].i[2];
660 wordr0[3] = combs_buf[il_pos].i[3];
664 wordr1[0] = combs_buf[il_pos].i[4];
665 wordr1[1] = combs_buf[il_pos].i[5];
666 wordr1[2] = combs_buf[il_pos].i[6];
667 wordr1[3] = combs_buf[il_pos].i[7];

// BASE_LEFT mode: pad the right word and shift past the left word's length.
683 if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
685 append_0x80_2x4 (wordr0, wordr1, pw_r_len);
687 switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);

// Merge left and right halves.
692 w0[0] = wordl0[0] | wordr0[0];
693 w0[1] = wordl0[1] | wordr0[1];
694 w0[2] = wordl0[2] | wordr0[2];
695 w0[3] = wordl0[3] | wordr0[3];
699 w1[0] = wordl1[0] | wordr1[0];
700 w1[1] = wordl1[1] | wordr1[1];
701 w1[2] = wordl1[2] | wordr1[2];
702 w1[3] = wordl1[3] | wordr1[3];
706 w2[0] = wordl2[0] | wordr2[0];
707 w2[1] = wordl2[1] | wordr2[1];
708 w2[2] = wordl2[2] | wordr2[2];
709 w2[3] = wordl2[3] | wordr2[3];
713 w3[0] = wordl3[0] | wordr3[0];
714 w3[1] = wordl3[1] | wordr3[1];
715 w3[2] = wordl3[2] | wordr3[2];
716 w3[3] = wordl3[3] | wordr3[3];

// Byte-swap candidate to big-endian.
720 w0_t[0] = swap32 (w0[0]);
721 w0_t[1] = swap32 (w0[1]);
722 w0_t[2] = swap32 (w0[2]);
723 w0_t[3] = swap32 (w0[3]);
727 w1_t[0] = swap32 (w1[0]);
728 w1_t[1] = swap32 (w1[1]);
729 w1_t[2] = swap32 (w1[2]);
730 w1_t[3] = swap32 (w1[3]);
734 w2_t[0] = swap32 (w2[0]);
735 w2_t[1] = swap32 (w2[1]);
736 w2_t[2] = swap32 (w2[2]);
737 w2_t[3] = swap32 (w2[3]);
741 w3_t[0] = swap32 (w3[0]);
742 w3_t[1] = swap32 (w3[1]);
// Length in bits for the SHA-1 padding field (upper length word set
// outside the visible span — presumably 0).
744 w3_t[3] = pw_len * 8;

// digest = sha1(password)
754 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Block 1 of sha1(salt . hex(digest)): salt then lowercase hex of a..c
// (a..e presumably snapshot digest[0..4]; assignments not visible here).
768 w0_t[0] = salt_buf0[0];
769 w0_t[1] = salt_buf0[1];
770 w0_t[2] = salt_buf0[2];
771 w0_t[3] = salt_buf0[3];
772 w1_t[0] = salt_buf1[0];
773 w1_t[1] = salt_buf1[1];
774 w1_t[2] = salt_buf1[2];
775 w1_t[3] = salt_buf1[3];
776 w2_t[0] = salt_buf2[0];
777 w2_t[1] = salt_buf2[1];
778 w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
779 | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
780 w2_t[3] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
781 | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
782 w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
783 | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
784 w3_t[1] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
785 | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
786 w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
787 | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
788 w3_t[3] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
789 | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
797 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Block 2: hex of d and e, 0x80 terminator, then the total message length
// in bits: salt_len + 40 hex characters (2 per byte of a 20-byte digest).
799 w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
800 | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
801 w0_t[1] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
802 | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
803 w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
804 | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
805 w0_t[3] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
806 | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
807 w1_t[0] = 0x80000000;
818 w3_t[3] = (salt_len + 40) * 8;
820 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Second salt+hex round — same layout, refreshed a..e snapshots.
828 w0_t[0] = salt_buf0[0];
829 w0_t[1] = salt_buf0[1];
830 w0_t[2] = salt_buf0[2];
831 w0_t[3] = salt_buf0[3];
832 w1_t[0] = salt_buf1[0];
833 w1_t[1] = salt_buf1[1];
834 w1_t[2] = salt_buf1[2];
835 w1_t[3] = salt_buf1[3];
836 w2_t[0] = salt_buf2[0];
837 w2_t[1] = salt_buf2[1];
838 w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
839 | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
840 w2_t[3] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
841 | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
842 w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
843 | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
844 w3_t[1] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
845 | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
846 w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
847 | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
848 w3_t[3] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
849 | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
857 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
859 w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
860 | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
861 w0_t[1] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
862 | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
863 w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
864 | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
865 w0_t[3] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
866 | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
867 w1_t[0] = 0x80000000;
878 w3_t[3] = (salt_len + 40) * 8;
880 sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

// Digest words in hashcat's r0..r3 comparison order, matched against
// search[] by the COMPARE_S include (not visible in this span).
882 const u32 r0 = digest[3];
883 const u32 r1 = digest[4];
884 const u32 r2 = digest[2];
885 const u32 r3 = digest[1];
891 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
895 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08400_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)