/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
// incompatible due to data-dependent code
//#define NEW_SIMD_CODE
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "OpenCL/types_ocl.c"
#include "OpenCL/common.c"
#include "include/rp_kernel.h"
#include "OpenCL/rp.c"
#include "OpenCL/simd.c"
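
// GETSHIFTEDINT reads an unaligned 32-bit word that starts at byte offset n of
// the u32 array a: amd_bytealign concatenates a[(n/4)+1]:a[(n/4)] and shifts
// right by (n & 3) bytes. Example: n = 6 yields bytes 6..9, i.e. the upper half
// of a[1] combined with the lower half of a[2].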
#define GETSHIFTEDINT(a,n) amd_bytealign ((a)[((n)/4)+1], (a)[((n)/4)+0], (n))
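
// SETSHIFTEDINT stores the 32-bit value v at byte offset n of a: v is shifted
// into a 64-bit lane, the low word is OR-ed into a[(n/4)] and the spill-over
// overwrites a[(n/4)+1], so it assumes writes proceed at increasing offsets
// into a zeroed buffer.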
#define SETSHIFTEDINT(a,n,v)      \
{                                 \
  const u32 s = ((n) & 3) * 8;    \
  const u64 x = (u64) (v) << s;   \
  (a)[((n)/4)+0] |= x;            \
  (a)[((n)/4)+1]  = x >> 32;      \
}
__constant u32 theMagicArray[64] =
{
  0x1451ac91,0x4354679f,0xe03be724,0xc27b7428,0xeb133386,0x5ccb4f5a,0x37730a08,0x2f1c5d0e,
  0xe5e68f33,0xddae9bf8,0x8d4bf216,0xdcd4e12c,0x9ddfcbb0,0x176d70d4,0x3f424df9,0x94111b9b,
  0x9bc15b9f,0x039d0506,0x8a135e9d,0xe86a9a1e,0x17147cd9,0xf62ac758,0x0a6399a1,0xc370fdd7,
  0x13745ef6,0x040bc903,0x26f79826,0x2593928a,0x230da2b0,0x6d7963ed,0x3cfa3213,0xa39a0235,
  0x0a8eddb3,0xc351bf24,0x9f55cd7c,0x4c94af37,0x82520829,0x374e3bb2,0x9107179f,0xcdfd3b11,
  0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0
};
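
// A slice of theMagicArray is spliced into the message below: the first SHA-1
// digest of password|salt selects offsetMagicArray (start byte) and
// lengthMagicArray (byte count), and that slice is appended to the password
// before the remaining SHA-1 passes.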
void swap_buffer (u32 final[16])
{
  final[ 0] = swap32 (final[ 0]);
  final[ 1] = swap32 (final[ 1]);
  final[ 2] = swap32 (final[ 2]);
  final[ 3] = swap32 (final[ 3]);
  final[ 4] = swap32 (final[ 4]);
  final[ 5] = swap32 (final[ 5]);
  final[ 6] = swap32 (final[ 6]);
  final[ 7] = swap32 (final[ 7]);
  final[ 8] = swap32 (final[ 8]);
  final[ 9] = swap32 (final[ 9]);
  final[10] = swap32 (final[10]);
  final[11] = swap32 (final[11]);
  final[12] = swap32 (final[12]);
  final[13] = swap32 (final[13]);
  final[14] = swap32 (final[14]);
  final[15] = swap32 (final[15]);
}
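
// Standard SHA-1 compression of one 64-byte block, supplied as 16 big-endian
// u32 words in w0..w3, updating the five-word running state in digest.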
void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[5])

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
__kernel void m07800_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = get_local_id (0);

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32 pw_buf0[4];
  u32 pw_buf1[4];

  pw_buf0[0] = pws[gid].i[0];
  pw_buf0[1] = pws[gid].i[1];
  pw_buf0[2] = pws[gid].i[2];
  pw_buf0[3] = pws[gid].i[3];
  pw_buf1[0] = pws[gid].i[4];
  pw_buf1[1] = pws[gid].i[5];
  pw_buf1[2] = pws[gid].i[6];
  pw_buf1[3] = pws[gid].i[7];

  const u32 pw_len = pws[gid].pw_len;
  u32 salt_buf[8];

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];
  salt_buf[4] = salt_bufs[salt_pos].salt_buf[4];
  salt_buf[5] = salt_bufs[salt_pos].salt_buf[5];
  salt_buf[6] = salt_bufs[salt_pos].salt_buf[6];
  salt_buf[7] = salt_bufs[salt_pos].salt_buf[7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
  for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE)

    const u32x out_len = apply_rules_vect (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w0, w1);
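
    // the rule engine writes the mutated password candidate into w0/w1 and
    // returns its length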
    switch_buffer_by_offset_le_VV (s0, s1, s2, s3, out_len);

    const u32x pw_salt_len = out_len + salt_len;
    final[ 0] = swap32 (w0[0] | s0[0]);
    final[ 1] = swap32 (w0[1] | s0[1]);
    final[ 2] = swap32 (w0[2] | s0[2]);
    final[ 3] = swap32 (w0[3] | s0[3]);
    final[ 4] = swap32 (w1[0] | s1[0]);
    final[ 5] = swap32 (w1[1] | s1[1]);
    final[ 6] = swap32 (w1[2] | s1[2]);
    final[ 7] = swap32 (w1[3] | s1[3]);
    final[ 8] = swap32 (w2[0] | s2[0]);
    final[ 9] = swap32 (w2[1] | s2[1]);
    final[10] = swap32 (w2[2] | s2[2]);
    final[11] = swap32 (w2[3] | s2[3]);
    final[12] = swap32 (w3[0] | s3[0]);
    final[13] = swap32 (w3[1] | s3[1]);
    final[15] = pw_salt_len * 8;
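
    // first pass: a single SHA-1 block over password | salt, stored as
    // big-endian words with the message bit length in the last word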
    sha1_transform (&final[0], &final[4], &final[8], &final[12], digest);

    // prepare magic array range

    u32 lengthMagicArray = 0x20;
    u32 offsetMagicArray = 0;

    lengthMagicArray += ((digest[0] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 16) & 0xff) % 6;
    offsetMagicArray += ((digest[2] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[2] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 0) & 0xff) % 8;
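
    // ten "% 6" byte terms keep lengthMagicArray within [0x20, 0x20 + 50] = [32, 82],
    // and ten "% 8" byte terms keep offsetMagicArray within [0, 70], so
    // offset + length never exceeds the 256 bytes of theMagicArray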
    for (int i = 0; i < 32; i++) final[i] = 0;
    u32 final_len = out_len;

    u32 i;
    for (i = 0; i < lengthMagicArray - 4; i += 4)
    {
      const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i);

      SETSHIFTEDINT (final, final_len + i, tmp);
    }

    const u32 mask = 0xffffffff >> (((i - lengthMagicArray) & 3) * 8);

    const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i) & mask;

    SETSHIFTEDINT (final, final_len + i, tmp);
    final_len += lengthMagicArray;
    for (i = 0; i < salt_len + 1; i += 4) // +1 for the 0x80
    {
      const u32 tmp = salt_buf[i / 4]; // attention: salt_buf is u32[], not char[]

      SETSHIFTEDINT (final, final_len + i, tmp);
    }
    final_len += salt_len;
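
    // final is expected to hold password | magic-array slice | salt at this
    // point (plus the 0x80 terminator copied in after the salt); hash it in
    // 64-byte blocks, with the total bit length going into word 15 of the
    // last block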
    int left;
    int off;

    for (left = final_len, off = 0; left >= 56; left -= 64, off += 16)
    {
      swap_buffer (&final[off]);

      sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);
    }

    swap_buffer (&final[off]);

    final[off + 15] = final_len * 8;

    sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);
    COMPARE_M_SIMD (digest[3], digest[4], digest[2], digest[1]);
__kernel void m07800_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void m07800_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void m07800_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = get_local_id (0);

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32 pw_buf0[4];
  u32 pw_buf1[4];

  pw_buf0[0] = pws[gid].i[0];
  pw_buf0[1] = pws[gid].i[1];
  pw_buf0[2] = pws[gid].i[2];
  pw_buf0[3] = pws[gid].i[3];
  pw_buf1[0] = pws[gid].i[4];
  pw_buf1[1] = pws[gid].i[5];
  pw_buf1[2] = pws[gid].i[6];
  pw_buf1[3] = pws[gid].i[7];

  const u32 pw_len = pws[gid].pw_len;
  u32 salt_buf[8];

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];
  salt_buf[4] = salt_bufs[salt_pos].salt_buf[4];
  salt_buf[5] = salt_bufs[salt_pos].salt_buf[5];
  salt_buf[6] = salt_bufs[salt_pos].salt_buf[6];
  salt_buf[7] = salt_bufs[salt_pos].salt_buf[7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };
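
  // target digest words in DGST_R0..DGST_R3 order; this matches the argument
  // order of COMPARE_S_SIMD at the end of the inner loop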
  for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE)

    const u32x out_len = apply_rules_vect (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w0, w1);

    switch_buffer_by_offset_le_VV (s0, s1, s2, s3, out_len);

    const u32x pw_salt_len = out_len + salt_len;
    final[ 0] = swap32 (w0[0] | s0[0]);
    final[ 1] = swap32 (w0[1] | s0[1]);
    final[ 2] = swap32 (w0[2] | s0[2]);
    final[ 3] = swap32 (w0[3] | s0[3]);
    final[ 4] = swap32 (w1[0] | s1[0]);
    final[ 5] = swap32 (w1[1] | s1[1]);
    final[ 6] = swap32 (w1[2] | s1[2]);
    final[ 7] = swap32 (w1[3] | s1[3]);
    final[ 8] = swap32 (w2[0] | s2[0]);
    final[ 9] = swap32 (w2[1] | s2[1]);
    final[10] = swap32 (w2[2] | s2[2]);
    final[11] = swap32 (w2[3] | s2[3]);
    final[12] = swap32 (w3[0] | s3[0]);
    final[13] = swap32 (w3[1] | s3[1]);
    final[15] = pw_salt_len * 8;

    sha1_transform (&final[0], &final[4], &final[8], &final[12], digest);
    // prepare magic array range

    u32 lengthMagicArray = 0x20;
    u32 offsetMagicArray = 0;

    lengthMagicArray += ((digest[0] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 16) & 0xff) % 6;
    offsetMagicArray += ((digest[2] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[2] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 0) & 0xff) % 8;
    for (int i = 0; i < 32; i++) final[i] = 0;
    u32 final_len = out_len;

    u32 i;
    for (i = 0; i < lengthMagicArray - 4; i += 4)
    {
      const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i);

      SETSHIFTEDINT (final, final_len + i, tmp);
    }

    const u32 mask = 0xffffffff >> (((i - lengthMagicArray) & 3) * 8);

    const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i) & mask;

    SETSHIFTEDINT (final, final_len + i, tmp);
    final_len += lengthMagicArray;
    for (i = 0; i < salt_len + 1; i += 4) // +1 for the 0x80
    {
      const u32 tmp = salt_buf[i / 4]; // attention: salt_buf is u32[], not char[]

      SETSHIFTEDINT (final, final_len + i, tmp);
    }
    final_len += salt_len;
    int left;
    int off;

    for (left = final_len, off = 0; left >= 56; left -= 64, off += 16)
    {
      swap_buffer (&final[off]);

      sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);
    }

    swap_buffer (&final[off]);

    final[off + 15] = final_len * 8;

    sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);
    COMPARE_S_SIMD (digest[3], digest[4], digest[2], digest[1]);

__kernel void m07800_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

__kernel void m07800_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)