/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "common_nv.c"
#include "include/rp_gpu.h"

#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
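
// VECT_COMPARE_M / VECT_COMPARE_S select the digest comparison code that is
// #include'd at the end of the multi-hash (_m*) and single-hash (_s*) kernels.

// Helpers for reading and writing a 32-bit word at an arbitrary *byte* offset
// of a buffer addressed as u32[]: bytealign() is the generic fallback used by
// the GETSHIFTEDINT macro below; on sm_20+ __byte_perm and on sm_35+ the
// funnel-shift instruction do the same job in a single instruction.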
__device__ static u32 bytealign (u32 src0, u32 src1, u32 src2)
{
  return (u32) (((((u64)src0) << 32) | (u64)src1) >> ((src2 & 3)*8));
}
#if __CUDA_ARCH__ >= 350
#define GETSHIFTEDINT(a,n) __funnelshift_r ((a)[((n)/4)+0], (a)[((n)/4)+1], (n & 3) * 8)
#elif __CUDA_ARCH__ >= 200
#define GETSHIFTEDINT(a,n) __byte_perm ((a)[((n)/4)+0], (a)[((n)/4)+1], (0x76543210 >> ((n & 3) * 4)) & 0xffff)
#else
#define GETSHIFTEDINT(a,n) bytealign ((a)[((n)/4)+1], (a)[((n)/4)+0], n)
#endif
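
// Illustrative example (values made up, not part of the kernel): with
//   u32 a[2] = { 0x33221100, 0x77665544 };
// GETSHIFTEDINT (a, 1) yields 0x44332211, i.e. the little-endian 32-bit word
// that starts at byte offset 1 of the packed byte stream. All three variants
// agree on this result.

// Write counterpart: SETSHIFTEDINT stores the 32-bit value v at byte offset n.
// The low part is OR-ed into the word containing the offset (its upper bytes
// are expected to still be zero); the spill-over is assigned to the following
// word, which also zeroes that word's remaining bytes for the next store.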
#define SETSHIFTEDINT(a,n,v) \
{ \
  const u32 s = ((n) & 3) * 8; \
  const u64 x = (u64) (v) << s; \
  (a)[((n)/4)+0] |= x; \
  (a)[((n)/4)+1] = x >> 32; \
}
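
// Fixed "magic" table of the SAP CODVN F/G (PASSCODE) scheme (hash mode 7800).
// A digest-dependent slice of it is appended to the password below; only the
// first 40 words (160 bytes) carry data, and the offsets computed later never
// reach the trailing zero words.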
__device__ __constant__ u32 theMagicArray[64] =
{
  0x1451ac91,0x4354679f,0xe03be724,0xc27b7428,0xeb133386,0x5ccb4f5a,0x37730a08,0x2f1c5d0e,
  0xe5e68f33,0xddae9bf8,0x8d4bf216,0xdcd4e12c,0x9ddfcbb0,0x176d70d4,0x3f424df9,0x94111b9b,
  0x9bc15b9f,0x039d0506,0x8a135e9d,0xe86a9a1e,0x17147cd9,0xf62ac758,0x0a6399a1,0xc370fdd7,
  0x13745ef6,0x040bc903,0x26f79826,0x2593928a,0x230da2b0,0x6d7963ed,0x3cfa3213,0xa39a0235,
  0x0a8eddb3,0xc351bf24,0x9f55cd7c,0x4c94af37,0x82520829,0x374e3bb2,0x9107179f,0xcdfd3b11,
  0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0
};
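
// Byte-swap all 16 words of one 64-byte block: the message buffer is assembled
// in little-endian byte order and has to be converted to the big-endian word
// order SHA-1 expects (and converted back after each transform).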
__device__ static void swap_buffer (u32x final[16])
{
  final[ 0] = swap_workaround (final[ 0]);
  final[ 1] = swap_workaround (final[ 1]);
  final[ 2] = swap_workaround (final[ 2]);
  final[ 3] = swap_workaround (final[ 3]);
  final[ 4] = swap_workaround (final[ 4]);
  final[ 5] = swap_workaround (final[ 5]);
  final[ 6] = swap_workaround (final[ 6]);
  final[ 7] = swap_workaround (final[ 7]);
  final[ 8] = swap_workaround (final[ 8]);
  final[ 9] = swap_workaround (final[ 9]);
  final[10] = swap_workaround (final[10]);
  final[11] = swap_workaround (final[11]);
  final[12] = swap_workaround (final[12]);
  final[13] = swap_workaround (final[13]);
  final[14] = swap_workaround (final[14]);
  final[15] = swap_workaround (final[15]);
}
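
// Plain SHA-1 compression function: processes the single 64-byte block held in
// w0..w3 (as big-endian words) and updates digest[0..4] in place.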
__device__ static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
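
// Up to 1024 word-mangling rules; the host copies the active rule set into this
// constant buffer and the kernels apply rule il_pos to each password candidate
// via apply_rules().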
__device__ __constant__ gpu_rule_t c_rules[1024];

extern "C" __global__ void __launch_bounds__ (256, 1) m07800_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = threadIdx.x;

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];
  salt_buf[4] = salt_bufs[salt_pos].salt_buf[4];
  salt_buf[5] = salt_bufs[salt_pos].salt_buf[5];
  salt_buf[6] = salt_bufs[salt_pos].salt_buf[6];
  salt_buf[7] = salt_bufs[salt_pos].salt_buf[7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    const u32 out_len = apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    switch_buffer_by_offset (s0, s1, s2, s3, out_len);

    const u32 pw_salt_len = out_len + salt_len;
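
    // Merge the rule-mangled password (w0..w3) with the shifted salt (s0..s3)
    // into one message buffer, byte-swapped to big-endian for SHA-1; final[15]
    // carries the message bit length for this first single-block transform.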
    final[ 0] = swap_workaround (w0[0] | s0[0]);
    final[ 1] = swap_workaround (w0[1] | s0[1]);
    final[ 2] = swap_workaround (w0[2] | s0[2]);
    final[ 3] = swap_workaround (w0[3] | s0[3]);
    final[ 4] = swap_workaround (w1[0] | s1[0]);
    final[ 5] = swap_workaround (w1[1] | s1[1]);
    final[ 6] = swap_workaround (w1[2] | s1[2]);
    final[ 7] = swap_workaround (w1[3] | s1[3]);
    final[ 8] = swap_workaround (w2[0] | s2[0]);
    final[ 9] = swap_workaround (w2[1] | s2[1]);
    final[10] = swap_workaround (w2[2] | s2[2]);
    final[11] = swap_workaround (w2[3] | s2[3]);
    final[12] = swap_workaround (w3[0] | s3[0]);
    final[13] = swap_workaround (w3[1] | s3[1]);

    final[15] = pw_salt_len * 8;

    sha1_transform (&final[0], &final[4], &final[8], &final[12], digest);

    // prepare magic array range

    u32x lengthMagicArray = 0x20;
    u32x offsetMagicArray = 0;

    lengthMagicArray += ((digest[0] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 16) & 0xff) % 6;
    offsetMagicArray += ((digest[2] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[2] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 0) & 0xff) % 8;
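
    // Ten "% 6" terms and ten "% 8" terms: lengthMagicArray ends up in the
    // range [32, 82] and offsetMagicArray in [0, 70], so at most bytes 0..151
    // of theMagicArray are ever copied.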
    for (int i = 0; i < 64; i++) final[i] = 0;

    u32 final_len = out_len;

    for (i = 0; i < lengthMagicArray - 4; i += 4)
    {
      const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i);

      SETSHIFTEDINT (final, final_len + i, tmp);
    }

    const u32 mask = 0xffffffff >> (((i - lengthMagicArray) & 3) * 8);

    const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i) & mask;

    SETSHIFTEDINT (final, final_len + i, tmp);

    final_len += lengthMagicArray;

    for (i = 0; i < salt_len + 1; i += 4) // +1 for the 0x80
    {
      const u32 tmp = salt_buf[i / 4]; // attention, int[] not char[]

      SETSHIFTEDINT (final, final_len + i, tmp);
    }

    final_len += salt_len;
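
    // final now holds password || magic-array slice || salt; the extra byte
    // copied with the salt is presumably the 0x80 padding byte (see the "+1"
    // comment above). Hash it in 64-byte blocks; the last block carries the
    // total bit length in word 15.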
    for (left = final_len, off = 0; left >= 56; left -= 64, off += 16)
    {
      swap_buffer (&final[off]);

      sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);
    }

    swap_buffer (&final[off]);

    final[off + 15] = final_len * 8;

    sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
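
    // Digest words reordered into the DGST_R0..DGST_R3 layout expected by the
    // comparison include.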
    #include VECT_COMPARE_M

extern "C" __global__ void __launch_bounds__ (256, 1) m07800_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

extern "C" __global__ void __launch_bounds__ (256, 1) m07800_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

extern "C" __global__ void __launch_bounds__ (256, 1) m07800_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = threadIdx.x;

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf[3] = salt_bufs[salt_pos].salt_buf[3];
  salt_buf[4] = salt_bufs[salt_pos].salt_buf[4];
  salt_buf[5] = salt_bufs[salt_pos].salt_buf[5];
  salt_buf[6] = salt_bufs[salt_pos].salt_buf[6];
  salt_buf[7] = salt_bufs[salt_pos].salt_buf[7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
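
  // The single-hash kernels additionally load the target digest so the
  // VECT_COMPARE_S include can compare each candidate against it directly.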
  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    const u32 out_len = apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    switch_buffer_by_offset (s0, s1, s2, s3, out_len);

    const u32 pw_salt_len = out_len + salt_len;

    final[ 0] = swap_workaround (w0[0] | s0[0]);
    final[ 1] = swap_workaround (w0[1] | s0[1]);
    final[ 2] = swap_workaround (w0[2] | s0[2]);
    final[ 3] = swap_workaround (w0[3] | s0[3]);
    final[ 4] = swap_workaround (w1[0] | s1[0]);
    final[ 5] = swap_workaround (w1[1] | s1[1]);
    final[ 6] = swap_workaround (w1[2] | s1[2]);
    final[ 7] = swap_workaround (w1[3] | s1[3]);
    final[ 8] = swap_workaround (w2[0] | s2[0]);
    final[ 9] = swap_workaround (w2[1] | s2[1]);
    final[10] = swap_workaround (w2[2] | s2[2]);
    final[11] = swap_workaround (w2[3] | s2[3]);
    final[12] = swap_workaround (w3[0] | s3[0]);
    final[13] = swap_workaround (w3[1] | s3[1]);

    final[15] = pw_salt_len * 8;

    sha1_transform (&final[0], &final[4], &final[8], &final[12], digest);

    // prepare magic array range

    u32x lengthMagicArray = 0x20;
    u32x offsetMagicArray = 0;

    lengthMagicArray += ((digest[0] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[0] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 16) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 8) & 0xff) % 6;
    lengthMagicArray += ((digest[1] >> 0) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 24) & 0xff) % 6;
    lengthMagicArray += ((digest[2] >> 16) & 0xff) % 6;
    offsetMagicArray += ((digest[2] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[2] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[3] >> 0) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 24) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 16) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 8) & 0xff) % 8;
    offsetMagicArray += ((digest[4] >> 0) & 0xff) % 8;

    for (int i = 0; i < 64; i++) final[i] = 0;

    u32 final_len = out_len;

    for (i = 0; i < lengthMagicArray - 4; i += 4)
    {
      const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i);

      SETSHIFTEDINT (final, final_len + i, tmp);
    }

    const u32 mask = 0xffffffff >> (((i - lengthMagicArray) & 3) * 8);

    const u32 tmp = GETSHIFTEDINT (theMagicArray, offsetMagicArray + i) & mask;

    SETSHIFTEDINT (final, final_len + i, tmp);

    final_len += lengthMagicArray;

    for (i = 0; i < salt_len + 1; i += 4) // +1 for the 0x80
    {
      const u32 tmp = salt_buf[i / 4]; // attention, int[] not char[]

      SETSHIFTEDINT (final, final_len + i, tmp);
    }

    final_len += salt_len;

    for (left = final_len, off = 0; left >= 56; left -= 64, off += 16)
    {
      swap_buffer (&final[off]);

      sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);
    }

    swap_buffer (&final[off]);

    final[off + 15] = final_len * 8;

    sha1_transform (&final[off + 0], &final[off + 4], &final[off + 8], &final[off + 12], digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_S

extern "C" __global__ void __launch_bounds__ (256, 1) m07800_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

extern "C" __global__ void __launch_bounds__ (256, 1) m07800_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)