/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
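
/*
 * Hash mode 7100: PBKDF2-HMAC-SHA512 as used by OS X v10.8+ user
 * password hashes. The work is split into _init / _loop / _comp
 * kernels so the host can schedule the PBKDF2 iterations in
 * restartable batches.
 */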

#define _PBKDF2_SHA512_

#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"

#include "common_nv.c"

#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
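
/*
 * SHA-512 round constants K[0..79] (FIPS 180-4): the first 64 bits of
 * the fractional parts of the cube roots of the first eighty primes.
 */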
__device__ __constant__ u64 k[80] =
{
  SHA512C00, SHA512C01, SHA512C02, SHA512C03,
  SHA512C04, SHA512C05, SHA512C06, SHA512C07,
  SHA512C08, SHA512C09, SHA512C0a, SHA512C0b,
  SHA512C0c, SHA512C0d, SHA512C0e, SHA512C0f,
  SHA512C10, SHA512C11, SHA512C12, SHA512C13,
  SHA512C14, SHA512C15, SHA512C16, SHA512C17,
  SHA512C18, SHA512C19, SHA512C1a, SHA512C1b,
  SHA512C1c, SHA512C1d, SHA512C1e, SHA512C1f,
  SHA512C20, SHA512C21, SHA512C22, SHA512C23,
  SHA512C24, SHA512C25, SHA512C26, SHA512C27,
  SHA512C28, SHA512C29, SHA512C2a, SHA512C2b,
  SHA512C2c, SHA512C2d, SHA512C2e, SHA512C2f,
  SHA512C30, SHA512C31, SHA512C32, SHA512C33,
  SHA512C34, SHA512C35, SHA512C36, SHA512C37,
  SHA512C38, SHA512C39, SHA512C3a, SHA512C3b,
  SHA512C3c, SHA512C3d, SHA512C3e, SHA512C3f,
  SHA512C40, SHA512C41, SHA512C42, SHA512C43,
  SHA512C44, SHA512C45, SHA512C46, SHA512C47,
  SHA512C48, SHA512C49, SHA512C4a, SHA512C4b,
  SHA512C4c, SHA512C4d, SHA512C4e, SHA512C4f,
};
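
/*
 * Expand the next sixteen message-schedule words in place:
 * w[t] = s1 (w[t-2]) + w[t-7] + s0 (w[t-15]) + w[t-16]
 */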
#define ROUND_EXPAND()                            \
{                                                 \
  w0_t = SHA512_EXPAND (we_t, w9_t, w1_t, w0_t);  \
  w1_t = SHA512_EXPAND (wf_t, wa_t, w2_t, w1_t);  \
  w2_t = SHA512_EXPAND (w0_t, wb_t, w3_t, w2_t);  \
  w3_t = SHA512_EXPAND (w1_t, wc_t, w4_t, w3_t);  \
  w4_t = SHA512_EXPAND (w2_t, wd_t, w5_t, w4_t);  \
  w5_t = SHA512_EXPAND (w3_t, we_t, w6_t, w5_t);  \
  w6_t = SHA512_EXPAND (w4_t, wf_t, w7_t, w6_t);  \
  w7_t = SHA512_EXPAND (w5_t, w0_t, w8_t, w7_t);  \
  w8_t = SHA512_EXPAND (w6_t, w1_t, w9_t, w8_t);  \
  w9_t = SHA512_EXPAND (w7_t, w2_t, wa_t, w9_t);  \
  wa_t = SHA512_EXPAND (w8_t, w3_t, wb_t, wa_t);  \
  wb_t = SHA512_EXPAND (w9_t, w4_t, wc_t, wb_t);  \
  wc_t = SHA512_EXPAND (wa_t, w5_t, wd_t, wc_t);  \
  wd_t = SHA512_EXPAND (wb_t, w6_t, we_t, wd_t);  \
  we_t = SHA512_EXPAND (wc_t, w7_t, wf_t, we_t);  \
  wf_t = SHA512_EXPAND (wd_t, w8_t, w0_t, wf_t);  \
}
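
/*
 * Sixteen SHA-512 rounds; rotating the a..h macro arguments avoids
 * shuffling the working variables between steps.
 */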
#define ROUND_STEP(i)                                                                  \
{                                                                                      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w0_t, k[i +  0]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w1_t, k[i +  1]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, w2_t, k[i +  2]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, w3_t, k[i +  3]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, w4_t, k[i +  4]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, w5_t, k[i +  5]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, w6_t, k[i +  6]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, w7_t, k[i +  7]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w8_t, k[i +  8]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w9_t, k[i +  9]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, wa_t, k[i + 10]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, wb_t, k[i + 11]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, wc_t, k[i + 12]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, wd_t, k[i + 13]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, we_t, k[i + 14]);      \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, wf_t, k[i + 15]);      \
}
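
/*
 * One SHA-512 compression of a single 128-byte block: 80 rounds, then
 * the working variables are folded back into the chaining state.
 */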
__device__ static void sha512_transform (const u64 w[16], u64 dgst[8])
{
  u64 a = dgst[0];
  u64 b = dgst[1];
  u64 c = dgst[2];
  u64 d = dgst[3];
  u64 e = dgst[4];
  u64 f = dgst[5];
  u64 g = dgst[6];
  u64 h = dgst[7];

  u64 w0_t = w[ 0];
  u64 w1_t = w[ 1];
  u64 w2_t = w[ 2];
  u64 w3_t = w[ 3];
  u64 w4_t = w[ 4];
  u64 w5_t = w[ 5];
  u64 w6_t = w[ 6];
  u64 w7_t = w[ 7];
  u64 w8_t = w[ 8];
  u64 w9_t = w[ 9];
  u64 wa_t = w[10];
  u64 wb_t = w[11];
  u64 wc_t = w[12];
  u64 wd_t = w[13];
  u64 we_t = w[14];
  u64 wf_t = w[15];

  ROUND_STEP (0);

  for (int i = 16; i < 80; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  }

  dgst[0] += a;
  dgst[1] += b;
  dgst[2] += c;
  dgst[3] += d;
  dgst[4] += e;
  dgst[5] += f;
  dgst[6] += g;
  dgst[7] += h;
}

/*
 * One full HMAC-SHA512 over the pre-padded message block w1: inner
 * hash from the ipad state, then the outer hash over the inner digest.
 */
__device__ static void hmac_run (const u64 w1[16], const u64 ipad[8], const u64 opad[8], u64 dgst[8])
{
  dgst[0] = ipad[0];
  dgst[1] = ipad[1];
  dgst[2] = ipad[2];
  dgst[3] = ipad[3];
  dgst[4] = ipad[4];
  dgst[5] = ipad[5];
  dgst[6] = ipad[6];
  dgst[7] = ipad[7];

  sha512_transform (w1, dgst);

  u64 w[16];

  w[ 0] = dgst[0];
  w[ 1] = dgst[1];
  w[ 2] = dgst[2];
  w[ 3] = dgst[3];
  w[ 4] = dgst[4];
  w[ 5] = dgst[5];
  w[ 6] = dgst[6];
  w[ 7] = dgst[7];
  w[ 8] = 0x8000000000000000; // 0x80 padding byte directly after the 64-byte inner digest
  w[ 9] = 0;
  w[10] = 0;
  w[11] = 0;
  w[12] = 0;
  w[13] = 0;
  w[14] = 0;
  w[15] = (128 + 64) * 8;     // outer message length in bits: opad block + inner digest

  dgst[0] = opad[0];
  dgst[1] = opad[1];
  dgst[2] = opad[2];
  dgst[3] = opad[3];
  dgst[4] = opad[4];
  dgst[5] = opad[5];
  dgst[6] = opad[6];
  dgst[7] = opad[7];

  sha512_transform (w, dgst);
}
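
/*
 * Precompute the HMAC-SHA512 inner and outer states from the 128-byte
 * key block (the zero-padded password).
 */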
__device__ static void hmac_init (u64 w[16], u64 ipad[8], u64 opad[8])
{
  w[ 0] ^= 0x3636363636363636;
  w[ 1] ^= 0x3636363636363636;
  w[ 2] ^= 0x3636363636363636;
  w[ 3] ^= 0x3636363636363636;
  w[ 4] ^= 0x3636363636363636;
  w[ 5] ^= 0x3636363636363636;
  w[ 6] ^= 0x3636363636363636;
  w[ 7] ^= 0x3636363636363636;
  w[ 8] ^= 0x3636363636363636;
  w[ 9] ^= 0x3636363636363636;
  w[10] ^= 0x3636363636363636;
  w[11] ^= 0x3636363636363636;
  w[12] ^= 0x3636363636363636;
  w[13] ^= 0x3636363636363636;
  w[14] ^= 0x3636363636363636;
  w[15] ^= 0x3636363636363636;

  ipad[0] = SHA512M_A;
  ipad[1] = SHA512M_B;
  ipad[2] = SHA512M_C;
  ipad[3] = SHA512M_D;
  ipad[4] = SHA512M_E;
  ipad[5] = SHA512M_F;
  ipad[6] = SHA512M_G;
  ipad[7] = SHA512M_H;

  sha512_transform (w, ipad);
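
  /*
   * w still holds (key ^ ipad); since 0x36 ^ 0x5c == 0x6a, one more
   * XOR turns it into (key ^ opad) without reloading the key.
   */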
  w[ 0] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 1] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 2] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 3] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 4] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 5] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 6] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 7] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 8] ^= 0x6a6a6a6a6a6a6a6a;
  w[ 9] ^= 0x6a6a6a6a6a6a6a6a;
  w[10] ^= 0x6a6a6a6a6a6a6a6a;
  w[11] ^= 0x6a6a6a6a6a6a6a6a;
  w[12] ^= 0x6a6a6a6a6a6a6a6a;
  w[13] ^= 0x6a6a6a6a6a6a6a6a;
  w[14] ^= 0x6a6a6a6a6a6a6a6a;
  w[15] ^= 0x6a6a6a6a6a6a6a6a;

  opad[0] = SHA512M_A;
  opad[1] = SHA512M_B;
  opad[2] = SHA512M_C;
  opad[3] = SHA512M_D;
  opad[4] = SHA512M_E;
  opad[5] = SHA512M_F;
  opad[6] = SHA512M_G;
  opad[7] = SHA512M_H;

  sha512_transform (w, opad);
}
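
/*
 * _init kernel: build the HMAC ipad/opad states from the password and
 * compute U1 over the salt block; U1 initializes both the running
 * digest and the XOR accumulator in tmps[].
 */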
extern "C" __global__ void __launch_bounds__ (256, 1) m07100_init (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, pbkdf2_sha512_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const pbkdf2_sha512_t *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  // load the password (the HMAC key); swap_workaround converts the
  // little-endian input words to SHA-512's big-endian order

  u32 w0[4];

  w0[0] = swap_workaround (pws[gid].i[ 0]);
  w0[1] = swap_workaround (pws[gid].i[ 1]);
  w0[2] = swap_workaround (pws[gid].i[ 2]);
  w0[3] = swap_workaround (pws[gid].i[ 3]);

  u32 w1[4];

  w1[0] = swap_workaround (pws[gid].i[ 4]);
  w1[1] = swap_workaround (pws[gid].i[ 5]);
  w1[2] = swap_workaround (pws[gid].i[ 6]);
  w1[3] = swap_workaround (pws[gid].i[ 7]);

  u32 w2[4];

  w2[0] = swap_workaround (pws[gid].i[ 8]);
  w2[1] = swap_workaround (pws[gid].i[ 9]);
  w2[2] = swap_workaround (pws[gid].i[10]);
  w2[3] = swap_workaround (pws[gid].i[11]);

  u32 w3[4];

  w3[0] = swap_workaround (pws[gid].i[12]);
  w3[1] = swap_workaround (pws[gid].i[13]);
  w3[2] = swap_workaround (pws[gid].i[14]);
  w3[3] = swap_workaround (pws[gid].i[15]);

  // salt block; the 4-byte PBKDF2 block index (and the 0x80 padding
  // byte) are expected to be appended to the salt on the host side,
  // hence the +4 in the length word below

  u32 salt_len = salt_bufs[salt_pos].salt_len;

  u64 esalt_buf[16];

  esalt_buf[ 0] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[ 0]), swap_workaround (esalt_bufs[salt_pos].salt_buf[ 1]));
  esalt_buf[ 1] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[ 2]), swap_workaround (esalt_bufs[salt_pos].salt_buf[ 3]));
  esalt_buf[ 2] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[ 4]), swap_workaround (esalt_bufs[salt_pos].salt_buf[ 5]));
  esalt_buf[ 3] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[ 6]), swap_workaround (esalt_bufs[salt_pos].salt_buf[ 7]));
  esalt_buf[ 4] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[ 8]), swap_workaround (esalt_bufs[salt_pos].salt_buf[ 9]));
  esalt_buf[ 5] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[10]), swap_workaround (esalt_bufs[salt_pos].salt_buf[11]));
  esalt_buf[ 6] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[12]), swap_workaround (esalt_bufs[salt_pos].salt_buf[13]));
  esalt_buf[ 7] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[14]), swap_workaround (esalt_bufs[salt_pos].salt_buf[15]));
  esalt_buf[ 8] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[16]), swap_workaround (esalt_bufs[salt_pos].salt_buf[17]));
  esalt_buf[ 9] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[18]), swap_workaround (esalt_bufs[salt_pos].salt_buf[19]));
  esalt_buf[10] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[20]), swap_workaround (esalt_bufs[salt_pos].salt_buf[21]));
  esalt_buf[11] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[22]), swap_workaround (esalt_bufs[salt_pos].salt_buf[23]));
  esalt_buf[12] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[24]), swap_workaround (esalt_bufs[salt_pos].salt_buf[25]));
  esalt_buf[13] = hl32_to_64 (swap_workaround (esalt_bufs[salt_pos].salt_buf[26]), swap_workaround (esalt_bufs[salt_pos].salt_buf[27]));
  esalt_buf[14] = 0;
  esalt_buf[15] = (128 + salt_len + 4) * 8; // message length in bits: ipad block + salt + block index

  // pack the password into the lower half of a 128-byte block; HMAC
  // zero-pads the key to the block size

  u64 w[16];

  w[ 0] = hl32_to_64 (w0[0], w0[1]);
  w[ 1] = hl32_to_64 (w0[2], w0[3]);
  w[ 2] = hl32_to_64 (w1[0], w1[1]);
  w[ 3] = hl32_to_64 (w1[2], w1[3]);
  w[ 4] = hl32_to_64 (w2[0], w2[1]);
  w[ 5] = hl32_to_64 (w2[2], w2[3]);
  w[ 6] = hl32_to_64 (w3[0], w3[1]);
  w[ 7] = hl32_to_64 (w3[2], w3[3]);
  w[ 8] = 0;
  w[ 9] = 0;
  w[10] = 0;
  w[11] = 0;
  w[12] = 0;
  w[13] = 0;
  w[14] = 0;
  w[15] = 0;

  u64 ipad[8];
  u64 opad[8];

  hmac_init (w, ipad, opad);

  tmps[gid].ipad[0] = ipad[0];
  tmps[gid].ipad[1] = ipad[1];
  tmps[gid].ipad[2] = ipad[2];
  tmps[gid].ipad[3] = ipad[3];
  tmps[gid].ipad[4] = ipad[4];
  tmps[gid].ipad[5] = ipad[5];
  tmps[gid].ipad[6] = ipad[6];
  tmps[gid].ipad[7] = ipad[7];

  tmps[gid].opad[0] = opad[0];
  tmps[gid].opad[1] = opad[1];
  tmps[gid].opad[2] = opad[2];
  tmps[gid].opad[3] = opad[3];
  tmps[gid].opad[4] = opad[4];
  tmps[gid].opad[5] = opad[5];
  tmps[gid].opad[6] = opad[6];
  tmps[gid].opad[7] = opad[7];
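
  /*
   * First PBKDF2 block: U1 = HMAC (P, salt || INT (1)). The derived
   * key is 64 bytes, exactly one SHA-512 digest, so this loop body
   * executes a single time.
   */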
  for (u32 i = 0, j = 1; i < 8; i += 8, j += 1)
  {
    u64 dgst[8];

    hmac_run (esalt_buf, ipad, opad, dgst);

    tmps[gid].dgst[i + 0] = dgst[0];
    tmps[gid].dgst[i + 1] = dgst[1];
    tmps[gid].dgst[i + 2] = dgst[2];
    tmps[gid].dgst[i + 3] = dgst[3];
    tmps[gid].dgst[i + 4] = dgst[4];
    tmps[gid].dgst[i + 5] = dgst[5];
    tmps[gid].dgst[i + 6] = dgst[6];
    tmps[gid].dgst[i + 7] = dgst[7];

    tmps[gid].out[i + 0] = dgst[0];
    tmps[gid].out[i + 1] = dgst[1];
    tmps[gid].out[i + 2] = dgst[2];
    tmps[gid].out[i + 3] = dgst[3];
    tmps[gid].out[i + 4] = dgst[4];
    tmps[gid].out[i + 5] = dgst[5];
    tmps[gid].out[i + 6] = dgst[6];
    tmps[gid].out[i + 7] = dgst[7];
  }
}
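
/*
 * _loop kernel: resumes from tmps[] and runs loop_cnt PBKDF2
 * iterations, U_{n+1} = HMAC (P, U_n), XOR-accumulating every U into
 * the running output.
 */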
extern "C" __global__ void __launch_bounds__ (256, 1) m07100_loop (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, pbkdf2_sha512_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const pbkdf2_sha512_t *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  u64 ipad[8];

  ipad[0] = tmps[gid].ipad[0];
  ipad[1] = tmps[gid].ipad[1];
  ipad[2] = tmps[gid].ipad[2];
  ipad[3] = tmps[gid].ipad[3];
  ipad[4] = tmps[gid].ipad[4];
  ipad[5] = tmps[gid].ipad[5];
  ipad[6] = tmps[gid].ipad[6];
  ipad[7] = tmps[gid].ipad[7];

  u64 opad[8];

  opad[0] = tmps[gid].opad[0];
  opad[1] = tmps[gid].opad[1];
  opad[2] = tmps[gid].opad[2];
  opad[3] = tmps[gid].opad[3];
  opad[4] = tmps[gid].opad[4];
  opad[5] = tmps[gid].opad[5];
  opad[6] = tmps[gid].opad[6];
  opad[7] = tmps[gid].opad[7];

  for (u32 i = 0; i < 8; i += 8)
  {
    u64 dgst[8];

    dgst[0] = tmps[gid].dgst[i + 0];
    dgst[1] = tmps[gid].dgst[i + 1];
    dgst[2] = tmps[gid].dgst[i + 2];
    dgst[3] = tmps[gid].dgst[i + 3];
    dgst[4] = tmps[gid].dgst[i + 4];
    dgst[5] = tmps[gid].dgst[i + 5];
    dgst[6] = tmps[gid].dgst[i + 6];
    dgst[7] = tmps[gid].dgst[i + 7];

    u64 out[8];

    out[0] = tmps[gid].out[i + 0];
    out[1] = tmps[gid].out[i + 1];
    out[2] = tmps[gid].out[i + 2];
    out[3] = tmps[gid].out[i + 3];
    out[4] = tmps[gid].out[i + 4];
    out[5] = tmps[gid].out[i + 5];
    out[6] = tmps[gid].out[i + 6];
    out[7] = tmps[gid].out[i + 7];

    // U_{n+1} = HMAC (P, U_n); every U is XORed into the accumulator

    for (u32 j = 0; j < loop_cnt; j++)
    {
      u64 w[16];

      w[ 0] = dgst[0];
      w[ 1] = dgst[1];
      w[ 2] = dgst[2];
      w[ 3] = dgst[3];
      w[ 4] = dgst[4];
      w[ 5] = dgst[5];
      w[ 6] = dgst[6];
      w[ 7] = dgst[7];
      w[ 8] = 0x8000000000000000; // 0x80 padding byte right after the 64-byte digest
      w[ 9] = 0;
      w[10] = 0;
      w[11] = 0;
      w[12] = 0;
      w[13] = 0;
      w[14] = 0;
      w[15] = (128 + 64) * 8;     // message length in bits: ipad block + digest

      hmac_run (w, ipad, opad, dgst);

      out[0] ^= dgst[0];
      out[1] ^= dgst[1];
      out[2] ^= dgst[2];
      out[3] ^= dgst[3];
      out[4] ^= dgst[4];
      out[5] ^= dgst[5];
      out[6] ^= dgst[6];
      out[7] ^= dgst[7];
    }

    tmps[gid].dgst[i + 0] = dgst[0];
    tmps[gid].dgst[i + 1] = dgst[1];
    tmps[gid].dgst[i + 2] = dgst[2];
    tmps[gid].dgst[i + 3] = dgst[3];
    tmps[gid].dgst[i + 4] = dgst[4];
    tmps[gid].dgst[i + 5] = dgst[5];
    tmps[gid].dgst[i + 6] = dgst[6];
    tmps[gid].dgst[i + 7] = dgst[7];

    tmps[gid].out[i + 0] = out[0];
    tmps[gid].out[i + 1] = out[1];
    tmps[gid].out[i + 2] = out[2];
    tmps[gid].out[i + 3] = out[3];
    tmps[gid].out[i + 4] = out[4];
    tmps[gid].out[i + 5] = out[5];
    tmps[gid].out[i + 6] = out[6];
    tmps[gid].out[i + 7] = out[7];
  }
}
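
/*
 * _comp kernel: map the first 128 bits of the derived key to four
 * 32-bit words and hand them to the shared comparison code.
 */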
extern "C" __global__ void __launch_bounds__ (256, 1) m07100_comp (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, pbkdf2_sha512_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const pbkdf2_sha512_t *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  const u32 lid = threadIdx.x;

  const u64x a = tmps[gid].out[0];
  const u64x b = tmps[gid].out[1];

  const u32x r0 = l32_from_64 (a);
  const u32x r1 = h32_from_64 (a);
  const u32x r2 = l32_from_64 (b);
  const u32x r3 = h32_from_64 (b);
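
  /*
   * The comparison include checks r0..r3, i.e. the first 16 bytes of
   * the accumulated PBKDF2 output, against the loaded digests.
   */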
  #include VECT_COMPARE_M
}