2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
24 #include "include/kernel_functions.c"
26 #include "common_nv.c"
29 #define VECT_COMPARE_M "check_multi_vect1_comp4.c"
33 #define VECT_COMPARE_M "check_multi_vect2_comp4.c"
/*
 * MD5 compression function (RFC 1321): mixes one 64-byte message block
 * (w0..w3 = 16 little-endian u32 words) into digest[4].
 *
 * NOTE(review): only the 64 round steps are visible in this view — the
 * a/b/c/d working-variable initialisation, the w0_t..wf_t temporaries and
 * the final digest feedback are on lines elided here; confirm against the
 * full source.  MD5_STEP / MD5_Fo / MD5_Go / MD5_H1 / MD5_H2 / MD5_I and
 * the MD5Cxx/MD5Sxx constants come from include/kernel_functions.c.
 */
36 __device__ static void md5_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[4])

/* Round 1 (steps 0-15): F function, message words in natural order 0..15. */
62 MD5_STEP (MD5_Fo, a, b, c, d, w0_t, MD5C00, MD5S00);
63 MD5_STEP (MD5_Fo, d, a, b, c, w1_t, MD5C01, MD5S01);
64 MD5_STEP (MD5_Fo, c, d, a, b, w2_t, MD5C02, MD5S02);
65 MD5_STEP (MD5_Fo, b, c, d, a, w3_t, MD5C03, MD5S03);
66 MD5_STEP (MD5_Fo, a, b, c, d, w4_t, MD5C04, MD5S00);
67 MD5_STEP (MD5_Fo, d, a, b, c, w5_t, MD5C05, MD5S01);
68 MD5_STEP (MD5_Fo, c, d, a, b, w6_t, MD5C06, MD5S02);
69 MD5_STEP (MD5_Fo, b, c, d, a, w7_t, MD5C07, MD5S03);
70 MD5_STEP (MD5_Fo, a, b, c, d, w8_t, MD5C08, MD5S00);
71 MD5_STEP (MD5_Fo, d, a, b, c, w9_t, MD5C09, MD5S01);
72 MD5_STEP (MD5_Fo, c, d, a, b, wa_t, MD5C0a, MD5S02);
73 MD5_STEP (MD5_Fo, b, c, d, a, wb_t, MD5C0b, MD5S03);
74 MD5_STEP (MD5_Fo, a, b, c, d, wc_t, MD5C0c, MD5S00);
75 MD5_STEP (MD5_Fo, d, a, b, c, wd_t, MD5C0d, MD5S01);
76 MD5_STEP (MD5_Fo, c, d, a, b, we_t, MD5C0e, MD5S02);
77 MD5_STEP (MD5_Fo, b, c, d, a, wf_t, MD5C0f, MD5S03);

/* Round 2 (steps 16-31): G function, word index (1 + 5*step) mod 16. */
79 MD5_STEP (MD5_Go, a, b, c, d, w1_t, MD5C10, MD5S10);
80 MD5_STEP (MD5_Go, d, a, b, c, w6_t, MD5C11, MD5S11);
81 MD5_STEP (MD5_Go, c, d, a, b, wb_t, MD5C12, MD5S12);
82 MD5_STEP (MD5_Go, b, c, d, a, w0_t, MD5C13, MD5S13);
83 MD5_STEP (MD5_Go, a, b, c, d, w5_t, MD5C14, MD5S10);
84 MD5_STEP (MD5_Go, d, a, b, c, wa_t, MD5C15, MD5S11);
85 MD5_STEP (MD5_Go, c, d, a, b, wf_t, MD5C16, MD5S12);
86 MD5_STEP (MD5_Go, b, c, d, a, w4_t, MD5C17, MD5S13);
87 MD5_STEP (MD5_Go, a, b, c, d, w9_t, MD5C18, MD5S10);
88 MD5_STEP (MD5_Go, d, a, b, c, we_t, MD5C19, MD5S11);
89 MD5_STEP (MD5_Go, c, d, a, b, w3_t, MD5C1a, MD5S12);
90 MD5_STEP (MD5_Go, b, c, d, a, w8_t, MD5C1b, MD5S13);
91 MD5_STEP (MD5_Go, a, b, c, d, wd_t, MD5C1c, MD5S10);
92 MD5_STEP (MD5_Go, d, a, b, c, w2_t, MD5C1d, MD5S11);
93 MD5_STEP (MD5_Go, c, d, a, b, w7_t, MD5C1e, MD5S12);
94 MD5_STEP (MD5_Go, b, c, d, a, wc_t, MD5C1f, MD5S13);

/* Round 3 (steps 32-47): H (xor) function.  MD5_H1/MD5_H2 alternate each
   step — presumably an optimized split of H that reuses a shared xor
   subterm between consecutive steps; confirm in kernel_functions.c. */
96 MD5_STEP (MD5_H1, a, b, c, d, w5_t, MD5C20, MD5S20);
97 MD5_STEP (MD5_H2, d, a, b, c, w8_t, MD5C21, MD5S21);
98 MD5_STEP (MD5_H1, c, d, a, b, wb_t, MD5C22, MD5S22);
99 MD5_STEP (MD5_H2, b, c, d, a, we_t, MD5C23, MD5S23);
100 MD5_STEP (MD5_H1, a, b, c, d, w1_t, MD5C24, MD5S20);
101 MD5_STEP (MD5_H2, d, a, b, c, w4_t, MD5C25, MD5S21);
102 MD5_STEP (MD5_H1, c, d, a, b, w7_t, MD5C26, MD5S22);
103 MD5_STEP (MD5_H2, b, c, d, a, wa_t, MD5C27, MD5S23);
104 MD5_STEP (MD5_H1, a, b, c, d, wd_t, MD5C28, MD5S20);
105 MD5_STEP (MD5_H2, d, a, b, c, w0_t, MD5C29, MD5S21);
106 MD5_STEP (MD5_H1, c, d, a, b, w3_t, MD5C2a, MD5S22);
107 MD5_STEP (MD5_H2, b, c, d, a, w6_t, MD5C2b, MD5S23);
108 MD5_STEP (MD5_H1, a, b, c, d, w9_t, MD5C2c, MD5S20);
109 MD5_STEP (MD5_H2, d, a, b, c, wc_t, MD5C2d, MD5S21);
110 MD5_STEP (MD5_H1, c, d, a, b, wf_t, MD5C2e, MD5S22);
111 MD5_STEP (MD5_H2, b, c, d, a, w2_t, MD5C2f, MD5S23);

/* Round 4 (steps 48-63): I function, word index (7*step) mod 16. */
113 MD5_STEP (MD5_I , a, b, c, d, w0_t, MD5C30, MD5S30);
114 MD5_STEP (MD5_I , d, a, b, c, w7_t, MD5C31, MD5S31);
115 MD5_STEP (MD5_I , c, d, a, b, we_t, MD5C32, MD5S32);
116 MD5_STEP (MD5_I , b, c, d, a, w5_t, MD5C33, MD5S33);
117 MD5_STEP (MD5_I , a, b, c, d, wc_t, MD5C34, MD5S30);
118 MD5_STEP (MD5_I , d, a, b, c, w3_t, MD5C35, MD5S31);
119 MD5_STEP (MD5_I , c, d, a, b, wa_t, MD5C36, MD5S32);
120 MD5_STEP (MD5_I , b, c, d, a, w1_t, MD5C37, MD5S33);
121 MD5_STEP (MD5_I , a, b, c, d, w8_t, MD5C38, MD5S30);
122 MD5_STEP (MD5_I , d, a, b, c, wf_t, MD5C39, MD5S31);
123 MD5_STEP (MD5_I , c, d, a, b, w6_t, MD5C3a, MD5S32);
124 MD5_STEP (MD5_I , b, c, d, a, wd_t, MD5C3b, MD5S33);
125 MD5_STEP (MD5_I , a, b, c, d, w4_t, MD5C3c, MD5S30);
126 MD5_STEP (MD5_I , d, a, b, c, wb_t, MD5C3d, MD5S31);
127 MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
128 MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
/*
 * memcat16: ORs the 16 bytes of append[4] into the 64-byte working block
 * (block0..block3, 16 u32 words) starting at byte offset block_len.
 * block_len is a const parameter and is NOT advanced here — callers must
 * add to their own length counter after the call.  Destination bytes must
 * be zero for the OR-merge to be correct.
 *
 * NOTE(review): the tmp declarations, switch scaffolding (break statements,
 * remaining cases past 9) and closing brace are on lines elided from this
 * view; confirm against the full source.
 */
136 __device__ static void memcat16 (u32x block0[4], u32x block1[4], u32x block2[4], u32x block3[4], const u32 block_len, const u32x append[4])

/* Fast path (SM 2.0+): __byte_perm selects bytes across adjacent source
   words, so the 16 appended bytes are realigned to the (block_len & 3)
   byte offset in five funnel-shift-style steps. */
144 #if __CUDA_ARCH__ >= 200

146 const int offset_minus_4 = 4 - (block_len & 3);

/* Nibble-selector for __byte_perm, derived from the byte misalignment. */
148 const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;

150 tmp0 = __byte_perm ( 0, append[0], selector);
151 tmp1 = __byte_perm (append[0], append[1], selector);
152 tmp2 = __byte_perm (append[1], append[2], selector);
153 tmp3 = __byte_perm (append[2], append[3], selector);
154 tmp4 = __byte_perm (append[3], 0, selector);

/* Fallback path: realign append[] with explicit shift/OR pairs; mod is
   the byte misalignment within the destination word (case 0 elided bits
   are on lines not shown here). */
158 const u32 mod = block_len & 3;

162 case 0: tmp0 = append[0];

168 case 1: tmp0 = append[0] << 8;
169 tmp1 = append[0] >> 24 | append[1] << 8;
170 tmp2 = append[1] >> 24 | append[2] << 8;
171 tmp3 = append[2] >> 24 | append[3] << 8;
172 tmp4 = append[3] >> 24;

174 case 2: tmp0 = append[0] << 16;
175 tmp1 = append[0] >> 16 | append[1] << 16;
176 tmp2 = append[1] >> 16 | append[2] << 16;
177 tmp3 = append[2] >> 16 | append[3] << 16;
178 tmp4 = append[3] >> 16;

180 case 3: tmp0 = append[0] << 24;
181 tmp1 = append[0] >> 8 | append[1] << 24;
182 tmp2 = append[1] >> 8 | append[2] << 24;
183 tmp3 = append[2] >> 8 | append[3] << 24;
184 tmp4 = append[3] >> 8;

/* div = first destination word index; tmp0..tmp4 are OR-ed into up to
   five consecutive words starting there (follow-on ORs per case are on
   elided lines). */
190 const u32 div = block_len / 4;

194 case 0: block0[0] |= tmp0;

200 case 1: block0[1] |= tmp0;

206 case 2: block0[2] |= tmp0;

212 case 3: block0[3] |= tmp0;

218 case 4: block1[0] |= tmp0;

224 case 5: block1[1] |= tmp0;

230 case 6: block1[2] |= tmp0;

236 case 7: block1[3] |= tmp0;

242 case 8: block2[0] |= tmp0;

248 case 9: block2[1] |= tmp0;
/*
 * memcat16_x80: like memcat16 — ORs the 16 bytes of append[4] into the
 * 64-byte working block at byte offset block_len — but additionally
 * appends the MD5 padding byte 0x80 immediately after the 16 data bytes
 * (visible on the fast path as __byte_perm (append[3], 0x80, selector)).
 *
 * NOTE(review): on the fallback path the 0x80 handling is presumably on
 * lines elided from this view (the visible case bodies stop before the
 * padding OR) — confirm against the full source.  block_len is const and
 * not advanced here.
 */
259 __device__ static void memcat16_x80 (u32x block0[4], u32x block1[4], u32x block2[4], u32x block3[4], const u32 block_len, const u32x append[4])

/* Fast path (SM 2.0+): byte-realignment via __byte_perm, with 0x80 fed in
   as the byte following append[3]. */
267 #if __CUDA_ARCH__ >= 200

269 const int offset_minus_4 = 4 - (block_len & 3);

271 const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;

273 tmp0 = __byte_perm ( 0, append[0], selector);
274 tmp1 = __byte_perm (append[0], append[1], selector);
275 tmp2 = __byte_perm (append[1], append[2], selector);
276 tmp3 = __byte_perm (append[2], append[3], selector);
277 tmp4 = __byte_perm (append[3], 0x80, selector);

/* Fallback path: explicit shift/OR realignment by byte misalignment mod. */
281 const u32 mod = block_len & 3;

285 case 0: tmp0 = append[0];

291 case 1: tmp0 = append[0] << 8;
292 tmp1 = append[0] >> 24 | append[1] << 8;
293 tmp2 = append[1] >> 24 | append[2] << 8;
294 tmp3 = append[2] >> 24 | append[3] << 8;
295 tmp4 = append[3] >> 24;

297 case 2: tmp0 = append[0] << 16;
298 tmp1 = append[0] >> 16 | append[1] << 16;
299 tmp2 = append[1] >> 16 | append[2] << 16;
300 tmp3 = append[2] >> 16 | append[3] << 16;
301 tmp4 = append[3] >> 16;

303 case 3: tmp0 = append[0] << 24;
304 tmp1 = append[0] >> 8 | append[1] << 24;
305 tmp2 = append[1] >> 8 | append[2] << 24;
306 tmp3 = append[2] >> 8 | append[3] << 24;
307 tmp4 = append[3] >> 8;

/* Scatter tmp0..tmp4 into the block starting at word index div. */
313 const u32 div = block_len / 4;

317 case 0: block0[0] |= tmp0;

323 case 1: block0[1] |= tmp0;

329 case 2: block0[2] |= tmp0;

335 case 3: block0[3] |= tmp0;

341 case 4: block1[0] |= tmp0;

347 case 5: block1[1] |= tmp0;

353 case 6: block1[2] |= tmp0;

359 case 7: block1[3] |= tmp0;

365 case 8: block2[0] |= tmp0;

371 case 9: block2[1] |= tmp0;
/*
 * memcat8: ORs the 8 bytes of append[2] into the 64-byte working block at
 * byte offset block_len.  Note append is scalar u32 (not u32x) — used for
 * the salt, which is identical across vector lanes.  block_len is const
 * and not advanced here; destination bytes must be zero for OR to work.
 *
 * NOTE(review): tmp declarations, switch scaffolding and trailing cases
 * are on lines elided from this view.
 */
382 __device__ static void memcat8 (u32x block0[4], u32x block1[4], u32x block2[4], u32x block3[4], const u32 block_len, const u32 append[2])

/* Fast path (SM 2.0+): realign the two source words across word
   boundaries with __byte_perm. */
388 #if __CUDA_ARCH__ >= 200

390 const int offset_minus_4 = 4 - (block_len & 3);

392 const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;

394 tmp0 = __byte_perm ( 0, append[0], selector);
395 tmp1 = __byte_perm (append[0], append[1], selector);
396 tmp2 = __byte_perm (append[1], 0, selector);

/* Fallback path: shift/OR realignment by the byte misalignment mod. */
400 const u32 mod = block_len & 3;

404 case 0: tmp0 = append[0];

408 case 1: tmp0 = append[0] << 8;
409 tmp1 = append[0] >> 24 | append[1] << 8;
410 tmp2 = append[1] >> 24;

412 case 2: tmp0 = append[0] << 16;
413 tmp1 = append[0] >> 16 | append[1] << 16;
414 tmp2 = append[1] >> 16;

416 case 3: tmp0 = append[0] << 24;
417 tmp1 = append[0] >> 8 | append[1] << 24;
418 tmp2 = append[1] >> 8;

/* Scatter tmp0..tmp2 into up to three consecutive words from index div. */
424 const u32 div = block_len / 4;

428 case 0: block0[0] |= tmp0;

432 case 1: block0[1] |= tmp0;

436 case 2: block0[2] |= tmp0;

440 case 3: block0[3] |= tmp0;

444 case 4: block1[0] |= tmp0;

448 case 5: block1[1] |= tmp0;

452 case 6: block1[2] |= tmp0;

456 case 7: block1[3] |= tmp0;

460 case 8: block2[0] |= tmp0;

464 case 9: block2[1] |= tmp0;

468 case 10: block2[2] |= tmp0;

472 case 11: block2[3] |= tmp0;
/*
 * append_1st: ORs a single (vectorized) byte value `append` into the
 * 64-byte working block at byte offset block_len.  Each visible line
 * handles one (word, byte-position) combination by shifting the byte to
 * lane 1/2/3 of the target word; the byte-position-0 assignments and the
 * switch/if scaffolding that selects exactly one of these lines per call
 * are on lines elided from this view.
 */
481 __device__ static void append_1st (u32x block0[4], u32x block1[4], u32x block2[4], u32x block3[4], const u32 block_len, const u32x append)

/* block0[0], byte positions 1..3 */
490 block0[0] = block0[0] | append << 8;

494 block0[0] = block0[0] | append << 16;

498 block0[0] = block0[0] | append << 24;

/* block0[1] */
506 block0[1] = block0[1] | append << 8;

510 block0[1] = block0[1] | append << 16;

514 block0[1] = block0[1] | append << 24;

/* block0[2] */
522 block0[2] = block0[2] | append << 8;

526 block0[2] = block0[2] | append << 16;

530 block0[2] = block0[2] | append << 24;

/* block0[3] */
538 block0[3] = block0[3] | append << 8;

542 block0[3] = block0[3] | append << 16;

546 block0[3] = block0[3] | append << 24;

/* block1[0] */
554 block1[0] = block1[0] | append << 8;

558 block1[0] = block1[0] | append << 16;

562 block1[0] = block1[0] | append << 24;

/* block1[1] */
570 block1[1] = block1[1] | append << 8;

574 block1[1] = block1[1] | append << 16;

578 block1[1] = block1[1] | append << 24;

/* block1[2] */
586 block1[2] = block1[2] | append << 8;

590 block1[2] = block1[2] | append << 16;

594 block1[2] = block1[2] | append << 24;

/* block1[3] */
602 block1[3] = block1[3] | append << 8;

606 block1[3] = block1[3] | append << 16;

610 block1[3] = block1[3] | append << 24;

/* block2[0] */
618 block2[0] = block2[0] | append << 8;

622 block2[0] = block2[0] | append << 16;

626 block2[0] = block2[0] | append << 24;

/* block2[1] */
634 block2[1] = block2[1] | append << 8;

638 block2[1] = block2[1] | append << 16;

642 block2[1] = block2[1] | append << 24;

/* block2[2] */
650 block2[2] = block2[2] | append << 8;

654 block2[2] = block2[2] | append << 16;

658 block2[2] = block2[2] | append << 24;

/* block2[3] */
666 block2[3] = block2[3] | append << 8;

670 block2[3] = block2[3] | append << 16;

674 block2[3] = block2[3] | append << 24;

/* block3[0] */
682 block3[0] = block3[0] | append << 8;

686 block3[0] = block3[0] | append << 16;

690 block3[0] = block3[0] | append << 24;

/* block3[1] */
698 block3[1] = block3[1] | append << 8;

702 block3[1] = block3[1] | append << 16;

706 block3[1] = block3[1] | append << 24;
/*
 * m06300_init: per-password initialization kernel for an md5crypt-style
 * scheme (the preserved reference comments below describe the classic
 * md5crypt construction).  One thread = one password candidate.  Computes
 * the first digest over pw/salt/digest material and stores it in
 * tmps[gid].digest_buf for the loop kernel to iterate on.
 *
 * Grid layout: 1D grid, 1D blocks of up to 256 threads
 * (__launch_bounds__ (256, 1)); gid is bounds-checked against gid_max.
 *
 * NOTE(review): many statements (block0..block3 zeroing, the initial copy
 * of the password into the block, digest init constants) are on lines
 * elided from this view.
 */
715 extern "C" __global__ void __launch_bounds__ (256, 1) m06300_init (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, md5crypt_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

/* Flat 1D global thread index; tail guard against over-launch. */
717 const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

719 if (gid >= gid_max) return;

/* Load the password candidate (first 16 bytes as four u32 words). */
723 w0[0] = pws[gid].i[0];
724 w0[1] = pws[gid].i[1];
725 w0[2] = pws[gid].i[2];
726 w0[3] = pws[gid].i[3];

728 const u32 pw_len = pws[gid].pw_len;

/* Load the salt: two u32 words, i.e. at most 8 salt bytes. */
736 salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
737 salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];

739 const u32 salt_len = salt_bufs[salt_pos].salt_len;

/* First message: pw . salt . pw — the password is presumably already in
   the block (copy elided from this view), so block_len starts at pw_len. */
745 //memcat16 (block0, block1, block2, block3, block_len, w0);
746 //block_len += pw_len;

748 u32 block_len = pw_len;

778 memcat8 (block0, block1, block2, block3, block_len, salt_buf);

780 block_len += salt_len;

782 memcat16 (block0, block1, block2, block3, block_len, w0);

/* MD5 finalization: 0x80 terminator, then bit length in block3[2]
   (word 14 of the message block). */
786 append_0x80_4 (block0, block1, block2, block3, block_len);

788 block3[2] = block_len * 8;

/* digest = MD5 (pw . salt . pw) */
797 md5_transform (block0, block1, block2, block3, digest);

799 /* The password first, since that is what is most unknown */
800 /* Then the raw salt */
801 /* Then just as many characters of the MD5(pw,salt,pw) */

/* Second message: pw . salt . MD5(pw,salt,pw)[0..pw_len-1] . bit pattern.
   The block reset and password copy are on elided lines. */
803 //memcat16 (block0, block1, block2, block3, block_len, w);
804 //block_len += pw_len;

828 memcat8 (block0, block1, block2, block3, block_len, salt_buf);

830 block_len += salt_len;

/* Keep only the first pw_len bytes of the first digest before appending. */
832 truncate_block (digest, pw_len);

834 memcat16 (block0, block1, block2, block3, block_len, digest);

838 /* Then something really weird... */

/* md5crypt's bit-pattern step: one byte appended per bit of pw_len; the
   per-bit conditional choosing between a zero byte and this byte is
   presumably on elided lines — confirm against the full source. */
840 u32x append = block0[0] & 0xFF;

842 for (u32 j = pw_len; j; j >>= 1)

846 append_1st (block0, block1, block2, block3, block_len, append);

852 append_0x80_4 (block0, block1, block2, block3, block_len);

854 block3[2] = block_len * 8;

861 md5_transform (block0, block1, block2, block3, digest);

/* Persist the intermediate digest for m06300_loop. */
863 tmps[gid].digest_buf[0] = digest[0];
864 tmps[gid].digest_buf[1] = digest[1];
865 tmps[gid].digest_buf[2] = digest[2];
866 tmps[gid].digest_buf[3] = digest[3];
/*
 * m06300_loop: iteration kernel — runs loop_cnt md5crypt-style rounds
 * starting at global round index loop_pos, carrying the running digest in
 * tmps[gid].digest_buf between kernel invocations.  One thread = one
 * password candidate; 1D grid, blocks of up to 256 threads.
 *
 * NOTE(review): the per-round block construction (zeroing, the branches
 * keyed on j1/j3/j7, block_len bookkeeping between the visible calls) is
 * largely on lines elided from this view.
 */
869 extern "C" __global__ void __launch_bounds__ (256, 1) m06300_loop (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, md5crypt_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

875 const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

877 if (gid >= gid_max) return;

/* Load password candidate words. */
881 w0[0] = pws[gid].i[0];
882 w0[1] = pws[gid].i[1];
883 w0[2] = pws[gid].i[2];
884 w0[3] = pws[gid].i[3];

886 const u32 pw_len = pws[gid].pw_len;

/* Pre-build a 0x80-terminated copy of the password so rounds that place
   the password last don't re-pad every iteration. */
895 append_0x80_1 (w0_x80, pw_len);

903 salt_buf[0] = salt_bufs[salt_pos].salt_buf[0];
904 salt_buf[1] = salt_bufs[salt_pos].salt_buf[1];

906 const u32 salt_len = salt_bufs[salt_pos].salt_len;

/* Resume from the digest saved by m06300_init (or the previous loop
   invocation). */
914 digest[0] = tmps[gid].digest_buf[0];
915 digest[1] = tmps[gid].digest_buf[1];
916 digest[2] = tmps[gid].digest_buf[2];
917 digest[3] = tmps[gid].digest_buf[3];

923 /* and now, just to make sure things don't run too fast */

955 for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++)

/* md5crypt round-shape flags: j1 = j is odd, j3 = j not divisible by 3,
   j7 = j not divisible by 7.  The branches they drive (classic md5crypt
   alternation of pw/digest-first and optional salt/pw inserts) are on
   elided lines. */
968 const u32 j1 = (j & 1) ? 1 : 0;
969 const u32 j3 = (j % 3) ? 1 : 0;
970 const u32 j7 = (j % 7) ? 1 : 0;

983 memcat8 (block0, block1, block2, block3, block_len, salt_buf);

985 block_len += salt_len;

990 memcat16 (block0, block1, block2, block3, block_len, w0);

/* Digest appended last → also needs the trailing 0x80 pad byte. */
995 memcat16_x80 (block0, block1, block2, block3, block_len, digest);

/* Alternate round shape: digest goes first, directly into block0. */
1001 block0[0] = digest[0];
1002 block0[1] = digest[1];
1003 block0[2] = digest[2];
1004 block0[3] = digest[3];

/* Salt at a fixed word offset (digest is exactly 16 bytes = block1). */
1010 block1[0] = salt_buf[0];
1011 block1[1] = salt_buf[1];

1013 block_len += salt_len;

1015 memcat16 (block0, block1, block2, block3, block_len, w0);

1017 block_len += pw_len;

1021 block1[0] = salt_buf[0];
1022 block1[1] = salt_buf[1];

1024 block_len += salt_len;

1033 block_len += pw_len;

/* Password appended last, using the pre-padded 0x80 copy. */
1036 memcat16 (block0, block1, block2, block3, block_len, w0_x80);

1038 block_len += pw_len;

/* Message bit length into word 14, then one MD5 compression per round. */
1041 block3[2] = block_len * 8;

1048 md5_transform (block0, block1, block2, block3, digest);

/* Save the running digest for the next loop invocation / comp kernel. */
1051 tmps[gid].digest_buf[0] = digest[0];
1052 tmps[gid].digest_buf[1] = digest[1];
1053 tmps[gid].digest_buf[2] = digest[2];
1054 tmps[gid].digest_buf[3] = digest[3];
/*
 * m06300_comp: comparison kernel — loads the final digest produced by
 * m06300_loop and runs the shared multi-hash comparison code (included
 * via VECT_COMPARE_M, selected above by vector width) against the target
 * digests.  One thread = one candidate; 1D grid, blocks of up to 256
 * threads.  lid (thread index within the block) is presumably consumed by
 * the included comparison code — confirm in check_multi_vect*_comp4.c.
 */
1057 extern "C" __global__ void __launch_bounds__ (256, 1) m06300_comp (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, md5crypt_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

1063 const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

1065 if (gid >= gid_max) return;

1067 const u32 lid = threadIdx.x;

/* Pull the final digest words in the result order expected by the
   comparison code (DGST_R0..DGST_R3 index permutation). */
1073 const u32x r0 = tmps[gid].digest_buf[DGST_R0];
1074 const u32x r1 = tmps[gid].digest_buf[DGST_R1];
1075 const u32x r2 = tmps[gid].digest_buf[DGST_R2];
1076 const u32x r3 = tmps[gid].digest_buf[DGST_R3];

/* Shared bitmap/digest comparison logic (uses r0..r3, gid, lid and the
   bitmap_* / digests_* kernel parameters). */
1080 #include VECT_COMPARE_M