/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 * License.....: MIT
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"

#include "common_nv.c"
#ifdef  VECT_SIZE1
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif
__device__ __constant__ u64 k[80] =
{
  SHA512C00, SHA512C01, SHA512C02, SHA512C03,
  SHA512C04, SHA512C05, SHA512C06, SHA512C07,
  SHA512C08, SHA512C09, SHA512C0a, SHA512C0b,
  SHA512C0c, SHA512C0d, SHA512C0e, SHA512C0f,
  SHA512C10, SHA512C11, SHA512C12, SHA512C13,
  SHA512C14, SHA512C15, SHA512C16, SHA512C17,
  SHA512C18, SHA512C19, SHA512C1a, SHA512C1b,
  SHA512C1c, SHA512C1d, SHA512C1e, SHA512C1f,
  SHA512C20, SHA512C21, SHA512C22, SHA512C23,
  SHA512C24, SHA512C25, SHA512C26, SHA512C27,
  SHA512C28, SHA512C29, SHA512C2a, SHA512C2b,
  SHA512C2c, SHA512C2d, SHA512C2e, SHA512C2f,
  SHA512C30, SHA512C31, SHA512C32, SHA512C33,
  SHA512C34, SHA512C35, SHA512C36, SHA512C37,
  SHA512C38, SHA512C39, SHA512C3a, SHA512C3b,
  SHA512C3c, SHA512C3d, SHA512C3e, SHA512C3f,
  SHA512C40, SHA512C41, SHA512C42, SHA512C43,
  SHA512C44, SHA512C45, SHA512C46, SHA512C47,
  SHA512C48, SHA512C49, SHA512C4a, SHA512C4b,
  SHA512C4c, SHA512C4d, SHA512C4e, SHA512C4f,
};
#define ROUND_EXPAND() \
{ \
  w0_t = SHA512_EXPAND (we_t, w9_t, w1_t, w0_t); \
  w1_t = SHA512_EXPAND (wf_t, wa_t, w2_t, w1_t); \
  w2_t = SHA512_EXPAND (w0_t, wb_t, w3_t, w2_t); \
  w3_t = SHA512_EXPAND (w1_t, wc_t, w4_t, w3_t); \
  w4_t = SHA512_EXPAND (w2_t, wd_t, w5_t, w4_t); \
  w5_t = SHA512_EXPAND (w3_t, we_t, w6_t, w5_t); \
  w6_t = SHA512_EXPAND (w4_t, wf_t, w7_t, w6_t); \
  w7_t = SHA512_EXPAND (w5_t, w0_t, w8_t, w7_t); \
  w8_t = SHA512_EXPAND (w6_t, w1_t, w9_t, w8_t); \
  w9_t = SHA512_EXPAND (w7_t, w2_t, wa_t, w9_t); \
  wa_t = SHA512_EXPAND (w8_t, w3_t, wb_t, wa_t); \
  wb_t = SHA512_EXPAND (w9_t, w4_t, wc_t, wb_t); \
  wc_t = SHA512_EXPAND (wa_t, w5_t, wd_t, wc_t); \
  wd_t = SHA512_EXPAND (wb_t, w6_t, we_t, wd_t); \
  we_t = SHA512_EXPAND (wc_t, w7_t, wf_t, we_t); \
  wf_t = SHA512_EXPAND (wd_t, w8_t, w0_t, wf_t); \
}
#define ROUND_STEP(i) \
{ \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w0_t, k[i +  0]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w1_t, k[i +  1]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, w2_t, k[i +  2]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, w3_t, k[i +  3]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, w4_t, k[i +  4]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, w5_t, k[i +  5]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, w6_t, k[i +  6]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, w7_t, k[i +  7]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w8_t, k[i +  8]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w9_t, k[i +  9]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, wa_t, k[i + 10]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, wb_t, k[i + 11]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, wc_t, k[i + 12]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, wd_t, k[i + 13]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, we_t, k[i + 14]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, wf_t, k[i + 15]); \
}
__device__ static void sha512_transform (const u64 w[16], u64 dgst[8])
{
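  // Rebuilt from context (the standard SHA-512 layout the ROUND_* macros
  // expect): load the chaining value into the working variables a..h, the
  // message block into w0_t..wf_t, and run the first 16 rounds before the
  // expansion loop below.
  u64 a = dgst[0];
  u64 b = dgst[1];
  u64 c = dgst[2];
  u64 d = dgst[3];
  u64 e = dgst[4];
  u64 f = dgst[5];
  u64 g = dgst[6];
  u64 h = dgst[7];

  u64 w0_t = w[ 0];
  u64 w1_t = w[ 1];
  u64 w2_t = w[ 2];
  u64 w3_t = w[ 3];
  u64 w4_t = w[ 4];
  u64 w5_t = w[ 5];
  u64 w6_t = w[ 6];
  u64 w7_t = w[ 7];
  u64 w8_t = w[ 8];
  u64 w9_t = w[ 9];
  u64 wa_t = w[10];
  u64 wb_t = w[11];
  u64 wc_t = w[12];
  u64 wd_t = w[13];
  u64 we_t = w[14];
  u64 wf_t = w[15];

  ROUND_STEP (0);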
  for (int i = 16; i < 80; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  }
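  // Davies-Meyer feed-forward: add the working variables back into the
  // chaining value, as every SHA-512 compression must.
  dgst[0] += a;
  dgst[1] += b;
  dgst[2] += c;
  dgst[3] += d;
  dgst[4] += e;
  dgst[5] += f;
  dgst[6] += g;
  dgst[7] += h;
}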
extern "C" __global__ void __launch_bounds__ (256, 1) m12200_init (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, ecryptfs_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;
  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32 w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32 w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = pws[gid].i[14];
  w3[3] = pws[gid].i[15];
  u32 pw_len = pws[gid].pw_len;

  // append the 0x80 SHA-512 padding byte directly after the password
  append_0x80_4 (w0, w1, w2, w3, pw_len);
  // convert the little-endian password words to big-endian for SHA-512
  w0[0] = swap_workaround (w0[0]);
  w0[1] = swap_workaround (w0[1]);
  w0[2] = swap_workaround (w0[2]);
  w0[3] = swap_workaround (w0[3]);
  w1[0] = swap_workaround (w1[0]);
  w1[1] = swap_workaround (w1[1]);
  w1[2] = swap_workaround (w1[2]);
  w1[3] = swap_workaround (w1[3]);
  w2[0] = swap_workaround (w2[0]);
  w2[1] = swap_workaround (w2[1]);
  w2[2] = swap_workaround (w2[2]);
  w2[3] = swap_workaround (w2[3]);
  w3[0] = swap_workaround (w3[0]);
  w3[1] = swap_workaround (w3[1]);
  w3[2] = swap_workaround (w3[2]);
  w3[3] = swap_workaround (w3[3]);
  /**
   * salt
   */

  u32 s0[2]; // declaration rebuilt from the two words used below

  s0[0] = salt_bufs[salt_pos].salt_buf[0];
  s0[1] = salt_bufs[salt_pos].salt_buf[1];

  u32 salt_len = salt_bufs[salt_pos].salt_len; // 8 bytes for eCryptfs
  u64 w[16];

  w[ 0] = hl32_to_64 (s0[0], s0[1]); // salt comes first, then the password
  w[ 1] = hl32_to_64 (w0[0], w0[1]);
  w[ 2] = hl32_to_64 (w0[2], w0[3]);
  w[ 3] = hl32_to_64 (w1[0], w1[1]);
  w[ 4] = hl32_to_64 (w1[2], w1[3]);
  w[ 5] = hl32_to_64 (w2[0], w2[1]);
  w[ 6] = hl32_to_64 (w2[2], w2[3]);
  w[ 7] = hl32_to_64 (w3[0], w3[1]);
  w[ 8] = hl32_to_64 (w3[2], w3[3]);
  w[ 9] = 0;                         // rebuilt: zero padding up to the length word
  w[10] = 0;
  w[11] = 0;
  w[12] = 0;
  w[13] = 0;
  w[14] = 0;
  w[15] = (salt_len + pw_len) * 8;   // message length in bits
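  // Rebuilt from context: the first transform starts from the SHA-512
  // initial values (SHA512M_A..SHA512M_H from include/constants.h).
  u64 dgst[8];

  dgst[0] = SHA512M_A;
  dgst[1] = SHA512M_B;
  dgst[2] = SHA512M_C;
  dgst[3] = SHA512M_D;
  dgst[4] = SHA512M_E;
  dgst[5] = SHA512M_F;
  dgst[6] = SHA512M_G;
  dgst[7] = SHA512M_H;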
  sha512_transform (w, dgst);
  tmps[gid].out[0] = dgst[0];
  tmps[gid].out[1] = dgst[1];
  tmps[gid].out[2] = dgst[2];
  tmps[gid].out[3] = dgst[3];
  tmps[gid].out[4] = dgst[4];
  tmps[gid].out[5] = dgst[5];
  tmps[gid].out[6] = dgst[6];
  tmps[gid].out[7] = dgst[7];
}
extern "C" __global__ void __launch_bounds__ (256, 1) m12200_loop (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, ecryptfs_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;
  u64 dgst[8];

  dgst[0] = tmps[gid].out[0];
  dgst[1] = tmps[gid].out[1];
  dgst[2] = tmps[gid].out[2];
  dgst[3] = tmps[gid].out[3];
  dgst[4] = tmps[gid].out[4];
  dgst[5] = tmps[gid].out[5];
  dgst[6] = tmps[gid].out[6];
  dgst[7] = tmps[gid].out[7];
  for (u32 i = 0; i < loop_cnt; i++)
  {
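    // Rebuilt from context: each iteration re-hashes the previous 64-byte
    // digest, so its eight words form the first half of the message block.
    u64 w[16];

    w[ 0] = dgst[0];
    w[ 1] = dgst[1];
    w[ 2] = dgst[2];
    w[ 3] = dgst[3];
    w[ 4] = dgst[4];
    w[ 5] = dgst[5];
    w[ 6] = dgst[6];
    w[ 7] = dgst[7];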
    w[ 8] = 0x8000000000000000; // padding bit right after the 64-byte digest
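    // Rebuilt from context: zero the remainder of the block, set the bit
    // length of the 64-byte message, and reset the chaining value to the
    // SHA-512 IV so each iteration is a fresh hash of the previous digest.
    w[ 9] = 0;
    w[10] = 0;
    w[11] = 0;
    w[12] = 0;
    w[13] = 0;
    w[14] = 0;
    w[15] = 64 * 8;

    dgst[0] = SHA512M_A;
    dgst[1] = SHA512M_B;
    dgst[2] = SHA512M_C;
    dgst[3] = SHA512M_D;
    dgst[4] = SHA512M_E;
    dgst[5] = SHA512M_F;
    dgst[6] = SHA512M_G;
    dgst[7] = SHA512M_H;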
    sha512_transform (w, dgst);
  }
  tmps[gid].out[0] = dgst[0];
  tmps[gid].out[1] = dgst[1];
  tmps[gid].out[2] = dgst[2];
  tmps[gid].out[3] = dgst[3];
  tmps[gid].out[4] = dgst[4];
  tmps[gid].out[5] = dgst[5];
  tmps[gid].out[6] = dgst[6];
  tmps[gid].out[7] = dgst[7];
}
extern "C" __global__ void __launch_bounds__ (256, 1) m12200_comp (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, ecryptfs_tmp_t *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  const u32 lid = threadIdx.x;
  // eCryptfs keeps only the first 8 bytes of the final digest as its signature
  const u64x a = tmps[gid].out[0];
  const u32x r0 = h32_from_64 (a);
  const u32x r1 = l32_from_64 (a);
  const u32x r2 = 0; // only 64 bits are compared
  const u32x r3 = 0;

  #define il_pos 0

  #include VECT_COMPARE_M
}