/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"

#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
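
// SHA-512 round constants K[0..79] (FIPS 180-4)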
__constant u64 k_sha512[80] =
{
  SHA512C00, SHA512C01, SHA512C02, SHA512C03,
  SHA512C04, SHA512C05, SHA512C06, SHA512C07,
  SHA512C08, SHA512C09, SHA512C0a, SHA512C0b,
  SHA512C0c, SHA512C0d, SHA512C0e, SHA512C0f,
  SHA512C10, SHA512C11, SHA512C12, SHA512C13,
  SHA512C14, SHA512C15, SHA512C16, SHA512C17,
  SHA512C18, SHA512C19, SHA512C1a, SHA512C1b,
  SHA512C1c, SHA512C1d, SHA512C1e, SHA512C1f,
  SHA512C20, SHA512C21, SHA512C22, SHA512C23,
  SHA512C24, SHA512C25, SHA512C26, SHA512C27,
  SHA512C28, SHA512C29, SHA512C2a, SHA512C2b,
  SHA512C2c, SHA512C2d, SHA512C2e, SHA512C2f,
  SHA512C30, SHA512C31, SHA512C32, SHA512C33,
  SHA512C34, SHA512C35, SHA512C36, SHA512C37,
  SHA512C38, SHA512C39, SHA512C3a, SHA512C3b,
  SHA512C3c, SHA512C3d, SHA512C3e, SHA512C3f,
  SHA512C40, SHA512C41, SHA512C42, SHA512C43,
  SHA512C44, SHA512C45, SHA512C46, SHA512C47,
  SHA512C48, SHA512C49, SHA512C4a, SHA512C4b,
  SHA512C4c, SHA512C4d, SHA512C4e, SHA512C4f,
};
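
// one SHA-512 compression: absorb a single 16-word (128-byte) message block into dgst[8]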
static void sha512_transform (const u64 w[16], u64 dgst[8])
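
// message schedule: derive the next 16 schedule words in place from the previous 16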
#define ROUND_EXPAND() \
{ \
  w0_t = SHA512_EXPAND (we_t, w9_t, w1_t, w0_t); \
  w1_t = SHA512_EXPAND (wf_t, wa_t, w2_t, w1_t); \
  w2_t = SHA512_EXPAND (w0_t, wb_t, w3_t, w2_t); \
  w3_t = SHA512_EXPAND (w1_t, wc_t, w4_t, w3_t); \
  w4_t = SHA512_EXPAND (w2_t, wd_t, w5_t, w4_t); \
  w5_t = SHA512_EXPAND (w3_t, we_t, w6_t, w5_t); \
  w6_t = SHA512_EXPAND (w4_t, wf_t, w7_t, w6_t); \
  w7_t = SHA512_EXPAND (w5_t, w0_t, w8_t, w7_t); \
  w8_t = SHA512_EXPAND (w6_t, w1_t, w9_t, w8_t); \
  w9_t = SHA512_EXPAND (w7_t, w2_t, wa_t, w9_t); \
  wa_t = SHA512_EXPAND (w8_t, w3_t, wb_t, wa_t); \
  wb_t = SHA512_EXPAND (w9_t, w4_t, wc_t, wb_t); \
  wc_t = SHA512_EXPAND (wa_t, w5_t, wd_t, wc_t); \
  wd_t = SHA512_EXPAND (wb_t, w6_t, we_t, wd_t); \
  we_t = SHA512_EXPAND (wc_t, w7_t, wf_t, we_t); \
  wf_t = SHA512_EXPAND (wd_t, w8_t, w0_t, wf_t); \
}
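
// sixteen SHA-512 rounds; the working variables a..h are rotated by one position each round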
#define ROUND_STEP(i) \
{ \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha512[i + 0]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha512[i + 1]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha512[i + 2]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha512[i + 3]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha512[i + 4]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha512[i + 5]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha512[i + 6]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, w7_t, k_sha512[i + 7]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha512[i + 8]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha512[i + 9]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha512[i + 10]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha512[i + 11]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha512[i + 12]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha512[i + 13]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, we_t, k_sha512[i + 14]); \
  SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha512[i + 15]); \
}

  ROUND_STEP (0);
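
  // rounds 16..79: expand the schedule and run 16 more rounds per loop iteration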
  //#pragma unroll // kernel fails if used
  for (int i = 16; i < 80; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  }
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m12200_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global ecryptfs_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;
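
  // load the candidate password (up to 64 bytes) as sixteen little-endian 32-bit words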
  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32 w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32 w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = pws[gid].i[14];
  w3[3] = pws[gid].i[15];
  u32 pw_len = pws[gid].pw_len;
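
  // append the 0x80 padding marker after the password, then byte-swap each word to big endian for SHA-512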
  append_0x80_4 (w0, w1, w2, w3, pw_len);

  w0[0] = swap_workaround (w0[0]);
  w0[1] = swap_workaround (w0[1]);
  w0[2] = swap_workaround (w0[2]);
  w0[3] = swap_workaround (w0[3]);
  w1[0] = swap_workaround (w1[0]);
  w1[1] = swap_workaround (w1[1]);
  w1[2] = swap_workaround (w1[2]);
  w1[3] = swap_workaround (w1[3]);
  w2[0] = swap_workaround (w2[0]);
  w2[1] = swap_workaround (w2[1]);
  w2[2] = swap_workaround (w2[2]);
  w2[3] = swap_workaround (w2[3]);
  w3[0] = swap_workaround (w3[0]);
  w3[1] = swap_workaround (w3[1]);
  w3[2] = swap_workaround (w3[2]);
  w3[3] = swap_workaround (w3[3]);
  u32 s0[2];

  s0[0] = salt_bufs[salt_pos].salt_buf[0];
  s0[1] = salt_bufs[salt_pos].salt_buf[1];

  u32 salt_len = salt_bufs[salt_pos].salt_len;
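
  // first SHA-512 block: salt (two 32-bit words) || password || padding, with the message length in bits in w[15]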
  u64 w[16];

  w[ 0] = hl32_to_64 (s0[0], s0[1]);
  w[ 1] = hl32_to_64 (w0[0], w0[1]);
  w[ 2] = hl32_to_64 (w0[2], w0[3]);
  w[ 3] = hl32_to_64 (w1[0], w1[1]);
  w[ 4] = hl32_to_64 (w1[2], w1[3]);
  w[ 5] = hl32_to_64 (w2[0], w2[1]);
  w[ 6] = hl32_to_64 (w2[2], w2[3]);
  w[ 7] = hl32_to_64 (w3[0], w3[1]);
  w[ 8] = hl32_to_64 (w3[2], w3[3]);
  w[ 9] = 0;
  w[10] = 0;
  w[11] = 0;
  w[12] = 0;
  w[13] = 0;
  w[14] = 0;
  w[15] = (salt_len + pw_len) * 8;
  sha512_transform (w, dgst);
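
  // store the first digest; the loop kernel keeps iterating SHA-512 over it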
  tmps[gid].out[0] = dgst[0];
  tmps[gid].out[1] = dgst[1];
  tmps[gid].out[2] = dgst[2];
  tmps[gid].out[3] = dgst[3];
  tmps[gid].out[4] = dgst[4];
  tmps[gid].out[5] = dgst[5];
  tmps[gid].out[6] = dgst[6];
  tmps[gid].out[7] = dgst[7];
}
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m12200_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global ecryptfs_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;
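
  // resume from the digest carried over in the per-candidate tmps buffer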
  u64 dgst[8];

  dgst[0] = tmps[gid].out[0];
  dgst[1] = tmps[gid].out[1];
  dgst[2] = tmps[gid].out[2];
  dgst[3] = tmps[gid].out[3];
  dgst[4] = tmps[gid].out[4];
  dgst[5] = tmps[gid].out[5];
  dgst[6] = tmps[gid].out[6];
  dgst[7] = tmps[gid].out[7];
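
  // eCryptfs KDF step: rehash the previous 64-byte digest as a single padded block (512 message bits)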
  for (u32 i = 0; i < loop_cnt; i++)
  {
    w[ 8] = 0x8000000000000000;
    sha512_transform (w, dgst);
  }
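
  // save progress for the next loop invocation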
  tmps[gid].out[0] = dgst[0];
  tmps[gid].out[1] = dgst[1];
  tmps[gid].out[2] = dgst[2];
  tmps[gid].out[3] = dgst[3];
  tmps[gid].out[4] = dgst[4];
  tmps[gid].out[5] = dgst[5];
  tmps[gid].out[6] = dgst[6];
  tmps[gid].out[7] = dgst[7];
}
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m12200_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global ecryptfs_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 lid = get_local_id (0);
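
  // only the first 64-bit word of the final digest is checked, split into two 32-bit halves (eCryptfs stores an 8-byte signature)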
  const u64x a = tmps[gid].out[0];
  const u32x r0 = h32_from_64 (a);
  const u32x r1 = l32_from_64 (a);
  const u32x r2 = 0;
  const u32x r3 = 0;
  #include VECT_COMPARE_M
}