/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */

#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"

#include "common_nv.c"
#include "include/rp_gpu.h"

#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef  VECT_SIZE1
#define uint_to_hex_lower8_le(i) l_bin2asc[(i)]
#endif

#ifdef  VECT_SIZE2
#define uint_to_hex_lower8_le(i) u32x (l_bin2asc[(i).x], l_bin2asc[(i).y])
#endif
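
/*
 * uint_to_hex_lower8_le expands one byte (0..255) into the ASCII codes
 * of its two lowercase hex digits, packed into a 16-bit value via the
 * l_bin2asc lookup table built by each kernel below; two such halves
 * are OR'd together to form one 32-bit word of a hex string. the
 * VECT_SIZE2 variant applies the same lookup to both lanes of a u32x.
 */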

__device__ __constant__ char c_bin2asc[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };

__device__ __shared__ short l_bin2asc[256];

__device__ __constant__ gpu_rule_t c_rules[1024];
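
/**
 * standard SHA-1 compression function: 80 rounds in four groups of 20,
 * one round constant per group (SHA1C00..SHA1C03), with the 16-word
 * message schedule (w0_t..wf_t) expanded in place as the rounds consume it
 */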
__device__ static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
{
  u32x A = digest[0];
  u32x B = digest[1];
  u32x C = digest[2];
  u32x D = digest[3];
  u32x E = digest[4];

  u32x w0_t = w0[0];
  u32x w1_t = w0[1];
  u32x w2_t = w0[2];
  u32x w3_t = w0[3];
  u32x w4_t = w1[0];
  u32x w5_t = w1[1];
  u32x w6_t = w1[2];
  u32x w7_t = w1[3];
  u32x w8_t = w2[0];
  u32x w9_t = w2[1];
  u32x wa_t = w2[2];
  u32x wb_t = w2[3];
  u32x wc_t = w3[0];
  u32x wd_t = w3[1];
  u32x we_t = w3[2];
  u32x wf_t = w3[3];

  #undef K
  #define K SHA1C00

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  #undef K
  #define K SHA1C01

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  #undef K
  #define K SHA1C02

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  #undef K
  #define K SHA1C03

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);

  digest[0] += A;
  digest[1] += B;
  digest[2] += C;
  digest[3] += D;
  digest[4] += E;
}
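
/**
 * hash mode 8400: WBB3 (Woltlab Burning Board)
 *
 *   digest = sha1 ($salt . hex (sha1 ($salt . hex (sha1 ($pass)))))
 *
 * where hex () is the 40-character lowercase hex form of a digest.
 * with a 40-byte salt each salted stage spans two SHA-1 blocks: the
 * salt plus the first 24 hex characters fill block one; the remaining
 * 16 hex characters, the 0x80 padding byte and the 640-bit length
 * fill block two.
 */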

extern "C" __global__ void __launch_bounds__ (256, 1) m08400_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 lid = threadIdx.x;

  /**
   * base
   */

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  u32x pw_buf0[4];

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  u32x pw_buf1[4];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;
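
  /**
   * bin2asc table
   *
   * the 256 threads of the block cooperatively fill the shared hex
   * lookup table (entry lid holds the two lowercase hex digits of the
   * byte value lid); the gid bound check is deferred until after the
   * barrier so that every thread participates
   */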

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  /**
   * salt
   */

  u32 salt_buf0[4];

  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  u32 salt_buf1[4];

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  u32 salt_buf2[2];

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
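
  /**
   * loop
   *
   * one iteration per rule: mutate the base password with the rule
   * from constant memory, then run the three-stage WBB3 chain on it
   */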

  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
  {
    u32x w0[4];

    w0[0] = pw_buf0[0];
    w0[1] = pw_buf0[1];
    w0[2] = pw_buf0[2];
    w0[3] = pw_buf0[3];

    u32x w1[4];

    w1[0] = pw_buf1[0];
    w1[1] = pw_buf1[1];
    w1[2] = pw_buf1[2];
    w1[3] = pw_buf1[3];

    u32x w2[4];

    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;

    u32x w3[4];

    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    const u32 out_len = apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    append_0x80_2 (w0, w1, out_len);
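
    /*
     * first stage: sha1 ($pass). append_0x80_2 () has placed the 0x80
     * padding byte at out_len; the words are byte-swapped to big-endian
     * below and the message bit length goes into w3_t[3]
     */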

    u32x w0_t[4];

    w0_t[0] = swap_workaround (w0[0]);
    w0_t[1] = swap_workaround (w0[1]);
    w0_t[2] = swap_workaround (w0[2]);
    w0_t[3] = swap_workaround (w0[3]);

    u32x w1_t[4];

    w1_t[0] = swap_workaround (w1[0]);
    w1_t[1] = swap_workaround (w1[1]);
    w1_t[2] = swap_workaround (w1[2]);
    w1_t[3] = swap_workaround (w1[3]);

    u32x w2_t[4];

    w2_t[0] = swap_workaround (w2[0]);
    w2_t[1] = swap_workaround (w2[1]);
    w2_t[2] = swap_workaround (w2[2]);
    w2_t[3] = swap_workaround (w2[3]);

    u32x w3_t[4];

    w3_t[0] = swap_workaround (w3[0]);
    w3_t[1] = swap_workaround (w3[1]);
    w3_t[2] = 0;
    w3_t[3] = out_len * 8;

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    u32x a = digest[0];
    u32x b = digest[1];
    u32x c = digest[2];
    u32x d = digest[3];
    u32x e = digest[4];
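
    /*
     * second stage: sha1 ($salt . hex (digest)). the 40-byte salt fills
     * w0_t[0]..w2_t[1]; the first 24 hex characters of the stage-1
     * digest (words a, b, c) complete this block
     */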

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];
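
    /*
     * third stage: sha1 ($salt . hex (digest)) again, this time over
     * the hex form of the stage-2 digest
     */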

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);
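
    /*
     * publish the result in hashcat's compare order: DGST_R0..DGST_R3
     * select digest words 3, 4, 2, 1 for this hash mode
     */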

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
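
    /*
     * the shared comparison code is textually included here; it checks
     * r0..r3 against the loaded digests and marks any hits
     */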

    #include VECT_COMPARE_M
  }
}

extern "C" __global__ void __launch_bounds__ (256, 1) m08400_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

extern "C" __global__ void __launch_bounds__ (256, 1) m08400_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

extern "C" __global__ void __launch_bounds__ (256, 1) m08400_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 lid = threadIdx.x;

  /**
   * base
   */

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  u32x pw_buf0[4];

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  u32x pw_buf1[4];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  /**
   * bin2asc table
   */

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  /**
   * salt
   */

  u32 salt_buf0[4];

  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  u32 salt_buf1[4];

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  u32 salt_buf2[2];

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
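
  /**
   * digest
   *
   * _s (single-hash) kernels load the one target digest into search[]
   * for direct comparison, instead of the bitmap-filtered multi-hash
   * path used by the _m kernels
   */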

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
  {
    u32x w0[4];

    w0[0] = pw_buf0[0];
    w0[1] = pw_buf0[1];
    w0[2] = pw_buf0[2];
    w0[3] = pw_buf0[3];

    u32x w1[4];

    w1[0] = pw_buf1[0];
    w1[1] = pw_buf1[1];
    w1[2] = pw_buf1[2];
    w1[3] = pw_buf1[3];

    u32x w2[4];

    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;

    u32x w3[4];

    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    const u32 out_len = apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    append_0x80_2 (w0, w1, out_len);

    u32x w0_t[4];

    w0_t[0] = swap_workaround (w0[0]);
    w0_t[1] = swap_workaround (w0[1]);
    w0_t[2] = swap_workaround (w0[2]);
    w0_t[3] = swap_workaround (w0[3]);

    u32x w1_t[4];

    w1_t[0] = swap_workaround (w1[0]);
    w1_t[1] = swap_workaround (w1[1]);
    w1_t[2] = swap_workaround (w1[2]);
    w1_t[3] = swap_workaround (w1[3]);

    u32x w2_t[4];

    w2_t[0] = swap_workaround (w2[0]);
    w2_t[1] = swap_workaround (w2[1]);
    w2_t[2] = swap_workaround (w2[2]);
    w2_t[3] = swap_workaround (w2[3]);

    u32x w3_t[4];

    w3_t[0] = swap_workaround (w3[0]);
    w3_t[1] = swap_workaround (w3[1]);
    w3_t[2] = 0;
    w3_t[3] = out_len * 8;

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    u32x a = digest[0];
    u32x b = digest[1];
    u32x c = digest[2];
    u32x d = digest[3];
    u32x e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_S
  }
}

extern "C" __global__ void __launch_bounds__ (256, 1) m08400_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

extern "C" __global__ void __launch_bounds__ (256, 1) m08400_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}