/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "common_nv.c"
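
/*
 * Hash mode 8400 (WBB3, Woltlab Burning Board). As the block sequence in
 * m08400m/m08400s below shows, the kernel computes
 *
 *   sha1 ($salt . sha1 ($salt . sha1 ($pass)))
 *
 * where each inner digest is re-encoded as 40 lowercase hex characters
 * before it is fed into the next stage.
 */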
#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif
#ifdef  VECT_SIZE1
#define uint_to_hex_lower8_le(i) l_bin2asc[(i)]
#endif

#ifdef  VECT_SIZE2
#define uint_to_hex_lower8_le(i) u32x (l_bin2asc[(i).x], l_bin2asc[(i).y])
#endif
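
/*
 * c_bin2asc maps a nibble to its lowercase hex digit; the kernels expand it
 * into the 256-entry l_bin2asc shared-memory table so uint_to_hex_lower8_le()
 * can turn a whole byte into two ascii characters with one lookup. The low
 * nibble lands in the low byte on purpose: the packed words are later read
 * big-endian by sha1_transform, which restores the natural digit order.
 */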
__device__ __constant__ char c_bin2asc[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };

__device__ __shared__ short l_bin2asc[256];

__device__ __constant__ bf_t c_bfs[1024];
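
/*
 * Standard SHA1 compression of one 512-bit block into digest[0..4]; the
 * SHA1_STEP/SHA1_F* macros and the SHA1C00..SHA1C03 round constants come
 * from the common includes above.
 */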
__device__ static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
{
  u32x A = digest[0];
  u32x B = digest[1];
  u32x C = digest[2];
  u32x D = digest[3];
  u32x E = digest[4];

  u32x w0_t = w0[0];
  u32x w1_t = w0[1];
  u32x w2_t = w0[2];
  u32x w3_t = w0[3];
  u32x w4_t = w1[0];
  u32x w5_t = w1[1];
  u32x w6_t = w1[2];
  u32x w7_t = w1[3];
  u32x w8_t = w2[0];
  u32x w9_t = w2[1];
  u32x wa_t = w2[2];
  u32x wb_t = w2[3];
  u32x wc_t = w3[0];
  u32x wd_t = w3[1];
  u32x we_t = w3[2];
  u32x wf_t = w3[3];

  #undef K
  #define K SHA1C00
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  #undef K
  #define K SHA1C01

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  #undef K
  #define K SHA1C02

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  #undef K
  #define K SHA1C03

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);

  digest[0] += A;
  digest[1] += B;
  digest[2] += C;
  digest[3] += D;
  digest[4] += E;
}
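
/*
 * Multi-hash kernel body: derives sha1 ($pass), then twice prepends the salt
 * to the lowercase hex of the previous digest and hashes again, checking
 * every candidate against all loaded digests via VECT_COMPARE_M.
 */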
__device__ static void m08400m (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  u32 salt_buf0[4];

  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  u32 salt_buf1[4];

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  u32 salt_buf2[4];

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
  {
    const u32 w0r = c_bfs[il_pos].i;
    w3_t[3] = pw_len * 8;

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    u32x a = digest[0];
    u32x b = digest[1];
    u32x c = digest[2];
    u32x d = digest[3];
    u32x e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_M
  }
}
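
/*
 * Single-hash kernel body: identical chain to m08400m, but the target digest
 * is loaded into search[] up front and candidates are checked against it via
 * VECT_COMPARE_S.
 */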
__device__ static void m08400s (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  u32 salt_buf0[4];

  salt_buf0[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 3]);

  u32 salt_buf1[4];

  salt_buf1[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 7]);

  u32 salt_buf2[4];

  salt_buf2[0] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap_workaround (salt_bufs[salt_pos].salt_buf[ 9]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
  {
    const u32 w0r = c_bfs[il_pos].i;
    w3_t[3] = pw_len * 8;

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    u32x a = digest[0];
    u32x b = digest[1];
    u32x c = digest[2];
    u32x d = digest[3];
    u32x e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = salt_buf0[0];
    w0_t[1] = salt_buf0[1];
    w0_t[2] = salt_buf0[2];
    w0_t[3] = salt_buf0[3];
    w1_t[0] = salt_buf1[0];
    w1_t[1] = salt_buf1[1];
    w1_t[2] = salt_buf1[2];
    w1_t[3] = salt_buf1[3];
    w2_t[0] = salt_buf2[0];
    w2_t[1] = salt_buf2[1];
    w2_t[2] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w2_t[3] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w3_t[0] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w3_t[1] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w3_t[2] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w3_t[3] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    w0_t[0] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w1_t[0] = 0x80000000;
    w1_t[1] = 0;
    w1_t[2] = 0;
    w1_t[3] = 0;
    w2_t[0] = 0;
    w2_t[1] = 0;
    w2_t[2] = 0;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = (salt_len + 40) * 8;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_S
  }
}
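
/*
 * Kernel entry points. The _m04/_m08/_m16 variants cover increasing password
 * lengths in multi-hash mode and the _s04/_s08/_s16 variants the same length
 * buckets in single-hash mode; each fills the shared l_bin2asc table before
 * any thread proceeds.
 */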
642 extern "C" __global__ void __launch_bounds__ (256, 1) m08400_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
648 const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
654 const u32 lid = threadIdx.x;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = pws[gid].i[15];

  const u32 pw_len = pws[gid].pw_len;

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  m08400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}
extern "C" __global__ void __launch_bounds__ (256, 1) m08400_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = pws[gid].i[15];

  const u32 pw_len = pws[gid].pw_len;

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  m08400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}
extern "C" __global__ void __launch_bounds__ (256, 1) m08400_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32x w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = pws[gid].i[14];
  w3[3] = pws[gid].i[15];

  const u32 pw_len = pws[gid].pw_len;

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  m08400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}
extern "C" __global__ void __launch_bounds__ (256, 1) m08400_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = pws[gid].i[15];

  const u32 pw_len = pws[gid].pw_len;

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  m08400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}
extern "C" __global__ void __launch_bounds__ (256, 1) m08400_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = pws[gid].i[15];

  const u32 pw_len = pws[gid].pw_len;

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  m08400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}
extern "C" __global__ void __launch_bounds__ (256, 1) m08400_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32x w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = pws[gid].i[14];
  w3[3] = pws[gid].i[15];

  const u32 pw_len = pws[gid].pw_len;

  l_bin2asc[lid] = c_bin2asc[(lid >> 0) & 15] << 0
                 | c_bin2asc[(lid >> 4) & 15] << 8;

  __syncthreads ();

  if (gid >= gid_max) return;

  m08400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}
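
#if 0
/*
 * Standalone host-side sketch (never compiled into the kernel; everything in
 * this block is illustrative only). It rebuilds the l_bin2asc packing used
 * above and shows how one 32-bit digest word becomes eight ascii hex
 * characters laid out for sha1_transform's big-endian word view.
 */
#include <stdio.h>

static const char bin2asc[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
                                  '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };

int main (void)
{
  unsigned short tbl[256];

  /* same fill as the l_bin2asc loop in the kernels above */
  for (int i = 0; i < 256; i++)
  {
    tbl[i] = bin2asc[(i >> 0) & 15] << 0
           | bin2asc[(i >> 4) & 15] << 8;
  }

  const unsigned int a = 0x12345678;

  /* mirrors the w2_t[2] / w2_t[3] assignments in m08400m */
  const unsigned int hi = (unsigned int) tbl[(a >> 16) & 255] <<  0
                        | (unsigned int) tbl[(a >> 24) & 255] << 16;
  const unsigned int lo = (unsigned int) tbl[(a >>  0) & 255] <<  0
                        | (unsigned int) tbl[(a >>  8) & 255] << 16;

  /* prints 0x31323334 0x35363738: the ascii codes of "1234" and "5678",
     i.e. the hex digits appear in order once the words are read big-endian */
  printf ("0x%08x -> 0x%08x 0x%08x\n", a, hi, lo);

  return 0;
}
#endif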