 * Author......: Jens Steube <jens.steube@gmail.com>

#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"

#include "common_nv.c"
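
/*
 * Digest comparison code, pulled in at the end of the inner loops: the _S
 * include handles the single-hash case, the _M include the multi-hash case;
 * the vect1/vect2 variants match the vector width the kernel is built for.
 */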
#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif
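
// candidate words for the mask attack, staged in constant memory by the host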
__device__ __constant__ bf_t c_bfs[1024];
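
// plain SHA-1 compression of one 64-byte block: 80 rounds in four groups of
// 20 (F0o, F1, F2o, F1), with the message schedule expanded on the fly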
__device__ static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
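
/*
 * Multi-hash inner loop for -m 8300 (DNSSEC NSEC3): hash the message
 *   <label length byte> <password> <domain in wire format> <salt>
 * with SHA-1, then re-hash <digest> <salt> for salt_iter further rounds
 * before handing the result to the comparison include.
 */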
__device__ static void m08300m (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  const u32 salt_iter = salt_bufs[salt_pos].salt_iter;

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  domain_buf0[0] = salt_bufs[salt_pos].salt_buf_pc[ 0];
  domain_buf0[1] = salt_bufs[salt_pos].salt_buf_pc[ 1];
  domain_buf0[2] = salt_bufs[salt_pos].salt_buf_pc[ 2];
  domain_buf0[3] = salt_bufs[salt_pos].salt_buf_pc[ 3];

  domain_buf1[0] = salt_bufs[salt_pos].salt_buf_pc[ 4];
  domain_buf1[1] = salt_bufs[salt_pos].salt_buf_pc[ 5];
  domain_buf1[2] = salt_bufs[salt_pos].salt_buf_pc[ 6];

  const u32 domain_len = salt_bufs[salt_pos].salt_buf_pc[ 7];

  s0[0] = salt_buf0[0];
  s0[1] = salt_buf0[1];
  s0[2] = salt_buf0[2];
  s0[3] = salt_buf0[3];

  s1[0] = salt_buf1[0];
  s1[1] = salt_buf1[1];
  s1[2] = salt_buf1[2];
  s1[3] = salt_buf1[3];
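
  // place the salt behind <length byte> <password> <domain> plus one trailing byte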
  switch_buffer_by_offset (s0, s1, s2, s3, 1 + pw_len + domain_len + 1);

  d0[0] = domain_buf0[0];
  d0[1] = domain_buf0[1];
  d0[2] = domain_buf0[2];
  d0[3] = domain_buf0[3];

  d1[0] = domain_buf1[0];
  d1[1] = domain_buf1[1];
  d1[2] = domain_buf1[2];
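
  // place the wire-format domain directly behind the length byte and the password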
  switch_buffer_by_offset (d0, d1, d2, d3, 1 + pw_len);
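
  // try every candidate word held in constant memory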
  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)

    const u32 w0r = c_bfs[il_pos].i;

    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, 1);
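
    // the byte freed up by the one-byte shift becomes the length prefix of
    // the leading DNS label, i.e. the password length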
    w0_t[0] |= pw_len & 0xff;
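
    // merge password, domain and salt into one message block and byte-swap
    // the words to the big-endian order SHA-1 expects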
    w0_t2[0] = swap_workaround (w0_t[0] | d0[0] | s0[0]);
    w0_t2[1] = swap_workaround (w0_t[1] | d0[1] | s0[1]);
    w0_t2[2] = swap_workaround (w0_t[2] | d0[2] | s0[2]);
    w0_t2[3] = swap_workaround (w0_t[3] | d0[3] | s0[3]);

    w1_t2[0] = swap_workaround (w1_t[0] | d1[0] | s1[0]);
    w1_t2[1] = swap_workaround (w1_t[1] | d1[1] | s1[1]);
    w1_t2[2] = swap_workaround (w1_t[2] | d1[2] | s1[2]);
    w1_t2[3] = swap_workaround (w1_t[3] | d1[3] | s1[3]);

    w2_t2[0] = swap_workaround (w2_t[0] | d2[0] | s2[0]);
    w2_t2[1] = swap_workaround (w2_t[1] | d2[1] | s2[1]);
    w2_t2[2] = swap_workaround (w2_t[2] | d2[2] | s2[2]);
    w2_t2[3] = swap_workaround (w2_t[3] | d2[3] | s2[3]);

    w3_t2[0] = swap_workaround (w3_t[0] | d3[0] | s3[0]);
    w3_t2[1] = swap_workaround (w3_t[1] | d3[1] | s3[1]);
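
    // total message length in bits: length byte + password + domain + trailing byte + salt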
    w3_t2[3] = (1 + pw_len + domain_len + 1 + salt_len) * 8;

    sha1_transform (w0_t2, w1_t2, w2_t2, w3_t2, digest);
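
    // NSEC3 iterations: digest = SHA1 (digest || salt), repeated salt_iter times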
    for (u32 i = 0; i < salt_iter; i++)

      w0_t3[0] = digest[0];
      w0_t3[1] = digest[1];
      w0_t3[2] = digest[2];
      w0_t3[3] = digest[3];

      w1_t3[0] = digest[4];
      w1_t3[1] = swap_workaround (salt_buf0[0]);
      w1_t3[2] = swap_workaround (salt_buf0[1]);
      w1_t3[3] = swap_workaround (salt_buf0[2]);

      w2_t3[0] = swap_workaround (salt_buf0[3]);
      w2_t3[1] = swap_workaround (salt_buf1[0]);
      w2_t3[2] = swap_workaround (salt_buf1[1]);
      w2_t3[3] = swap_workaround (salt_buf1[2]);

      w3_t3[0] = swap_workaround (salt_buf1[3]);

      w3_t3[3] = (20 + salt_len) * 8;

      sha1_transform (w0_t3, w1_t3, w2_t3, w3_t3, digest);
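
    // expose the digest words in the DGST_R0..DGST_R3 order the comparison include expects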
    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_M
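
/*
 * Single-hash variant: same message construction as m08300m, but the target
 * digest words are preloaded into search[] for the direct comparison done by
 * the VECT_COMPARE_S include.
 */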
__device__ static void m08300s (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
  const u32 lid = threadIdx.x;

  const u32 search[4] =
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]

  const u32 salt_iter = salt_bufs[salt_pos].salt_iter;

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  domain_buf0[0] = salt_bufs[salt_pos].salt_buf_pc[ 0];
  domain_buf0[1] = salt_bufs[salt_pos].salt_buf_pc[ 1];
  domain_buf0[2] = salt_bufs[salt_pos].salt_buf_pc[ 2];
  domain_buf0[3] = salt_bufs[salt_pos].salt_buf_pc[ 3];

  domain_buf1[0] = salt_bufs[salt_pos].salt_buf_pc[ 4];
  domain_buf1[1] = salt_bufs[salt_pos].salt_buf_pc[ 5];
  domain_buf1[2] = salt_bufs[salt_pos].salt_buf_pc[ 6];

  const u32 domain_len = salt_bufs[salt_pos].salt_buf_pc[ 7];

  s0[0] = salt_buf0[0];
  s0[1] = salt_buf0[1];
  s0[2] = salt_buf0[2];
  s0[3] = salt_buf0[3];

  s1[0] = salt_buf1[0];
  s1[1] = salt_buf1[1];
  s1[2] = salt_buf1[2];
  s1[3] = salt_buf1[3];

  switch_buffer_by_offset (s0, s1, s2, s3, 1 + pw_len + domain_len + 1);

  d0[0] = domain_buf0[0];
  d0[1] = domain_buf0[1];
  d0[2] = domain_buf0[2];
  d0[3] = domain_buf0[3];

  d1[0] = domain_buf1[0];
  d1[1] = domain_buf1[1];
  d1[2] = domain_buf1[2];

  switch_buffer_by_offset (d0, d1, d2, d3, 1 + pw_len);

  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)

    const u32 w0r = c_bfs[il_pos].i;

    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, 1);

    w0_t[0] |= pw_len & 0xff;

    w0_t2[0] = swap_workaround (w0_t[0] | d0[0] | s0[0]);
    w0_t2[1] = swap_workaround (w0_t[1] | d0[1] | s0[1]);
    w0_t2[2] = swap_workaround (w0_t[2] | d0[2] | s0[2]);
    w0_t2[3] = swap_workaround (w0_t[3] | d0[3] | s0[3]);

    w1_t2[0] = swap_workaround (w1_t[0] | d1[0] | s1[0]);
    w1_t2[1] = swap_workaround (w1_t[1] | d1[1] | s1[1]);
    w1_t2[2] = swap_workaround (w1_t[2] | d1[2] | s1[2]);
    w1_t2[3] = swap_workaround (w1_t[3] | d1[3] | s1[3]);

    w2_t2[0] = swap_workaround (w2_t[0] | d2[0] | s2[0]);
    w2_t2[1] = swap_workaround (w2_t[1] | d2[1] | s2[1]);
    w2_t2[2] = swap_workaround (w2_t[2] | d2[2] | s2[2]);
    w2_t2[3] = swap_workaround (w2_t[3] | d2[3] | s2[3]);

    w3_t2[0] = swap_workaround (w3_t[0] | d3[0] | s3[0]);
    w3_t2[1] = swap_workaround (w3_t[1] | d3[1] | s3[1]);

    w3_t2[3] = (1 + pw_len + domain_len + 1 + salt_len) * 8;

    sha1_transform (w0_t2, w1_t2, w2_t2, w3_t2, digest);

    for (u32 i = 0; i < salt_iter; i++)

      w0_t3[0] = digest[0];
      w0_t3[1] = digest[1];
      w0_t3[2] = digest[2];
      w0_t3[3] = digest[3];

      w1_t3[0] = digest[4];
      w1_t3[1] = swap_workaround (salt_buf0[0]);
      w1_t3[2] = swap_workaround (salt_buf0[1]);
      w1_t3[3] = swap_workaround (salt_buf0[2]);

      w2_t3[0] = swap_workaround (salt_buf0[3]);
      w2_t3[1] = swap_workaround (salt_buf1[0]);
      w2_t3[2] = swap_workaround (salt_buf1[1]);
      w2_t3[3] = swap_workaround (salt_buf1[2]);

      w3_t3[0] = swap_workaround (salt_buf1[3]);

      w3_t3[3] = (20 + salt_len) * 8;

      sha1_transform (w0_t3, w1_t3, w2_t3, w3_t3, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];

    #include VECT_COMPARE_S
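
/*
 * Kernel entry points: the _m04/_m08/_m16 kernels feed the multi-hash path,
 * the _s04/_s08/_s16 kernels the single-hash path; the 04/08/16 variants
 * load 4, 8 or 14 password words before dispatching to m08300m / m08300s.
 */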
extern "C" __global__ void __launch_bounds__ (256, 1) m08300_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  const u32 pw_len = pws[gid].pw_len;

  m08300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);

extern "C" __global__ void __launch_bounds__ (256, 1) m08300_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  m08300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);

extern "C" __global__ void __launch_bounds__ (256, 1) m08300_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];

  const u32 pw_len = pws[gid].pw_len;

  m08300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);

extern "C" __global__ void __launch_bounds__ (256, 1) m08300_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  const u32 pw_len = pws[gid].pw_len;

  m08300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);

extern "C" __global__ void __launch_bounds__ (256, 1) m08300_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  m08300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);

extern "C" __global__ void __launch_bounds__ (256, 1) m08300_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];

  const u32 pw_len = pws[gid].pw_len;

  m08300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);