/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"

#include "common_nv.c"
#include "include/rp_gpu.h"
#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif
__device__ __constant__ gpu_rule_t c_rules[1024];
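/*
 * The rule set is held in constant memory so every thread in a warp
 * broadcast-reads the same rule per loop iteration. A hypothetical
 * host-side sketch of how c_rules would be filled (the real host code
 * lives outside this file):
 *
 *   cudaMemcpyToSymbol (c_rules, host_rules, rules_cnt * sizeof (gpu_rule_t));
 */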
/*
 * One SHA-1 compression of a single 64-byte block (w0..w3), updating the
 * running five-word state in digest[] in place.
 */
__device__ static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
  // rounds 0..19: f = Ch (SHA1_F0o)

  #undef K
  #define K SHA1C00

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);
  // rounds 20..39: f = parity (SHA1_F1)

  #undef K
  #define K SHA1C01

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);
  // rounds 40..59: f = Maj (SHA1_F2o)

  #undef K
  #define K SHA1C02

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);
  // rounds 60..79: f = parity (SHA1_F1)

  #undef K
  #define K SHA1C03

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
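  /*
   * The unrolled schedule above is the standard SHA-1 message expansion
   * w[t] = rotl32 (w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1) for t = 16..79,
   * kept in a rolling 16-word window (w0_t..wf_t). A compact but slower
   * equivalent, for reference:
   *
   *   for (int t = 16; t < 80; t++)
   *   {
   *     w[t & 15] = rotl32 (w[(t -  3) & 15] ^ w[(t -  8) & 15]
   *                       ^ w[(t - 14) & 15] ^ w[(t - 16) & 15], 1u);
   *   }
   *
   * The function then accumulates the working variables A..E back into
   * digest[0..4].
   */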
/*
 * Multi-hash entry point for the rule-based attack: each thread takes one
 * candidate password, applies one rule per loop iteration, and hashes the
 * result.
 */
extern "C" __global__ void __launch_bounds__ (256, 1) m08300_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 lid = threadIdx.x;

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;
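  /*
   * __launch_bounds__ (256, 1) fixes the block size at 256 threads; a
   * matching host-side launch would look roughly like this (sketch only,
   * names hypothetical):
   *
   *   dim3 block (256);
   *   dim3 grid ((gid_max + block.x - 1) / block.x);
   *   m08300_m04<<<grid, block>>> (pws, rules_buf, ..., gid_max);
   */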
  u32 pw_buf0[4];

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  u32 pw_buf1[4];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;
  const u32 salt_iter = salt_bufs[salt_pos].salt_iter;

  u32 salt_buf0[4];

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  u32 salt_buf1[4];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
  // salt_buf_pc carries the pre-computed domain portion; element 7 holds its length

  u32 domain_buf0[4];

  domain_buf0[0] = salt_bufs[salt_pos].salt_buf_pc[ 0];
  domain_buf0[1] = salt_bufs[salt_pos].salt_buf_pc[ 1];
  domain_buf0[2] = salt_bufs[salt_pos].salt_buf_pc[ 2];
  domain_buf0[3] = salt_bufs[salt_pos].salt_buf_pc[ 3];

  u32 domain_buf1[3];

  domain_buf1[0] = salt_bufs[salt_pos].salt_buf_pc[ 4];
  domain_buf1[1] = salt_bufs[salt_pos].salt_buf_pc[ 5];
  domain_buf1[2] = salt_bufs[salt_pos].salt_buf_pc[ 6];

  const u32 domain_len = salt_bufs[salt_pos].salt_buf_pc[ 7];
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
  {
    u32x w0[4] = { pw_buf0[0], pw_buf0[1], pw_buf0[2], pw_buf0[3] };
    u32x w1[4] = { pw_buf1[0], pw_buf1[1], pw_buf1[2], pw_buf1[3] };

    const u32 out_len = apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    u32x w0_t[4] = { w0[0], w0[1], w0[2], w0[3] };
    u32x w1_t[4] = { w1[0], w1[1], w1[2], w1[3] };
    u32x w2_t[4] = { 0, 0, 0, 0 };
    u32x w3_t[4] = { 0, 0, 0, 0 };

    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, 1);

    w0_t[0] |= out_len & 0xff;
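    /*
     * The candidate word has just been shifted one byte right and its
     * length written into byte 0: the first label of a DNS wire-format
     * name, as used for NSEC3 hashed owner names (the mode this kernel
     * implements).
     */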
    u32x s0[4];

    s0[0] = salt_buf0[0];
    s0[1] = salt_buf0[1];
    s0[2] = salt_buf0[2];
    s0[3] = salt_buf0[3];

    u32x s1[4];

    s1[0] = salt_buf1[0];
    s1[1] = salt_buf1[1];
    s1[2] = salt_buf1[2];
    s1[3] = salt_buf1[3];

    u32x s2[4] = { 0, 0, 0, 0 };
    u32x s3[4] = { 0, 0, 0, 0 };

    switch_buffer_by_offset (s0, s1, s2, s3, 1 + out_len + domain_len + 1);
    u32x d0[4];

    d0[0] = domain_buf0[0];
    d0[1] = domain_buf0[1];
    d0[2] = domain_buf0[2];
    d0[3] = domain_buf0[3];

    u32x d1[4];

    d1[0] = domain_buf1[0];
    d1[1] = domain_buf1[1];
    d1[2] = domain_buf1[2];
    d1[3] = 0;

    u32x d2[4] = { 0, 0, 0, 0 };
    u32x d3[4] = { 0, 0, 0, 0 };

    switch_buffer_by_offset (d0, d1, d2, d3, 1 + out_len);
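    /*
     * Offsets recap: the word sits at offset 1 (after its length byte),
     * the domain at 1 + out_len, the salt at 1 + out_len + domain_len + 1.
     * ORed together below they form the single message
     *
     *   [ len | word | domain | 0x00 | salt ]
     *
     * where the byte between domain and salt reads as the terminating
     * root label of the wire-format name.
     */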
    // merge word, domain and salt, byte-swapping to big-endian for SHA-1

    u32x w0_t2[4];

    w0_t2[0] = swap_workaround (w0_t[0] | d0[0] | s0[0]);
    w0_t2[1] = swap_workaround (w0_t[1] | d0[1] | s0[1]);
    w0_t2[2] = swap_workaround (w0_t[2] | d0[2] | s0[2]);
    w0_t2[3] = swap_workaround (w0_t[3] | d0[3] | s0[3]);

    u32x w1_t2[4];

    w1_t2[0] = swap_workaround (w1_t[0] | d1[0] | s1[0]);
    w1_t2[1] = swap_workaround (w1_t[1] | d1[1] | s1[1]);
    w1_t2[2] = swap_workaround (w1_t[2] | d1[2] | s1[2]);
    w1_t2[3] = swap_workaround (w1_t[3] | d1[3] | s1[3]);

    u32x w2_t2[4];

    w2_t2[0] = swap_workaround (w2_t[0] | d2[0] | s2[0]);
    w2_t2[1] = swap_workaround (w2_t[1] | d2[1] | s2[1]);
    w2_t2[2] = swap_workaround (w2_t[2] | d2[2] | s2[2]);
    w2_t2[3] = swap_workaround (w2_t[3] | d2[3] | s2[3]);

    u32x w3_t2[4];

    w3_t2[0] = swap_workaround (w3_t[0] | d3[0] | s3[0]);
    w3_t2[1] = swap_workaround (w3_t[1] | d3[1] | s3[1]);
    w3_t2[2] = 0;
    w3_t2[3] = (1 + out_len + domain_len + 1 + salt_len) * 8;

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t2, w1_t2, w2_t2, w3_t2, digest);
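    /*
     * NSEC3 iterated hashing (RFC 5155): IH(0) = H(name || salt) was just
     * computed; the loop below runs the salt_iter extra iterations
     * IH(k) = H(IH(k-1) || salt), feeding the 20-byte digest plus the salt
     * back through sha1_transform each time.
     */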
    for (u32 i = 0; i < salt_iter; i++)
    {
      u32x w0_t3[4];

      w0_t3[0] = digest[0];
      w0_t3[1] = digest[1];
      w0_t3[2] = digest[2];
      w0_t3[3] = digest[3];

      u32x w1_t3[4];

      w1_t3[0] = digest[4];
      w1_t3[1] = swap_workaround (salt_buf0[0]);
      w1_t3[2] = swap_workaround (salt_buf0[1]);
      w1_t3[3] = swap_workaround (salt_buf0[2]);

      u32x w2_t3[4];

      w2_t3[0] = swap_workaround (salt_buf0[3]);
      w2_t3[1] = swap_workaround (salt_buf1[0]);
      w2_t3[2] = swap_workaround (salt_buf1[1]);
      w2_t3[3] = swap_workaround (salt_buf1[2]);

      u32x w3_t3[4];

      w3_t3[0] = swap_workaround (salt_buf1[3]);
      w3_t3[1] = 0;
      w3_t3[2] = 0;
      w3_t3[3] = (20 + salt_len) * 8;

      // each iteration is a fresh SHA-1 over digest || salt, so reset the state

      digest[0] = SHA1M_A;
      digest[1] = SHA1M_B;
      digest[2] = SHA1M_C;
      digest[3] = SHA1M_D;
      digest[4] = SHA1M_E;

      sha1_transform (w0_t3, w1_t3, w2_t3, w3_t3, digest);
    }
    // result words in the order the compare include expects (DGST_R0..DGST_R3)

    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
    #include VECT_COMPARE_M
  }
}
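/*
 * The _m08/_m16 (and later _s08/_s16) variants cover longer password
 * lengths and share the same signature; for this mode they are presumably
 * left as empty stubs.
 */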
extern "C" __global__ void __launch_bounds__ (256, 1) m08300_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

extern "C" __global__ void __launch_bounds__ (256, 1) m08300_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}
/*
 * Single-hash counterpart of m08300_m04: identical message construction,
 * but compares against one precomputed target digest.
 */
extern "C" __global__ void __launch_bounds__ (256, 1) m08300_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 lid = threadIdx.x;

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;
  u32 pw_buf0[4];

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  u32 pw_buf1[4];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;
  const u32 salt_iter = salt_bufs[salt_pos].salt_iter;

  u32 salt_buf0[4];

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  u32 salt_buf1[4];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
  u32 domain_buf0[4];

  domain_buf0[0] = salt_bufs[salt_pos].salt_buf_pc[ 0];
  domain_buf0[1] = salt_bufs[salt_pos].salt_buf_pc[ 1];
  domain_buf0[2] = salt_bufs[salt_pos].salt_buf_pc[ 2];
  domain_buf0[3] = salt_bufs[salt_pos].salt_buf_pc[ 3];

  u32 domain_buf1[3];

  domain_buf1[0] = salt_bufs[salt_pos].salt_buf_pc[ 4];
  domain_buf1[1] = salt_bufs[salt_pos].salt_buf_pc[ 5];
  domain_buf1[2] = salt_bufs[salt_pos].salt_buf_pc[ 6];

  const u32 domain_len = salt_bufs[salt_pos].salt_buf_pc[ 7];
  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };
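  /*
   * Unlike the multi-hash kernel, the single-hash path loads its one
   * target digest up front; VECT_COMPARE_S later checks r0..r3 against
   * these four words.
   */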
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
  {
    u32x w0[4] = { pw_buf0[0], pw_buf0[1], pw_buf0[2], pw_buf0[3] };
    u32x w1[4] = { pw_buf1[0], pw_buf1[1], pw_buf1[2], pw_buf1[3] };

    const u32 out_len = apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    u32x w0_t[4] = { w0[0], w0[1], w0[2], w0[3] };
    u32x w1_t[4] = { w1[0], w1[1], w1[2], w1[3] };
    u32x w2_t[4] = { 0, 0, 0, 0 };
    u32x w3_t[4] = { 0, 0, 0, 0 };

    switch_buffer_by_offset (w0_t, w1_t, w2_t, w3_t, 1);

    w0_t[0] |= out_len & 0xff;
    u32x s0[4];

    s0[0] = salt_buf0[0];
    s0[1] = salt_buf0[1];
    s0[2] = salt_buf0[2];
    s0[3] = salt_buf0[3];

    u32x s1[4];

    s1[0] = salt_buf1[0];
    s1[1] = salt_buf1[1];
    s1[2] = salt_buf1[2];
    s1[3] = salt_buf1[3];

    u32x s2[4] = { 0, 0, 0, 0 };
    u32x s3[4] = { 0, 0, 0, 0 };

    switch_buffer_by_offset (s0, s1, s2, s3, 1 + out_len + domain_len + 1);
    u32x d0[4];

    d0[0] = domain_buf0[0];
    d0[1] = domain_buf0[1];
    d0[2] = domain_buf0[2];
    d0[3] = domain_buf0[3];

    u32x d1[4];

    d1[0] = domain_buf1[0];
    d1[1] = domain_buf1[1];
    d1[2] = domain_buf1[2];
    d1[3] = 0;

    u32x d2[4] = { 0, 0, 0, 0 };
    u32x d3[4] = { 0, 0, 0, 0 };

    switch_buffer_by_offset (d0, d1, d2, d3, 1 + out_len);
    u32x w0_t2[4];

    w0_t2[0] = swap_workaround (w0_t[0] | d0[0] | s0[0]);
    w0_t2[1] = swap_workaround (w0_t[1] | d0[1] | s0[1]);
    w0_t2[2] = swap_workaround (w0_t[2] | d0[2] | s0[2]);
    w0_t2[3] = swap_workaround (w0_t[3] | d0[3] | s0[3]);

    u32x w1_t2[4];

    w1_t2[0] = swap_workaround (w1_t[0] | d1[0] | s1[0]);
    w1_t2[1] = swap_workaround (w1_t[1] | d1[1] | s1[1]);
    w1_t2[2] = swap_workaround (w1_t[2] | d1[2] | s1[2]);
    w1_t2[3] = swap_workaround (w1_t[3] | d1[3] | s1[3]);

    u32x w2_t2[4];

    w2_t2[0] = swap_workaround (w2_t[0] | d2[0] | s2[0]);
    w2_t2[1] = swap_workaround (w2_t[1] | d2[1] | s2[1]);
    w2_t2[2] = swap_workaround (w2_t[2] | d2[2] | s2[2]);
    w2_t2[3] = swap_workaround (w2_t[3] | d2[3] | s2[3]);

    u32x w3_t2[4];

    w3_t2[0] = swap_workaround (w3_t[0] | d3[0] | s3[0]);
    w3_t2[1] = swap_workaround (w3_t[1] | d3[1] | s3[1]);
    w3_t2[2] = 0;
    w3_t2[3] = (1 + out_len + domain_len + 1 + salt_len) * 8;

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t2, w1_t2, w2_t2, w3_t2, digest);
    for (u32 i = 0; i < salt_iter; i++)
    {
      u32x w0_t3[4];

      w0_t3[0] = digest[0];
      w0_t3[1] = digest[1];
      w0_t3[2] = digest[2];
      w0_t3[3] = digest[3];

      u32x w1_t3[4];

      w1_t3[0] = digest[4];
      w1_t3[1] = swap_workaround (salt_buf0[0]);
      w1_t3[2] = swap_workaround (salt_buf0[1]);
      w1_t3[3] = swap_workaround (salt_buf0[2]);

      u32x w2_t3[4];

      w2_t3[0] = swap_workaround (salt_buf0[3]);
      w2_t3[1] = swap_workaround (salt_buf1[0]);
      w2_t3[2] = swap_workaround (salt_buf1[1]);
      w2_t3[3] = swap_workaround (salt_buf1[2]);

      u32x w3_t3[4];

      w3_t3[0] = swap_workaround (salt_buf1[3]);
      w3_t3[1] = 0;
      w3_t3[2] = 0;
      w3_t3[3] = (20 + salt_len) * 8;

      digest[0] = SHA1M_A;
      digest[1] = SHA1M_B;
      digest[2] = SHA1M_C;
      digest[3] = SHA1M_D;
      digest[4] = SHA1M_E;

      sha1_transform (w0_t3, w1_t3, w2_t3, w3_t3, digest);
    }
    const u32x r0 = digest[3];
    const u32x r1 = digest[4];
    const u32x r2 = digest[2];
    const u32x r3 = digest[1];
    #include VECT_COMPARE_S
  }
}
extern "C" __global__ void __launch_bounds__ (256, 1) m08300_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

extern "C" __global__ void __launch_bounds__ (256, 1) m08300_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}