2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
24 #include "include/kernel_functions.c"
26 #include "common_nv.c"
29 #define VECT_COMPARE_S "check_single_vect1_comp4.c"
30 #define VECT_COMPARE_M "check_multi_vect1_comp4.c"
34 #define VECT_COMPARE_S "check_single_vect2_comp4.c"
35 #define VECT_COMPARE_M "check_multi_vect2_comp4.c"
// SHA-256 compression function (FIPS 180-4): consumes one 64-byte (16 x u32,
// big-endian) message block `w` and updates the 8-word running state `digest`
// in place via the SHA256_STEP round macro (defined in an included header).
// NOTE(review): this is a sampled view — the declarations of the working
// variables a..h and the schedule words w0_t..wf_t, plus the final
// digest[i] += a..h accumulation and closing brace, lie outside the shown
// lines. Code below is byte-identical to the original.
38 __device__ static void sha256_transform (u32x digest[8], const u32x w[16])
// Rounds 0-15: the message schedule is the input block itself (w0_t..wf_t,
// presumably loaded from w[] in unseen lines — TODO confirm).
66 SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, SHA256C00);
67 SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, SHA256C01);
68 SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, SHA256C02);
69 SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, SHA256C03);
70 SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, SHA256C04);
71 SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, SHA256C05);
72 SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, SHA256C06);
73 SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, SHA256C07);
74 SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, SHA256C08);
75 SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, SHA256C09);
76 SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, SHA256C0a);
77 SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, SHA256C0b);
78 SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, SHA256C0c);
79 SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, SHA256C0d);
80 SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C0e);
81 SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C0f);
// Rounds 16-31: fully unrolled message-schedule expansion,
// W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16],
// fused with the round computation (SHA256_S0/S1 are the sigma functions).
82 w0_t = SHA256_S1(we_t) + w9_t + SHA256_S0(w1_t) + w0_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, SHA256C10);
83 w1_t = SHA256_S1(wf_t) + wa_t + SHA256_S0(w2_t) + w1_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, SHA256C11);
84 w2_t = SHA256_S1(w0_t) + wb_t + SHA256_S0(w3_t) + w2_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, SHA256C12);
85 w3_t = SHA256_S1(w1_t) + wc_t + SHA256_S0(w4_t) + w3_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, SHA256C13);
86 w4_t = SHA256_S1(w2_t) + wd_t + SHA256_S0(w5_t) + w4_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, SHA256C14);
87 w5_t = SHA256_S1(w3_t) + we_t + SHA256_S0(w6_t) + w5_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, SHA256C15);
88 w6_t = SHA256_S1(w4_t) + wf_t + SHA256_S0(w7_t) + w6_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, SHA256C16);
89 w7_t = SHA256_S1(w5_t) + w0_t + SHA256_S0(w8_t) + w7_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, SHA256C17);
90 w8_t = SHA256_S1(w6_t) + w1_t + SHA256_S0(w9_t) + w8_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, SHA256C18);
91 w9_t = SHA256_S1(w7_t) + w2_t + SHA256_S0(wa_t) + w9_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, SHA256C19);
92 wa_t = SHA256_S1(w8_t) + w3_t + SHA256_S0(wb_t) + wa_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, SHA256C1a);
93 wb_t = SHA256_S1(w9_t) + w4_t + SHA256_S0(wc_t) + wb_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, SHA256C1b);
94 wc_t = SHA256_S1(wa_t) + w5_t + SHA256_S0(wd_t) + wc_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, SHA256C1c);
95 wd_t = SHA256_S1(wb_t) + w6_t + SHA256_S0(we_t) + wd_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, SHA256C1d);
96 we_t = SHA256_S1(wc_t) + w7_t + SHA256_S0(wf_t) + we_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C1e);
97 wf_t = SHA256_S1(wd_t) + w8_t + SHA256_S0(w0_t) + wf_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C1f);
// Rounds 32-47: same expansion pattern, next sixteen round constants.
98 w0_t = SHA256_S1(we_t) + w9_t + SHA256_S0(w1_t) + w0_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, SHA256C20);
99 w1_t = SHA256_S1(wf_t) + wa_t + SHA256_S0(w2_t) + w1_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, SHA256C21);
100 w2_t = SHA256_S1(w0_t) + wb_t + SHA256_S0(w3_t) + w2_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, SHA256C22);
101 w3_t = SHA256_S1(w1_t) + wc_t + SHA256_S0(w4_t) + w3_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, SHA256C23);
102 w4_t = SHA256_S1(w2_t) + wd_t + SHA256_S0(w5_t) + w4_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, SHA256C24);
103 w5_t = SHA256_S1(w3_t) + we_t + SHA256_S0(w6_t) + w5_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, SHA256C25);
104 w6_t = SHA256_S1(w4_t) + wf_t + SHA256_S0(w7_t) + w6_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, SHA256C26);
105 w7_t = SHA256_S1(w5_t) + w0_t + SHA256_S0(w8_t) + w7_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, SHA256C27);
106 w8_t = SHA256_S1(w6_t) + w1_t + SHA256_S0(w9_t) + w8_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, SHA256C28);
107 w9_t = SHA256_S1(w7_t) + w2_t + SHA256_S0(wa_t) + w9_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, SHA256C29);
108 wa_t = SHA256_S1(w8_t) + w3_t + SHA256_S0(wb_t) + wa_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, SHA256C2a);
109 wb_t = SHA256_S1(w9_t) + w4_t + SHA256_S0(wc_t) + wb_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, SHA256C2b);
110 wc_t = SHA256_S1(wa_t) + w5_t + SHA256_S0(wd_t) + wc_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, SHA256C2c);
111 wd_t = SHA256_S1(wb_t) + w6_t + SHA256_S0(we_t) + wd_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, SHA256C2d);
112 we_t = SHA256_S1(wc_t) + w7_t + SHA256_S0(wf_t) + we_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C2e);
113 wf_t = SHA256_S1(wd_t) + w8_t + SHA256_S0(w0_t) + wf_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C2f);
// Rounds 48-63: final sixteen rounds.
114 w0_t = SHA256_S1(we_t) + w9_t + SHA256_S0(w1_t) + w0_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, SHA256C30);
115 w1_t = SHA256_S1(wf_t) + wa_t + SHA256_S0(w2_t) + w1_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, SHA256C31);
116 w2_t = SHA256_S1(w0_t) + wb_t + SHA256_S0(w3_t) + w2_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, SHA256C32);
117 w3_t = SHA256_S1(w1_t) + wc_t + SHA256_S0(w4_t) + w3_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, SHA256C33);
118 w4_t = SHA256_S1(w2_t) + wd_t + SHA256_S0(w5_t) + w4_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, SHA256C34);
119 w5_t = SHA256_S1(w3_t) + we_t + SHA256_S0(w6_t) + w5_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, SHA256C35);
120 w6_t = SHA256_S1(w4_t) + wf_t + SHA256_S0(w7_t) + w6_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, SHA256C36);
121 w7_t = SHA256_S1(w5_t) + w0_t + SHA256_S0(w8_t) + w7_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, SHA256C37);
122 w8_t = SHA256_S1(w6_t) + w1_t + SHA256_S0(w9_t) + w8_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, SHA256C38);
123 w9_t = SHA256_S1(w7_t) + w2_t + SHA256_S0(wa_t) + w9_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, SHA256C39);
124 wa_t = SHA256_S1(w8_t) + w3_t + SHA256_S0(wb_t) + wa_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, SHA256C3a);
125 wb_t = SHA256_S1(w9_t) + w4_t + SHA256_S0(wc_t) + wb_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, SHA256C3b);
126 wc_t = SHA256_S1(wa_t) + w5_t + SHA256_S0(wd_t) + wc_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, SHA256C3c);
127 wd_t = SHA256_S1(wb_t) + w6_t + SHA256_S0(we_t) + wd_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, SHA256C3d);
128 we_t = SHA256_S1(wc_t) + w7_t + SHA256_S0(wf_t) + we_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
129 wf_t = SHA256_S1(wd_t) + w8_t + SHA256_S0(w0_t) + wf_t; SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C3f);
// Device constant-memory staging area for the right-hand candidate words of
// the combinator attack; read by all kernels below as c_combs[il_pos].
// Presumably written by the host before each launch — capacity 1024 entries.
141 __device__ __constant__ comb_t c_combs[1024];
// m08000_m04: multi-hash ("-m" = check against many digests) kernel for
// hash-mode 8000, combinator attack, short passwords (the _04 suffix
// presumably means pw_len <= 16 bytes — TODO confirm against host code).
// One candidate password is processed per global thread.
// NOTE(review): sampled view — declarations of wordl2/wordl3, wordr0..3,
// w0..w3, w_t, digest and the zeroing of unused message words lie outside
// the shown lines; code lines below are byte-identical to the original.
143 extern "C" __global__ void __launch_bounds__ (256, 1) m08000_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
// Per-thread indices: lid is used by the included compare code, gid selects
// this thread's base password.
149 const u32 lid = threadIdx.x;
155 const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Threads beyond the candidate count do nothing.
157 if (gid >= gid_max) return;
// Load the left-hand (base) password, 8 x u32 = up to 32 bytes, from
// global memory into registers.
161 wordl0[0] = pws[gid].i[ 0];
162 wordl0[1] = pws[gid].i[ 1];
163 wordl0[2] = pws[gid].i[ 2];
164 wordl0[3] = pws[gid].i[ 3];
168 wordl1[0] = pws[gid].i[ 4];
169 wordl1[1] = pws[gid].i[ 5];
170 wordl1[2] = pws[gid].i[ 6];
171 wordl1[3] = pws[gid].i[ 7];
187 const u32 pw_l_len = pws[gid].pw_len;
// If the base word goes on the right, shift it past the (uniform) length of
// the combinator words so the later OR-merge concatenates correctly.
// NOTE(review): uses c_combs[0].pw_len for all iterations, i.e. assumes all
// right-hand words in this batch share one length — standard for this
// kernel generation, but worth confirming against the host-side loader.
189 if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
191 switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, c_combs[0].pw_len);
// Salt words, byte-swapped by swap_workaround (presumably to big-endian for
// SHA-256). The trailing comment suggests salt_buf[2] carries the 0x80
// padding byte appended by the host.
198 const u32 salt_buf0 = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
199 const u32 salt_buf1 = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
200 const u32 salt_buf2 = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]); // 0x80
// Inner loop: combine the base word with each right-hand candidate.
206 for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
// Right-hand word from constant memory (broadcast-friendly: all threads
// read the same il_pos entry each iteration).
213 wordr0[0] = c_combs[il_pos].i[0];
214 wordr0[1] = c_combs[il_pos].i[1];
215 wordr0[2] = c_combs[il_pos].i[2];
216 wordr0[3] = c_combs[il_pos].i[3];
217 wordr1[0] = c_combs[il_pos].i[4];
218 wordr1[1] = c_combs[il_pos].i[5];
219 wordr1[2] = c_combs[il_pos].i[6];
220 wordr1[3] = c_combs[il_pos].i[7];
// Mirror case: base word stays on the left, so shift the right-hand word
// past this thread's base-password length.
230 if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
232 switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);
// Concatenate by OR: after the offset shift the two halves occupy disjoint
// byte positions, so OR yields base|combinator (or combinator|base).
240 w0[0] = wordl0[0] | wordr0[0];
241 w0[1] = wordl0[1] | wordr0[1];
242 w0[2] = wordl0[2] | wordr0[2];
243 w0[3] = wordl0[3] | wordr0[3];
244 w1[0] = wordl1[0] | wordr1[0];
245 w1[1] = wordl1[1] | wordr1[1];
246 w1[2] = wordl1[2] | wordr1[2];
247 w1[3] = wordl1[3] | wordr1[3];
248 w2[0] = wordl2[0] | wordr2[0];
249 w2[1] = wordl2[1] | wordr2[1];
250 w2[2] = wordl2[2] | wordr2[2];
251 w2[3] = wordl2[3] | wordr2[3];
252 w3[0] = wordl3[0] | wordr3[0];
253 w3[1] = wordl3[1] | wordr3[1];
254 w3[2] = wordl3[2] | wordr3[2];
255 w3[3] = wordl3[3] | wordr3[3];
// Widen the password to two-byte characters (presumably UTF-16LE-style
// zero interleaving — TODO confirm make_unicode semantics): 8 words in,
// 16 words out across w0_t..w3_t.
262 make_unicode (w0, w0_t, w1_t);
264 make_unicode (w1, w2_t, w3_t);
// Byte-swap each word into SHA-256's big-endian word order.
268 w_t[ 0] = swap_workaround (w0_t[0]);
269 w_t[ 1] = swap_workaround (w0_t[1]);
270 w_t[ 2] = swap_workaround (w0_t[2]);
271 w_t[ 3] = swap_workaround (w0_t[3]);
272 w_t[ 4] = swap_workaround (w1_t[0]);
273 w_t[ 5] = swap_workaround (w1_t[1]);
274 w_t[ 6] = swap_workaround (w1_t[2]);
275 w_t[ 7] = swap_workaround (w1_t[3]);
276 w_t[ 8] = swap_workaround (w2_t[0]);
277 w_t[ 9] = swap_workaround (w2_t[1]);
278 w_t[10] = swap_workaround (w2_t[2]);
279 w_t[11] = swap_workaround (w2_t[3]);
280 w_t[12] = swap_workaround (w3_t[0]);
281 w_t[13] = swap_workaround (w3_t[1]);
282 w_t[14] = swap_workaround (w3_t[2]);
283 w_t[15] = swap_workaround (w3_t[3]);
// Mode-8000 quirk: shift every word right by 8, inserting a zero byte at
// the front of the message (the algorithm's effective leading pad byte).
285 w_t[ 0] = w_t[ 0] >> 8;
286 w_t[ 1] = w_t[ 1] >> 8;
287 w_t[ 2] = w_t[ 2] >> 8;
288 w_t[ 3] = w_t[ 3] >> 8;
289 w_t[ 4] = w_t[ 4] >> 8;
290 w_t[ 5] = w_t[ 5] >> 8;
291 w_t[ 6] = w_t[ 6] >> 8;
292 w_t[ 7] = w_t[ 7] >> 8;
293 w_t[ 8] = w_t[ 8] >> 8;
294 w_t[ 9] = w_t[ 9] >> 8;
295 w_t[10] = w_t[10] >> 8;
296 w_t[11] = w_t[11] >> 8;
297 w_t[12] = w_t[12] >> 8;
298 w_t[13] = w_t[13] >> 8;
299 w_t[14] = w_t[14] >> 8;
300 w_t[15] = w_t[15] >> 8;
// SHA-256 initial state (H0..H7 constants from the included header).
304 digest[0] = SHA256M_A;
305 digest[1] = SHA256M_B;
306 digest[2] = SHA256M_C;
307 digest[3] = SHA256M_D;
308 digest[4] = SHA256M_E;
309 digest[5] = SHA256M_F;
310 digest[6] = SHA256M_G;
311 digest[7] = SHA256M_H;
313 sha256_transform (digest, w_t); // 0 - 64
// NOTE(review): between these calls the original (unseen lines) presumably
// rewrites w_t with the next message content (zero-padded region); the
// repeated calls hash bytes 64..448 of the fixed-layout message.
332 sha256_transform (digest, w_t); // 64 - 128
333 sha256_transform (digest, w_t); // 128 - 192
334 sha256_transform (digest, w_t); // 192 - 256
335 sha256_transform (digest, w_t); // 256 - 320
336 sha256_transform (digest, w_t); // 320 - 384
337 sha256_transform (digest, w_t); // 384 - 448
// The salt straddles a block boundary: its top half-word closes this block.
339 w_t[15] = 0 | salt_buf0 >> 16;
341 sha256_transform (digest, w_t); // 448 - 512
// Remaining salt bytes, 16-bit-shifted across word boundaries, then the
// total message bit length (510 data bytes + 8 salt bytes, times 8 bits)
// in the final length word.
343 w_t[ 0] = salt_buf0 << 16 | salt_buf1 >> 16;
344 w_t[ 1] = salt_buf1 << 16 | salt_buf2 >> 16;
345 w_t[ 2] = salt_buf2 << 16 | 0;
346 w_t[15] = (510 + 8) * 8;
348 sha256_transform (digest, w_t); // 512 - 576
// Result words in the order the compare code expects (DGST_R0..R3 mapping:
// 3,7,2,6 for this mode).
350 const u32x r0 = digest[3];
351 const u32x r1 = digest[7];
352 const u32x r2 = digest[2];
353 const u32x r3 = digest[6];
// Textual include of the multi-hash comparison routine; it consumes
// r0..r3, lid, the bitmaps and digest buffers from enclosing scope.
355 #include VECT_COMPARE_M
// m08000_m08: multi-hash entry point for the next password-length tier
// (_08 suffix). Body not visible in this view — presumably an empty stub,
// as is conventional for unsupported length tiers in this kernel family;
// TODO confirm.
359 extern "C" __global__ void __launch_bounds__ (256, 1) m08000_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
// m08000_m16: multi-hash entry point for the longest password tier (_16
// suffix). Body not visible in this view — presumably an empty stub; TODO
// confirm.
363 extern "C" __global__ void __launch_bounds__ (256, 1) m08000_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
// m08000_s04: single-hash ("-s" = check against one target digest) variant
// of m08000_m04. Identical candidate construction and hashing; the only
// differences are the preloaded `search` target below and the inclusion of
// VECT_COMPARE_S instead of VECT_COMPARE_M at the end.
// NOTE(review): sampled view — declarations of wordl2/wordl3, wordr0..3,
// w0..w3, w_t, digest and the zeroing of unused message words lie outside
// the shown lines; code lines below are byte-identical to the original.
367 extern "C" __global__ void __launch_bounds__ (256, 1) m08000_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 combs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
// Per-thread indices; one base password per global thread.
373 const u32 lid = threadIdx.x;
379 const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;
381 if (gid >= gid_max) return;
// Left-hand (base) password, up to 32 bytes, from global memory.
385 wordl0[0] = pws[gid].i[ 0];
386 wordl0[1] = pws[gid].i[ 1];
387 wordl0[2] = pws[gid].i[ 2];
388 wordl0[3] = pws[gid].i[ 3];
392 wordl1[0] = pws[gid].i[ 4];
393 wordl1[1] = pws[gid].i[ 5];
394 wordl1[2] = pws[gid].i[ 6];
395 wordl1[3] = pws[gid].i[ 7];
411 const u32 pw_l_len = pws[gid].pw_len;
// Base-on-the-right: pre-shift the base word past the combinator word
// length (assumes a uniform c_combs[0].pw_len for the batch).
413 if (combs_mode == COMBINATOR_MODE_BASE_RIGHT)
415 switch_buffer_by_offset (wordl0, wordl1, wordl2, wordl3, c_combs[0].pw_len);
// Byte-swapped salt words; salt_buf[2] presumably carries the 0x80 pad.
422 const u32 salt_buf0 = swap_workaround (salt_bufs[salt_pos].salt_buf[ 0]);
423 const u32 salt_buf1 = swap_workaround (salt_bufs[salt_pos].salt_buf[ 1]);
424 const u32 salt_buf2 = swap_workaround (salt_bufs[salt_pos].salt_buf[ 2]); // 0x80
// Single target digest, preloaded in the DGST_R0..R3 result-word order used
// by the compare code.
430 const u32 search[4] =
432 digests_buf[digests_offset].digest_buf[DGST_R0],
433 digests_buf[digests_offset].digest_buf[DGST_R1],
434 digests_buf[digests_offset].digest_buf[DGST_R2],
435 digests_buf[digests_offset].digest_buf[DGST_R3]
// Inner loop over right-hand combinator candidates.
442 for (u32 il_pos = 0; il_pos < combs_cnt; il_pos++)
// Right-hand word from constant memory.
449 wordr0[0] = c_combs[il_pos].i[0];
450 wordr0[1] = c_combs[il_pos].i[1];
451 wordr0[2] = c_combs[il_pos].i[2];
452 wordr0[3] = c_combs[il_pos].i[3];
453 wordr1[0] = c_combs[il_pos].i[4];
454 wordr1[1] = c_combs[il_pos].i[5];
455 wordr1[2] = c_combs[il_pos].i[6];
456 wordr1[3] = c_combs[il_pos].i[7];
// Base-on-the-left: shift the right-hand word past the base length.
466 if (combs_mode == COMBINATOR_MODE_BASE_LEFT)
468 switch_buffer_by_offset (wordr0, wordr1, wordr2, wordr3, pw_l_len);
// OR-merge the two disjoint halves into the final candidate.
476 w0[0] = wordl0[0] | wordr0[0];
477 w0[1] = wordl0[1] | wordr0[1];
478 w0[2] = wordl0[2] | wordr0[2];
479 w0[3] = wordl0[3] | wordr0[3];
480 w1[0] = wordl1[0] | wordr1[0];
481 w1[1] = wordl1[1] | wordr1[1];
482 w1[2] = wordl1[2] | wordr1[2];
483 w1[3] = wordl1[3] | wordr1[3];
484 w2[0] = wordl2[0] | wordr2[0];
485 w2[1] = wordl2[1] | wordr2[1];
486 w2[2] = wordl2[2] | wordr2[2];
487 w2[3] = wordl2[3] | wordr2[3];
488 w3[0] = wordl3[0] | wordr3[0];
489 w3[1] = wordl3[1] | wordr3[1];
490 w3[2] = wordl3[2] | wordr3[2];
491 w3[3] = wordl3[3] | wordr3[3];
// Widen to two-byte characters (presumably UTF-16LE-style — TODO confirm
// make_unicode semantics).
498 make_unicode (w0, w0_t, w1_t);
500 make_unicode (w1, w2_t, w3_t);
// Byte-swap into SHA-256 big-endian word order.
504 w_t[ 0] = swap_workaround (w0_t[0]);
505 w_t[ 1] = swap_workaround (w0_t[1]);
506 w_t[ 2] = swap_workaround (w0_t[2]);
507 w_t[ 3] = swap_workaround (w0_t[3]);
508 w_t[ 4] = swap_workaround (w1_t[0]);
509 w_t[ 5] = swap_workaround (w1_t[1]);
510 w_t[ 6] = swap_workaround (w1_t[2]);
511 w_t[ 7] = swap_workaround (w1_t[3]);
512 w_t[ 8] = swap_workaround (w2_t[0]);
513 w_t[ 9] = swap_workaround (w2_t[1]);
514 w_t[10] = swap_workaround (w2_t[2]);
515 w_t[11] = swap_workaround (w2_t[3]);
516 w_t[12] = swap_workaround (w3_t[0]);
517 w_t[13] = swap_workaround (w3_t[1]);
518 w_t[14] = swap_workaround (w3_t[2]);
519 w_t[15] = swap_workaround (w3_t[3]);
// Mode-8000 quirk: >>8 inserts the algorithm's leading zero byte.
521 w_t[ 0] = w_t[ 0] >> 8;
522 w_t[ 1] = w_t[ 1] >> 8;
523 w_t[ 2] = w_t[ 2] >> 8;
524 w_t[ 3] = w_t[ 3] >> 8;
525 w_t[ 4] = w_t[ 4] >> 8;
526 w_t[ 5] = w_t[ 5] >> 8;
527 w_t[ 6] = w_t[ 6] >> 8;
528 w_t[ 7] = w_t[ 7] >> 8;
529 w_t[ 8] = w_t[ 8] >> 8;
530 w_t[ 9] = w_t[ 9] >> 8;
531 w_t[10] = w_t[10] >> 8;
532 w_t[11] = w_t[11] >> 8;
533 w_t[12] = w_t[12] >> 8;
534 w_t[13] = w_t[13] >> 8;
535 w_t[14] = w_t[14] >> 8;
536 w_t[15] = w_t[15] >> 8;
// SHA-256 initial state.
540 digest[0] = SHA256M_A;
541 digest[1] = SHA256M_B;
542 digest[2] = SHA256M_C;
543 digest[3] = SHA256M_D;
544 digest[4] = SHA256M_E;
545 digest[5] = SHA256M_F;
546 digest[6] = SHA256M_G;
547 digest[7] = SHA256M_H;
549 sha256_transform (digest, w_t); // 0 - 64
// NOTE(review): w_t is presumably rewritten (zero-padded region) in unseen
// lines between these calls; they cover message bytes 64..448.
568 sha256_transform (digest, w_t); // 64 - 128
569 sha256_transform (digest, w_t); // 128 - 192
570 sha256_transform (digest, w_t); // 192 - 256
571 sha256_transform (digest, w_t); // 256 - 320
572 sha256_transform (digest, w_t); // 320 - 384
573 sha256_transform (digest, w_t); // 384 - 448
// Salt straddles the block boundary: top half-word closes this block.
575 w_t[15] = 0 | salt_buf0 >> 16;
577 sha256_transform (digest, w_t); // 448 - 512
// Remaining salt bytes shifted across word boundaries, then the message
// bit length (510 data bytes + 8 salt bytes, times 8).
579 w_t[ 0] = salt_buf0 << 16 | salt_buf1 >> 16;
580 w_t[ 1] = salt_buf1 << 16 | salt_buf2 >> 16;
581 w_t[ 2] = salt_buf2 << 16 | 0;
582 w_t[15] = (510 + 8) * 8;
584 sha256_transform (digest, w_t); // 512 - 576
// Result words in compare order (digest indices 3,7,2,6 for this mode),
// matching the `search` array above.
586 const u32x r0 = digest[3];
587 const u32x r1 = digest[7];
588 const u32x r2 = digest[2];
589 const u32x r3 = digest[6];
// Textual include of the single-hash comparison routine; it consumes
// r0..r3, lid and `search` from enclosing scope.
591 #include VECT_COMPARE_S
// m08000_s08: single-hash entry point for the next password-length tier.
// Body not visible in this view — presumably an empty stub; TODO confirm.
595 extern "C" __global__ void __launch_bounds__ (256, 1) m08000_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
// m08000_s16: single-hash entry point for the longest password tier.
// Body not visible in this view — presumably an empty stub; TODO confirm.
599 extern "C" __global__ void __launch_bounds__ (256, 1) m08000_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const void *esalt_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)