/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"

#include "common_nv.c"
#include "include/rp_gpu.h"
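/*
 * VECT_COMPARE_S / VECT_COMPARE_M name the single-hash and multi-hash
 * comparison code that is textually included at the end of the _s and _m
 * kernels below; the selected file matches the configured vector width.
 */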
#ifdef  VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef  VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef  VECT_SIZE4
#define VECT_COMPARE_S "check_single_vect4_comp4.c"
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
#endif
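/*
 * RC4 state helpers. swap() exchanges two entries of the 256-byte
 * permutation S inside RC4_KEY.
 */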
__device__ static void swap (RC4_KEY *rc4_key, const u32 i, const u32 j)

  rc4_key->S[i] = rc4_key->S[j];
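/*
 * rc4_init_16: RC4 key schedule (KSA) for a 16-byte key passed in data[4].
 * The permutation is first filled with the identity permutation 0..255
 * (written as packed 32-bit words through ptr), then mixed in 16 blocks of
 * 16 positions, consuming the key bytes of data[] one at a time per swap.
 */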
__device__ static void rc4_init_16 (RC4_KEY *rc4_key, const u32 data[4])

  u32 *ptr = (u32 *) rc4_key->S;

  for (u32 i = 0; i < 64; i++)

  for (u32 i = 0; i < 16; i++)

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j & 0xff); idx++;

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j & 0xff); idx++;

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j & 0xff); idx++;

    j += rc4_key->S[idx] + (v >>  0); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >>  8); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j & 0xff); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j & 0xff); idx++;
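/*
 * rc4_next_16: RC4 keystream generation (PRGA). Produces 16 keystream
 * bytes, packs them into four 32-bit words (xor4) and XORs them against
 * in[], storing the result in out[]. Returns the updated j index so a
 * later call can continue the keystream where this one stopped.
 */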
__device__ static u8 rc4_next_16 (RC4_KEY *rc4_key, u8 i, u8 j, const u32 in[4], u32 out[4])

  for (u32 k = 0; k < 4; k++)

    swap (rc4_key, i, j);
    idx = rc4_key->S[i] + rc4_key->S[j];
    xor4 |= rc4_key->S[idx] <<  0;

    swap (rc4_key, i, j);
    idx = rc4_key->S[i] + rc4_key->S[j];
    xor4 |= rc4_key->S[idx] <<  8;

    swap (rc4_key, i, j);
    idx = rc4_key->S[i] + rc4_key->S[j];
    xor4 |= rc4_key->S[idx] << 16;

    swap (rc4_key, i, j);
    idx = rc4_key->S[i] + rc4_key->S[j];
    xor4 |= rc4_key->S[idx] << 24;

    out[k] = in[k] ^ xor4;
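/*
 * sha1_transform: one SHA-1 compression of a single 64-byte block
 * (w0..w3, sixteen big-endian words) into digest[5]. All 80 rounds are
 * unrolled; from round 16 onward the message words are recomputed in
 * place with the usual XOR and rotate-left-by-1 schedule.
 */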
__device__ static void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);
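/*
 * Kernel entry points for the rule-based attack. c_rules holds up to 1024
 * rules in constant memory; every inner-loop iteration applies one rule to
 * the base word, derives an RC4 key from the mutated candidate and checks
 * the Office encryptedVerifier with it (the _m kernels compare against many
 * digests, the _s kernels against a single one).
 */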
__device__ __constant__ gpu_rule_t c_rules[1024];

extern "C" __global__ void __launch_bounds__ (64, 1) m09810_m04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const oldoffice34_t *oldoffice34_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

  const u32 lid = threadIdx.x;

  __shared__ RC4_KEY rc4_keys[64];

  RC4_KEY *rc4_key = &rc4_keys[lid];

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;
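  /**
   * base: load this work-item's candidate password into registers
   */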
  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  const u32 version = oldoffice34_bufs[salt_pos].version;

  u32 encryptedVerifier[4];

  encryptedVerifier[0] = oldoffice34_bufs[salt_pos].encryptedVerifier[0];
  encryptedVerifier[1] = oldoffice34_bufs[salt_pos].encryptedVerifier[1];
  encryptedVerifier[2] = oldoffice34_bufs[salt_pos].encryptedVerifier[2];
  encryptedVerifier[3] = oldoffice34_bufs[salt_pos].encryptedVerifier[3];
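  /**
   * loop: apply one rule per iteration, build an RC4 key from the leading
   * bytes of the mutated word (only the low byte of w0[1] is kept), decrypt
   * the 16-byte encryptedVerifier, SHA-1 the result and run the next 16
   * keystream bytes over that digest before handing r0..r3 to the compare
   */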
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    key[1] = w0[1] & 0xff;

    rc4_init_16 (rc4_key, key);

    u8 j = rc4_next_16 (rc4_key, 0, 0, encryptedVerifier, out);

    w0_t[0] = swap_workaround (out[0]);
    w0_t[1] = swap_workaround (out[1]);
    w0_t[2] = swap_workaround (out[2]);
    w0_t[3] = swap_workaround (out[3]);
    w1_t[0] = 0x80000000;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    digest[0] = swap_workaround (digest[0]);
    digest[1] = swap_workaround (digest[1]);
    digest[2] = swap_workaround (digest[2]);
    digest[3] = swap_workaround (digest[3]);

    rc4_next_16 (rc4_key, 16, j, digest, out);

    const u32x r0 = out[0];
    const u32x r1 = out[1];
    const u32x r2 = out[2];
    const u32x r3 = out[3];
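    /*
     * VECT_COMPARE_M checks r0..r3 against all loaded digests using the
     * bitmap filters passed in the bitmaps_buf_* parameters.
     */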
    #include VECT_COMPARE_M
extern "C" __global__ void __launch_bounds__ (64, 1) m09810_m08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const oldoffice34_t *oldoffice34_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

extern "C" __global__ void __launch_bounds__ (64, 1) m09810_m16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const oldoffice34_t *oldoffice34_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

extern "C" __global__ void __launch_bounds__ (64, 1) m09810_s04 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const oldoffice34_t *oldoffice34_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
  const u32 lid = threadIdx.x;

  __shared__ RC4_KEY rc4_keys[64];

  RC4_KEY *rc4_key = &rc4_keys[lid];

  const u32 gid = (blockIdx.x * blockDim.x) + threadIdx.x;

  if (gid >= gid_max) return;

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;
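  /**
   * digest: keep the four target digest words of the single hash being
   * searched in registers for the final comparison
   */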
  const u32 search[4] =

    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]

  const u32 version = oldoffice34_bufs[salt_pos].version;

  u32 encryptedVerifier[4];

  encryptedVerifier[0] = oldoffice34_bufs[salt_pos].encryptedVerifier[0];
  encryptedVerifier[1] = oldoffice34_bufs[salt_pos].encryptedVerifier[1];
  encryptedVerifier[2] = oldoffice34_bufs[salt_pos].encryptedVerifier[2];
  encryptedVerifier[3] = oldoffice34_bufs[salt_pos].encryptedVerifier[3];
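  /**
   * loop: same candidate processing as the _m kernel above; only the final
   * comparison differs (single target in search[] instead of the multi-hash
   * lookup)
   */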
  for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)

    apply_rules (c_rules[il_pos].cmds, w0, w1, pw_len);

    key[1] = w0[1] & 0xff;

    rc4_init_16 (rc4_key, key);

    u8 j = rc4_next_16 (rc4_key, 0, 0, encryptedVerifier, out);

    w0_t[0] = swap_workaround (out[0]);
    w0_t[1] = swap_workaround (out[1]);
    w0_t[2] = swap_workaround (out[2]);
    w0_t[3] = swap_workaround (out[3]);
    w1_t[0] = 0x80000000;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    digest[0] = swap_workaround (digest[0]);
    digest[1] = swap_workaround (digest[1]);
    digest[2] = swap_workaround (digest[2]);
    digest[3] = swap_workaround (digest[3]);

    rc4_next_16 (rc4_key, 16, j, digest, out);

    const u32x r0 = out[0];
    const u32x r1 = out[1];
    const u32x r2 = out[2];
    const u32x r3 = out[3];
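    /*
     * VECT_COMPARE_S compares r0..r3 against the single target in search[].
     */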
    #include VECT_COMPARE_S
extern "C" __global__ void __launch_bounds__ (64, 1) m09810_s08 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const oldoffice34_t *oldoffice34_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)

extern "C" __global__ void __launch_bounds__ (64, 1) m09810_s16 (const pw_t *pws, const gpu_rule_t *rules_buf, const comb_t *combs_buf, const bf_t *bfs_buf, const void *tmps, void *hooks, const u32 *bitmaps_buf_s1_a, const u32 *bitmaps_buf_s1_b, const u32 *bitmaps_buf_s1_c, const u32 *bitmaps_buf_s1_d, const u32 *bitmaps_buf_s2_a, const u32 *bitmaps_buf_s2_b, const u32 *bitmaps_buf_s2_c, const u32 *bitmaps_buf_s2_d, plain_t *plains_buf, const digest_t *digests_buf, u32 *hashes_shown, const salt_t *salt_bufs, const oldoffice34_t *oldoffice34_bufs, u32 *d_return_buf, u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)