2 * Author......: Jens Steube <jens.steube@gmail.com>
8 #include "include/constants.h"
9 #include "include/kernel_vendor.h"
28 #include "include/kernel_functions.c"
29 #include "types_amd.c"
30 #include "common_amd.c"
33 #define VECT_COMPARE_S "check_single_vect1_comp4.c"
34 #define VECT_COMPARE_M "check_multi_vect1_comp4.c"
38 #define VECT_COMPARE_S "check_single_vect2_comp4.c"
39 #define VECT_COMPARE_M "check_multi_vect2_comp4.c"
43 #define VECT_COMPARE_S "check_single_vect4_comp4.c"
44 #define VECT_COMPARE_M "check_multi_vect4_comp4.c"
48 #define uint_to_hex_lower8(i) l_bin2asc[(i)]
52 #define uint_to_hex_lower8(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
56 #define uint_to_hex_lower8(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
/*
 * m04400m: inner kernel loop for hash-mode 4400, md5(sha1($pass)),
 * multi-hash ("m") variant. For each brute-force candidate word it runs a
 * SHA-1 compression over the password block, converts the SHA-1 state words
 * a..e to lowercase-hex ASCII via the __local l_bin2asc lookup table, then
 * runs an MD5 compression over that hex string, and finally compares the
 * result via the included VECT_COMPARE_M check.
 *
 * NOTE(review): this excerpt is non-contiguous (interior lines such as the
 * w0_t..we_t declarations, SHA-1/MD5 state initialization and the closing
 * braces are not visible here); comments below describe only the visible code.
 */
59 static void m04400m (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 l_bin2asc[256])
/* global / local work-item ids */
65 const u32 gid = get_global_id (0);
66 const u32 lid = get_local_id (0);
/* iterate over the brute-force candidate words for this work-item */
78 for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
80 const u32 w0r = bfs_buf[il_pos].i;
/* SHA-1 padding: message length in bits goes into the last message word */
103 u32x wf_t = pw_len * 8;
/* SHA-1 rounds 0..19 (F0o = Ch) over message words w0_t..wf_t */
114 SHA1_STEP (SHA1_F0o, a, b, c, d, e, w0_t);
115 SHA1_STEP (SHA1_F0o, e, a, b, c, d, w1_t);
116 SHA1_STEP (SHA1_F0o, d, e, a, b, c, w2_t);
117 SHA1_STEP (SHA1_F0o, c, d, e, a, b, w3_t);
118 SHA1_STEP (SHA1_F0o, b, c, d, e, a, w4_t);
119 SHA1_STEP (SHA1_F0o, a, b, c, d, e, w5_t);
120 SHA1_STEP (SHA1_F0o, e, a, b, c, d, w6_t);
121 SHA1_STEP (SHA1_F0o, d, e, a, b, c, w7_t);
122 SHA1_STEP (SHA1_F0o, c, d, e, a, b, w8_t);
123 SHA1_STEP (SHA1_F0o, b, c, d, e, a, w9_t);
124 SHA1_STEP (SHA1_F0o, a, b, c, d, e, wa_t);
125 SHA1_STEP (SHA1_F0o, e, a, b, c, d, wb_t);
126 SHA1_STEP (SHA1_F0o, d, e, a, b, c, wc_t);
127 SHA1_STEP (SHA1_F0o, c, d, e, a, b, wd_t);
128 SHA1_STEP (SHA1_F0o, b, c, d, e, a, we_t);
129 SHA1_STEP (SHA1_F0o, a, b, c, d, e, wf_t);
/* from round 16 on, each message word is the standard SHA-1 schedule:
   w[t] = rotl32(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1) */
130 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, e, a, b, c, d, w0_t);
131 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, d, e, a, b, c, w1_t);
132 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, c, d, e, a, b, w2_t);
133 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, b, c, d, e, a, w3_t);
/* SHA-1 rounds 20..39 (F1 = parity) */
138 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w4_t);
139 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w5_t);
140 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w6_t);
141 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w7_t);
142 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w8_t);
143 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w9_t);
144 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wa_t);
145 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, wb_t);
146 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, wc_t);
147 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wd_t);
148 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, we_t);
149 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wf_t);
150 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w0_t);
151 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w1_t);
152 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w2_t);
153 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w3_t);
154 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w4_t);
155 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w5_t);
156 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w6_t);
157 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w7_t);
/* SHA-1 rounds 40..59 (F2o = Maj) */
162 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, w8_t);
163 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, w9_t);
164 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, wa_t);
165 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, wb_t);
166 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, wc_t);
167 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, wd_t);
168 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, we_t);
169 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, wf_t);
170 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, w0_t);
171 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, w1_t);
172 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, w2_t);
173 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, w3_t);
174 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, w4_t);
175 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, w5_t);
176 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, w6_t);
177 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, w7_t);
178 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, w8_t);
179 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, w9_t);
180 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, wa_t);
181 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, wb_t);
/* SHA-1 rounds 60..79 (F1 = parity again) */
186 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, wc_t);
187 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wd_t);
188 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, we_t);
189 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, wf_t);
190 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w0_t);
191 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w1_t);
192 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w2_t);
193 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w3_t);
194 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w4_t);
195 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w5_t);
196 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w6_t);
197 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w7_t);
198 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w8_t);
199 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w9_t);
200 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wa_t);
201 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, wb_t);
202 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wc_t);
203 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, wd_t);
204 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
205 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
/* Convert the SHA-1 state a..e into the lowercase-hex ASCII message for the
   MD5 pass: each byte of the digest expands to two hex characters fetched
   from the l_bin2asc lookup table, two bytes per 32-bit message word
   (packed into bits 0..15 and 16..31). */
217 w0_t = uint_to_hex_lower8 ((a >> 24) & 255) << 0
218 | uint_to_hex_lower8 ((a >> 16) & 255) << 16;
219 w1_t = uint_to_hex_lower8 ((a >> 8) & 255) << 0
220 | uint_to_hex_lower8 ((a >> 0) & 255) << 16;
221 w2_t = uint_to_hex_lower8 ((b >> 24) & 255) << 0
222 | uint_to_hex_lower8 ((b >> 16) & 255) << 16;
223 w3_t = uint_to_hex_lower8 ((b >> 8) & 255) << 0
224 | uint_to_hex_lower8 ((b >> 0) & 255) << 16;
225 w4_t = uint_to_hex_lower8 ((c >> 24) & 255) << 0
226 | uint_to_hex_lower8 ((c >> 16) & 255) << 16;
227 w5_t = uint_to_hex_lower8 ((c >> 8) & 255) << 0
228 | uint_to_hex_lower8 ((c >> 0) & 255) << 16;
229 w6_t = uint_to_hex_lower8 ((d >> 24) & 255) << 0
230 | uint_to_hex_lower8 ((d >> 16) & 255) << 16;
231 w7_t = uint_to_hex_lower8 ((d >> 8) & 255) << 0
232 | uint_to_hex_lower8 ((d >> 0) & 255) << 16;
233 w8_t = uint_to_hex_lower8 ((e >> 24) & 255) << 0
234 | uint_to_hex_lower8 ((e >> 16) & 255) << 16;
235 w9_t = uint_to_hex_lower8 ((e >> 8) & 255) << 0
236 | uint_to_hex_lower8 ((e >> 0) & 255) << 16;
/* MD5 round 1 (F), steps 0..15 */
250 MD5_STEP (MD5_Fo, a, b, c, d, w0_t, MD5C00, MD5S00);
251 MD5_STEP (MD5_Fo, d, a, b, c, w1_t, MD5C01, MD5S01);
252 MD5_STEP (MD5_Fo, c, d, a, b, w2_t, MD5C02, MD5S02);
253 MD5_STEP (MD5_Fo, b, c, d, a, w3_t, MD5C03, MD5S03);
254 MD5_STEP (MD5_Fo, a, b, c, d, w4_t, MD5C04, MD5S00);
255 MD5_STEP (MD5_Fo, d, a, b, c, w5_t, MD5C05, MD5S01);
256 MD5_STEP (MD5_Fo, c, d, a, b, w6_t, MD5C06, MD5S02);
257 MD5_STEP (MD5_Fo, b, c, d, a, w7_t, MD5C07, MD5S03);
258 MD5_STEP (MD5_Fo, a, b, c, d, w8_t, MD5C08, MD5S00);
259 MD5_STEP (MD5_Fo, d, a, b, c, w9_t, MD5C09, MD5S01);
260 MD5_STEP (MD5_Fo, c, d, a, b, wa_t, MD5C0a, MD5S02);
261 MD5_STEP (MD5_Fo, b, c, d, a, wb_t, MD5C0b, MD5S03);
262 MD5_STEP (MD5_Fo, a, b, c, d, wc_t, MD5C0c, MD5S00);
263 MD5_STEP (MD5_Fo, d, a, b, c, wd_t, MD5C0d, MD5S01);
264 MD5_STEP (MD5_Fo, c, d, a, b, we_t, MD5C0e, MD5S02);
265 MD5_STEP (MD5_Fo, b, c, d, a, wf_t, MD5C0f, MD5S03);
/* MD5 round 2 (G), steps 16..31 */
267 MD5_STEP (MD5_Go, a, b, c, d, w1_t, MD5C10, MD5S10);
268 MD5_STEP (MD5_Go, d, a, b, c, w6_t, MD5C11, MD5S11);
269 MD5_STEP (MD5_Go, c, d, a, b, wb_t, MD5C12, MD5S12);
270 MD5_STEP (MD5_Go, b, c, d, a, w0_t, MD5C13, MD5S13);
271 MD5_STEP (MD5_Go, a, b, c, d, w5_t, MD5C14, MD5S10);
272 MD5_STEP (MD5_Go, d, a, b, c, wa_t, MD5C15, MD5S11);
273 MD5_STEP (MD5_Go, c, d, a, b, wf_t, MD5C16, MD5S12);
274 MD5_STEP (MD5_Go, b, c, d, a, w4_t, MD5C17, MD5S13);
275 MD5_STEP (MD5_Go, a, b, c, d, w9_t, MD5C18, MD5S10);
276 MD5_STEP (MD5_Go, d, a, b, c, we_t, MD5C19, MD5S11);
277 MD5_STEP (MD5_Go, c, d, a, b, w3_t, MD5C1a, MD5S12);
278 MD5_STEP (MD5_Go, b, c, d, a, w8_t, MD5C1b, MD5S13);
279 MD5_STEP (MD5_Go, a, b, c, d, wd_t, MD5C1c, MD5S10);
280 MD5_STEP (MD5_Go, d, a, b, c, w2_t, MD5C1d, MD5S11);
281 MD5_STEP (MD5_Go, c, d, a, b, w7_t, MD5C1e, MD5S12);
282 MD5_STEP (MD5_Go, b, c, d, a, wc_t, MD5C1f, MD5S13);
/* MD5 round 3 (H), steps 32..47 */
284 MD5_STEP (MD5_H , a, b, c, d, w5_t, MD5C20, MD5S20);
285 MD5_STEP (MD5_H , d, a, b, c, w8_t, MD5C21, MD5S21);
286 MD5_STEP (MD5_H , c, d, a, b, wb_t, MD5C22, MD5S22);
287 MD5_STEP (MD5_H , b, c, d, a, we_t, MD5C23, MD5S23);
288 MD5_STEP (MD5_H , a, b, c, d, w1_t, MD5C24, MD5S20);
289 MD5_STEP (MD5_H , d, a, b, c, w4_t, MD5C25, MD5S21);
290 MD5_STEP (MD5_H , c, d, a, b, w7_t, MD5C26, MD5S22);
291 MD5_STEP (MD5_H , b, c, d, a, wa_t, MD5C27, MD5S23);
292 MD5_STEP (MD5_H , a, b, c, d, wd_t, MD5C28, MD5S20);
293 MD5_STEP (MD5_H , d, a, b, c, w0_t, MD5C29, MD5S21);
294 MD5_STEP (MD5_H , c, d, a, b, w3_t, MD5C2a, MD5S22);
295 MD5_STEP (MD5_H , b, c, d, a, w6_t, MD5C2b, MD5S23);
296 MD5_STEP (MD5_H , a, b, c, d, w9_t, MD5C2c, MD5S20);
297 MD5_STEP (MD5_H , d, a, b, c, wc_t, MD5C2d, MD5S21);
298 MD5_STEP (MD5_H , c, d, a, b, wf_t, MD5C2e, MD5S22);
299 MD5_STEP (MD5_H , b, c, d, a, w2_t, MD5C2f, MD5S23);
/* MD5 round 4 (I), steps 48..63 */
301 MD5_STEP (MD5_I , a, b, c, d, w0_t, MD5C30, MD5S30);
302 MD5_STEP (MD5_I , d, a, b, c, w7_t, MD5C31, MD5S31);
303 MD5_STEP (MD5_I , c, d, a, b, we_t, MD5C32, MD5S32);
304 MD5_STEP (MD5_I , b, c, d, a, w5_t, MD5C33, MD5S33);
305 MD5_STEP (MD5_I , a, b, c, d, wc_t, MD5C34, MD5S30);
306 MD5_STEP (MD5_I , d, a, b, c, w3_t, MD5C35, MD5S31);
307 MD5_STEP (MD5_I , c, d, a, b, wa_t, MD5C36, MD5S32);
308 MD5_STEP (MD5_I , b, c, d, a, w1_t, MD5C37, MD5S33);
309 MD5_STEP (MD5_I , a, b, c, d, w8_t, MD5C38, MD5S30);
310 MD5_STEP (MD5_I , d, a, b, c, wf_t, MD5C39, MD5S31);
311 MD5_STEP (MD5_I , c, d, a, b, w6_t, MD5C3a, MD5S32);
312 MD5_STEP (MD5_I , b, c, d, a, wd_t, MD5C3b, MD5S33);
313 MD5_STEP (MD5_I , a, b, c, d, w4_t, MD5C3c, MD5S30);
314 MD5_STEP (MD5_I , d, a, b, c, wb_t, MD5C3d, MD5S31);
315 MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
316 MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
/* multi-hash comparison: textually includes the check_multi_* code selected
   by the VECT_COMPARE_M macro above */
323 #include VECT_COMPARE_M
/*
 * m04400s: inner kernel loop for hash-mode 4400, md5(sha1($pass)),
 * single-hash ("s") variant. Same pipeline as m04400m (SHA-1 compression ->
 * lowercase-hex conversion via l_bin2asc -> MD5 compression), but it first
 * loads the one target digest into search[] and finishes with the
 * VECT_COMPARE_S (single-hash) check instead of VECT_COMPARE_M.
 *
 * NOTE(review): this excerpt is non-contiguous (interior lines such as the
 * w0_t..we_t declarations, state initialization and closing braces are not
 * visible here); comments below describe only the visible code.
 */
327 static void m04400s (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 l_bin2asc[256])
/* global / local work-item ids */
333 const u32 gid = get_global_id (0);
334 const u32 lid = get_local_id (0);
/* target digest words (DGST_R0..R3) of the single hash being searched */
340 const u32 search[4] =
342 digests_buf[digests_offset].digest_buf[DGST_R0],
343 digests_buf[digests_offset].digest_buf[DGST_R1],
344 digests_buf[digests_offset].digest_buf[DGST_R2],
345 digests_buf[digests_offset].digest_buf[DGST_R3]
/* iterate over the brute-force candidate words for this work-item */
354 for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
356 const u32 w0r = bfs_buf[il_pos].i;
/* SHA-1 padding: message length in bits goes into the last message word */
379 u32x wf_t = pw_len * 8;
/* SHA-1 rounds 0..19 (F0o = Ch) over message words w0_t..wf_t */
390 SHA1_STEP (SHA1_F0o, a, b, c, d, e, w0_t);
391 SHA1_STEP (SHA1_F0o, e, a, b, c, d, w1_t);
392 SHA1_STEP (SHA1_F0o, d, e, a, b, c, w2_t);
393 SHA1_STEP (SHA1_F0o, c, d, e, a, b, w3_t);
394 SHA1_STEP (SHA1_F0o, b, c, d, e, a, w4_t);
395 SHA1_STEP (SHA1_F0o, a, b, c, d, e, w5_t);
396 SHA1_STEP (SHA1_F0o, e, a, b, c, d, w6_t);
397 SHA1_STEP (SHA1_F0o, d, e, a, b, c, w7_t);
398 SHA1_STEP (SHA1_F0o, c, d, e, a, b, w8_t);
399 SHA1_STEP (SHA1_F0o, b, c, d, e, a, w9_t);
400 SHA1_STEP (SHA1_F0o, a, b, c, d, e, wa_t);
401 SHA1_STEP (SHA1_F0o, e, a, b, c, d, wb_t);
402 SHA1_STEP (SHA1_F0o, d, e, a, b, c, wc_t);
403 SHA1_STEP (SHA1_F0o, c, d, e, a, b, wd_t);
404 SHA1_STEP (SHA1_F0o, b, c, d, e, a, we_t);
405 SHA1_STEP (SHA1_F0o, a, b, c, d, e, wf_t);
/* message-schedule expansion: w[t] = rotl32(w[t-3]^w[t-8]^w[t-14]^w[t-16], 1) */
406 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, e, a, b, c, d, w0_t);
407 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, d, e, a, b, c, w1_t);
408 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, c, d, e, a, b, w2_t);
409 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, b, c, d, e, a, w3_t);
/* SHA-1 rounds 20..39 (F1 = parity) */
414 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w4_t);
415 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w5_t);
416 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w6_t);
417 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w7_t);
418 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w8_t);
419 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w9_t);
420 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wa_t);
421 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, wb_t);
422 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, wc_t);
423 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wd_t);
424 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, we_t);
425 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wf_t);
426 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w0_t);
427 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w1_t);
428 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w2_t);
429 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w3_t);
430 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w4_t);
431 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w5_t);
432 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w6_t);
433 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w7_t);
/* SHA-1 rounds 40..59 (F2o = Maj) */
438 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, w8_t);
439 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, w9_t);
440 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, wa_t);
441 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, wb_t);
442 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, wc_t);
443 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, wd_t);
444 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, we_t);
445 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, wf_t);
446 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, w0_t);
447 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, w1_t);
448 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, w2_t);
449 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, w3_t);
450 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, w4_t);
451 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, w5_t);
452 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, w6_t);
453 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, a, b, c, d, e, w7_t);
454 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, e, a, b, c, d, w8_t);
455 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, d, e, a, b, c, w9_t);
456 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, c, d, e, a, b, wa_t);
457 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, b, c, d, e, a, wb_t);
/* SHA-1 rounds 60..79 (F1 = parity again) */
462 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, wc_t);
463 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wd_t);
464 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, we_t);
465 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, wf_t);
466 w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w0_t);
467 w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w1_t);
468 w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w2_t);
469 w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w3_t);
470 w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w4_t);
471 w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, w5_t);
472 w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, w6_t);
473 w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, w7_t);
474 w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, w8_t);
475 w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, w9_t);
476 wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wa_t);
477 wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, a, b, c, d, e, wb_t);
478 wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, e, a, b, c, d, wc_t);
479 wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, d, e, a, b, c, wd_t);
480 we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
481 wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
/* Convert the SHA-1 state a..e into the lowercase-hex ASCII message for the
   MD5 pass: two hex characters per digest byte, looked up in l_bin2asc and
   packed two bytes per 32-bit message word (bits 0..15 and 16..31). */
493 w0_t = uint_to_hex_lower8 ((a >> 24) & 255) << 0
494 | uint_to_hex_lower8 ((a >> 16) & 255) << 16;
495 w1_t = uint_to_hex_lower8 ((a >> 8) & 255) << 0
496 | uint_to_hex_lower8 ((a >> 0) & 255) << 16;
497 w2_t = uint_to_hex_lower8 ((b >> 24) & 255) << 0
498 | uint_to_hex_lower8 ((b >> 16) & 255) << 16;
499 w3_t = uint_to_hex_lower8 ((b >> 8) & 255) << 0
500 | uint_to_hex_lower8 ((b >> 0) & 255) << 16;
501 w4_t = uint_to_hex_lower8 ((c >> 24) & 255) << 0
502 | uint_to_hex_lower8 ((c >> 16) & 255) << 16;
503 w5_t = uint_to_hex_lower8 ((c >> 8) & 255) << 0
504 | uint_to_hex_lower8 ((c >> 0) & 255) << 16;
505 w6_t = uint_to_hex_lower8 ((d >> 24) & 255) << 0
506 | uint_to_hex_lower8 ((d >> 16) & 255) << 16;
507 w7_t = uint_to_hex_lower8 ((d >> 8) & 255) << 0
508 | uint_to_hex_lower8 ((d >> 0) & 255) << 16;
509 w8_t = uint_to_hex_lower8 ((e >> 24) & 255) << 0
510 | uint_to_hex_lower8 ((e >> 16) & 255) << 16;
511 w9_t = uint_to_hex_lower8 ((e >> 8) & 255) << 0
512 | uint_to_hex_lower8 ((e >> 0) & 255) << 16;
/* MD5 round 1 (F), steps 0..15 */
526 MD5_STEP (MD5_Fo, a, b, c, d, w0_t, MD5C00, MD5S00);
527 MD5_STEP (MD5_Fo, d, a, b, c, w1_t, MD5C01, MD5S01);
528 MD5_STEP (MD5_Fo, c, d, a, b, w2_t, MD5C02, MD5S02);
529 MD5_STEP (MD5_Fo, b, c, d, a, w3_t, MD5C03, MD5S03);
530 MD5_STEP (MD5_Fo, a, b, c, d, w4_t, MD5C04, MD5S00);
531 MD5_STEP (MD5_Fo, d, a, b, c, w5_t, MD5C05, MD5S01);
532 MD5_STEP (MD5_Fo, c, d, a, b, w6_t, MD5C06, MD5S02);
533 MD5_STEP (MD5_Fo, b, c, d, a, w7_t, MD5C07, MD5S03);
534 MD5_STEP (MD5_Fo, a, b, c, d, w8_t, MD5C08, MD5S00);
535 MD5_STEP (MD5_Fo, d, a, b, c, w9_t, MD5C09, MD5S01);
536 MD5_STEP (MD5_Fo, c, d, a, b, wa_t, MD5C0a, MD5S02);
537 MD5_STEP (MD5_Fo, b, c, d, a, wb_t, MD5C0b, MD5S03);
538 MD5_STEP (MD5_Fo, a, b, c, d, wc_t, MD5C0c, MD5S00);
539 MD5_STEP (MD5_Fo, d, a, b, c, wd_t, MD5C0d, MD5S01);
540 MD5_STEP (MD5_Fo, c, d, a, b, we_t, MD5C0e, MD5S02);
541 MD5_STEP (MD5_Fo, b, c, d, a, wf_t, MD5C0f, MD5S03);
/* MD5 round 2 (G), steps 16..31 */
543 MD5_STEP (MD5_Go, a, b, c, d, w1_t, MD5C10, MD5S10);
544 MD5_STEP (MD5_Go, d, a, b, c, w6_t, MD5C11, MD5S11);
545 MD5_STEP (MD5_Go, c, d, a, b, wb_t, MD5C12, MD5S12);
546 MD5_STEP (MD5_Go, b, c, d, a, w0_t, MD5C13, MD5S13);
547 MD5_STEP (MD5_Go, a, b, c, d, w5_t, MD5C14, MD5S10);
548 MD5_STEP (MD5_Go, d, a, b, c, wa_t, MD5C15, MD5S11);
549 MD5_STEP (MD5_Go, c, d, a, b, wf_t, MD5C16, MD5S12);
550 MD5_STEP (MD5_Go, b, c, d, a, w4_t, MD5C17, MD5S13);
551 MD5_STEP (MD5_Go, a, b, c, d, w9_t, MD5C18, MD5S10);
552 MD5_STEP (MD5_Go, d, a, b, c, we_t, MD5C19, MD5S11);
553 MD5_STEP (MD5_Go, c, d, a, b, w3_t, MD5C1a, MD5S12);
554 MD5_STEP (MD5_Go, b, c, d, a, w8_t, MD5C1b, MD5S13);
555 MD5_STEP (MD5_Go, a, b, c, d, wd_t, MD5C1c, MD5S10);
556 MD5_STEP (MD5_Go, d, a, b, c, w2_t, MD5C1d, MD5S11);
557 MD5_STEP (MD5_Go, c, d, a, b, w7_t, MD5C1e, MD5S12);
558 MD5_STEP (MD5_Go, b, c, d, a, wc_t, MD5C1f, MD5S13);
/* MD5 round 3 (H), steps 32..47 */
560 MD5_STEP (MD5_H , a, b, c, d, w5_t, MD5C20, MD5S20);
561 MD5_STEP (MD5_H , d, a, b, c, w8_t, MD5C21, MD5S21);
562 MD5_STEP (MD5_H , c, d, a, b, wb_t, MD5C22, MD5S22);
563 MD5_STEP (MD5_H , b, c, d, a, we_t, MD5C23, MD5S23);
564 MD5_STEP (MD5_H , a, b, c, d, w1_t, MD5C24, MD5S20);
565 MD5_STEP (MD5_H , d, a, b, c, w4_t, MD5C25, MD5S21);
566 MD5_STEP (MD5_H , c, d, a, b, w7_t, MD5C26, MD5S22);
567 MD5_STEP (MD5_H , b, c, d, a, wa_t, MD5C27, MD5S23);
568 MD5_STEP (MD5_H , a, b, c, d, wd_t, MD5C28, MD5S20);
569 MD5_STEP (MD5_H , d, a, b, c, w0_t, MD5C29, MD5S21);
570 MD5_STEP (MD5_H , c, d, a, b, w3_t, MD5C2a, MD5S22);
571 MD5_STEP (MD5_H , b, c, d, a, w6_t, MD5C2b, MD5S23);
572 MD5_STEP (MD5_H , a, b, c, d, w9_t, MD5C2c, MD5S20);
573 MD5_STEP (MD5_H , d, a, b, c, wc_t, MD5C2d, MD5S21);
574 MD5_STEP (MD5_H , c, d, a, b, wf_t, MD5C2e, MD5S22);
575 MD5_STEP (MD5_H , b, c, d, a, w2_t, MD5C2f, MD5S23);
/* MD5 round 4 (I), steps 48..63 */
577 MD5_STEP (MD5_I , a, b, c, d, w0_t, MD5C30, MD5S30);
578 MD5_STEP (MD5_I , d, a, b, c, w7_t, MD5C31, MD5S31);
579 MD5_STEP (MD5_I , c, d, a, b, we_t, MD5C32, MD5S32);
580 MD5_STEP (MD5_I , b, c, d, a, w5_t, MD5C33, MD5S33);
581 MD5_STEP (MD5_I , a, b, c, d, wc_t, MD5C34, MD5S30);
582 MD5_STEP (MD5_I , d, a, b, c, w3_t, MD5C35, MD5S31);
583 MD5_STEP (MD5_I , c, d, a, b, wa_t, MD5C36, MD5S32);
584 MD5_STEP (MD5_I , b, c, d, a, w1_t, MD5C37, MD5S33);
585 MD5_STEP (MD5_I , a, b, c, d, w8_t, MD5C38, MD5S30);
586 MD5_STEP (MD5_I , d, a, b, c, wf_t, MD5C39, MD5S31);
587 MD5_STEP (MD5_I , c, d, a, b, w6_t, MD5C3a, MD5S32);
588 MD5_STEP (MD5_I , b, c, d, a, wd_t, MD5C3b, MD5S33);
589 MD5_STEP (MD5_I , a, b, c, d, w4_t, MD5C3c, MD5S30);
590 MD5_STEP (MD5_I , d, a, b, c, wb_t, MD5C3d, MD5S31);
591 MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
592 MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
/* single-hash comparison: textually includes the check_single_* code selected
   by the VECT_COMPARE_S macro above */
599 #include VECT_COMPARE_S
/*
 * m04400_m04: OpenCL entry point, multi-hash variant for passwords that fit
 * in 4 message words (only w0 is loaded from the password buffer here).
 * Loads the candidate password, builds the shared byte->lowercase-hex ASCII
 * lookup table l_bin2asc in __local memory, then delegates to m04400m.
 *
 * NOTE(review): excerpt is non-contiguous; the w1/w2/w3 declarations and
 * some initialization lines are not visible here.
 */
603 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m04400_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
609 const u32 gid = get_global_id (0);
615 const u32 lid = get_local_id (0);
/* load candidate password words for this work-item */
620 w0[0] = pws[gid].i[ 0];
621 w0[1] = pws[gid].i[ 1];
622 w0[2] = pws[gid].i[ 2];
623 w0[3] = pws[gid].i[ 3];
644 w3[3] = pws[gid].i[15];
646 const u32 pw_len = pws[gid].pw_len;
/* Build the shared byte -> two-lowercase-hex-ASCII-chars lookup table.
   Each of the 64 work-items fills 4 of the 256 entries (indices lid*4..lid*4+3).
   An entry packs the hex char of the low nibble into bits 8..15 and the hex
   char of the high nibble into bits 0..7. */
652 __local u32 l_bin2asc[256];
654 const u32 lid4 = lid * 4;
656 const u32 lid40 = lid4 + 0;
657 const u32 lid41 = lid4 + 1;
658 const u32 lid42 = lid4 + 2;
659 const u32 lid43 = lid4 + 3;
661 const u32 v400 = (lid40 >> 0) & 15;
662 const u32 v401 = (lid40 >> 4) & 15;
663 const u32 v410 = (lid41 >> 0) & 15;
664 const u32 v411 = (lid41 >> 4) & 15;
665 const u32 v420 = (lid42 >> 0) & 15;
666 const u32 v421 = (lid42 >> 4) & 15;
667 const u32 v430 = (lid43 >> 0) & 15;
668 const u32 v431 = (lid43 >> 4) & 15;
670 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 8
671 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 0;
672 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 8
673 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 0;
674 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 8
675 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 0;
676 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 8
677 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 0;
/* all work-items must finish writing l_bin2asc before any of them read it;
   note the barrier comes before the gid bound check so every thread reaches it */
679 barrier (CLK_LOCAL_MEM_FENCE);
681 if (gid >= gid_max) return;
687 m04400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
/*
 * m04400_m08: OpenCL entry point, multi-hash variant for passwords that fit
 * in 8 message words (w0 and w1 are loaded from the password buffer here).
 * Same l_bin2asc table-build as m04400_m04, then delegates to m04400m.
 *
 * NOTE(review): excerpt is non-contiguous; the w2/w3 declarations and some
 * initialization lines are not visible here.
 */
690 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m04400_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
696 const u32 gid = get_global_id (0);
702 const u32 lid = get_local_id (0);
/* load candidate password words for this work-item */
706 w0[0] = pws[gid].i[ 0];
707 w0[1] = pws[gid].i[ 1];
708 w0[2] = pws[gid].i[ 2];
709 w0[3] = pws[gid].i[ 3];
713 w1[0] = pws[gid].i[ 4];
714 w1[1] = pws[gid].i[ 5];
715 w1[2] = pws[gid].i[ 6];
716 w1[3] = pws[gid].i[ 7];
730 w3[3] = pws[gid].i[15];
732 const u32 pw_len = pws[gid].pw_len;
/* Build the shared byte -> two-lowercase-hex-ASCII-chars lookup table;
   each of the 64 work-items fills entries lid*4 .. lid*4+3. */
738 __local u32 l_bin2asc[256];
740 const u32 lid4 = lid * 4;
742 const u32 lid40 = lid4 + 0;
743 const u32 lid41 = lid4 + 1;
744 const u32 lid42 = lid4 + 2;
745 const u32 lid43 = lid4 + 3;
747 const u32 v400 = (lid40 >> 0) & 15;
748 const u32 v401 = (lid40 >> 4) & 15;
749 const u32 v410 = (lid41 >> 0) & 15;
750 const u32 v411 = (lid41 >> 4) & 15;
751 const u32 v420 = (lid42 >> 0) & 15;
752 const u32 v421 = (lid42 >> 4) & 15;
753 const u32 v430 = (lid43 >> 0) & 15;
754 const u32 v431 = (lid43 >> 4) & 15;
756 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 8
757 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 0;
758 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 8
759 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 0;
760 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 8
761 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 0;
762 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 8
763 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 0;
/* synchronize table writes before any reads; barrier precedes the gid check
   so every thread in the work-group reaches it */
765 barrier (CLK_LOCAL_MEM_FENCE);
767 if (gid >= gid_max) return;
773 m04400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
/*
 * m04400_m16: OpenCL entry point, multi-hash variant for the longest
 * passwords (all 16 message words w0..w3 are loaded from the password
 * buffer). Same l_bin2asc table-build as the other entry points, then
 * delegates to m04400m.
 *
 * NOTE(review): excerpt is non-contiguous; the w0..w3 array declarations
 * are not visible here.
 */
776 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m04400_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
782 const u32 gid = get_global_id (0);
788 const u32 lid = get_local_id (0);
/* load all 16 candidate password words for this work-item */
792 w0[0] = pws[gid].i[ 0];
793 w0[1] = pws[gid].i[ 1];
794 w0[2] = pws[gid].i[ 2];
795 w0[3] = pws[gid].i[ 3];
799 w1[0] = pws[gid].i[ 4];
800 w1[1] = pws[gid].i[ 5];
801 w1[2] = pws[gid].i[ 6];
802 w1[3] = pws[gid].i[ 7];
806 w2[0] = pws[gid].i[ 8];
807 w2[1] = pws[gid].i[ 9];
808 w2[2] = pws[gid].i[10];
809 w2[3] = pws[gid].i[11];
813 w3[0] = pws[gid].i[12];
814 w3[1] = pws[gid].i[13];
815 w3[2] = pws[gid].i[14];
816 w3[3] = pws[gid].i[15];
818 const u32 pw_len = pws[gid].pw_len;
/* Build the shared byte -> two-lowercase-hex-ASCII-chars lookup table;
   each of the 64 work-items fills entries lid*4 .. lid*4+3. */
824 __local u32 l_bin2asc[256];
826 const u32 lid4 = lid * 4;
828 const u32 lid40 = lid4 + 0;
829 const u32 lid41 = lid4 + 1;
830 const u32 lid42 = lid4 + 2;
831 const u32 lid43 = lid4 + 3;
833 const u32 v400 = (lid40 >> 0) & 15;
834 const u32 v401 = (lid40 >> 4) & 15;
835 const u32 v410 = (lid41 >> 0) & 15;
836 const u32 v411 = (lid41 >> 4) & 15;
837 const u32 v420 = (lid42 >> 0) & 15;
838 const u32 v421 = (lid42 >> 4) & 15;
839 const u32 v430 = (lid43 >> 0) & 15;
840 const u32 v431 = (lid43 >> 4) & 15;
842 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 8
843 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 0;
844 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 8
845 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 0;
846 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 8
847 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 0;
848 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 8
849 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 0;
/* synchronize table writes before any reads; barrier precedes the gid check
   so every thread in the work-group reaches it */
851 barrier (CLK_LOCAL_MEM_FENCE);
853 if (gid >= gid_max) return;
859 m04400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
862 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m04400_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
868 const u32 gid = get_global_id (0);
874 const u32 lid = get_local_id (0);
878 w0[0] = pws[gid].i[ 0];
879 w0[1] = pws[gid].i[ 1];
880 w0[2] = pws[gid].i[ 2];
881 w0[3] = pws[gid].i[ 3];
902 w3[3] = pws[gid].i[15];
904 const u32 pw_len = pws[gid].pw_len;
910 __local u32 l_bin2asc[256];
912 const u32 lid4 = lid * 4;
914 const u32 lid40 = lid4 + 0;
915 const u32 lid41 = lid4 + 1;
916 const u32 lid42 = lid4 + 2;
917 const u32 lid43 = lid4 + 3;
919 const u32 v400 = (lid40 >> 0) & 15;
920 const u32 v401 = (lid40 >> 4) & 15;
921 const u32 v410 = (lid41 >> 0) & 15;
922 const u32 v411 = (lid41 >> 4) & 15;
923 const u32 v420 = (lid42 >> 0) & 15;
924 const u32 v421 = (lid42 >> 4) & 15;
925 const u32 v430 = (lid43 >> 0) & 15;
926 const u32 v431 = (lid43 >> 4) & 15;
928 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 8
929 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 0;
930 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 8
931 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 0;
932 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 8
933 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 0;
934 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 8
935 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 0;
937 barrier (CLK_LOCAL_MEM_FENCE);
939 if (gid >= gid_max) return;
945 m04400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
948 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m04400_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
954 const u32 gid = get_global_id (0);
960 const u32 lid = get_local_id (0);
964 w0[0] = pws[gid].i[ 0];
965 w0[1] = pws[gid].i[ 1];
966 w0[2] = pws[gid].i[ 2];
967 w0[3] = pws[gid].i[ 3];
971 w1[0] = pws[gid].i[ 4];
972 w1[1] = pws[gid].i[ 5];
973 w1[2] = pws[gid].i[ 6];
974 w1[3] = pws[gid].i[ 7];
988 w3[3] = pws[gid].i[15];
990 const u32 pw_len = pws[gid].pw_len;
996 __local u32 l_bin2asc[256];
998 const u32 lid4 = lid * 4;
1000 const u32 lid40 = lid4 + 0;
1001 const u32 lid41 = lid4 + 1;
1002 const u32 lid42 = lid4 + 2;
1003 const u32 lid43 = lid4 + 3;
1005 const u32 v400 = (lid40 >> 0) & 15;
1006 const u32 v401 = (lid40 >> 4) & 15;
1007 const u32 v410 = (lid41 >> 0) & 15;
1008 const u32 v411 = (lid41 >> 4) & 15;
1009 const u32 v420 = (lid42 >> 0) & 15;
1010 const u32 v421 = (lid42 >> 4) & 15;
1011 const u32 v430 = (lid43 >> 0) & 15;
1012 const u32 v431 = (lid43 >> 4) & 15;
1014 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 8
1015 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 0;
1016 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 8
1017 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 0;
1018 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 8
1019 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 0;
1020 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 8
1021 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 0;
1023 barrier (CLK_LOCAL_MEM_FENCE);
1025 if (gid >= gid_max) return;
1031 m04400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);
1034 __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m04400_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
1040 const u32 gid = get_global_id (0);
1046 const u32 lid = get_local_id (0);
1050 w0[0] = pws[gid].i[ 0];
1051 w0[1] = pws[gid].i[ 1];
1052 w0[2] = pws[gid].i[ 2];
1053 w0[3] = pws[gid].i[ 3];
1057 w1[0] = pws[gid].i[ 4];
1058 w1[1] = pws[gid].i[ 5];
1059 w1[2] = pws[gid].i[ 6];
1060 w1[3] = pws[gid].i[ 7];
1064 w2[0] = pws[gid].i[ 8];
1065 w2[1] = pws[gid].i[ 9];
1066 w2[2] = pws[gid].i[10];
1067 w2[3] = pws[gid].i[11];
1071 w3[0] = pws[gid].i[12];
1072 w3[1] = pws[gid].i[13];
1073 w3[2] = pws[gid].i[14];
1074 w3[3] = pws[gid].i[15];
1076 const u32 pw_len = pws[gid].pw_len;
1082 __local u32 l_bin2asc[256];
1084 const u32 lid4 = lid * 4;
1086 const u32 lid40 = lid4 + 0;
1087 const u32 lid41 = lid4 + 1;
1088 const u32 lid42 = lid4 + 2;
1089 const u32 lid43 = lid4 + 3;
1091 const u32 v400 = (lid40 >> 0) & 15;
1092 const u32 v401 = (lid40 >> 4) & 15;
1093 const u32 v410 = (lid41 >> 0) & 15;
1094 const u32 v411 = (lid41 >> 4) & 15;
1095 const u32 v420 = (lid42 >> 0) & 15;
1096 const u32 v421 = (lid42 >> 4) & 15;
1097 const u32 v430 = (lid43 >> 0) & 15;
1098 const u32 v431 = (lid43 >> 4) & 15;
1100 l_bin2asc[lid40] = ((v400 < 10) ? '0' + v400 : 'a' - 10 + v400) << 8
1101 | ((v401 < 10) ? '0' + v401 : 'a' - 10 + v401) << 0;
1102 l_bin2asc[lid41] = ((v410 < 10) ? '0' + v410 : 'a' - 10 + v410) << 8
1103 | ((v411 < 10) ? '0' + v411 : 'a' - 10 + v411) << 0;
1104 l_bin2asc[lid42] = ((v420 < 10) ? '0' + v420 : 'a' - 10 + v420) << 8
1105 | ((v421 < 10) ? '0' + v421 : 'a' - 10 + v421) << 0;
1106 l_bin2asc[lid43] = ((v430 < 10) ? '0' + v430 : 'a' - 10 + v430) << 8
1107 | ((v431 < 10) ? '0' + v431 : 'a' - 10 + v431) << 0;
1109 barrier (CLK_LOCAL_MEM_FENCE);
1111 if (gid >= gid_max) return;
1117 m04400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset, l_bin2asc);