- Added inline declaration to functions from simd.c, common.c, rp.c and types_ocl...
[hashcat.git] / OpenCL / types_ocl.c
1 /**
2 * Authors.....: Jens Steube <jens.steube@gmail.com>
3 * magnum <john.magnum@hushmail.com>
4 *
5 * License.....: MIT
6 */
7
// Device type identifiers (presumably mirroring the OpenCL
// CL_DEVICE_TYPE_CPU / CL_DEVICE_TYPE_GPU bit values -- confirm against
// host-side usage).
#define DEVICE_TYPE_CPU 2
#define DEVICE_TYPE_GPU 4

// Short fixed-width aliases for the OpenCL scalar integer types.
typedef uchar u8;
typedef ushort u16;
typedef uint u32;
typedef ulong u64;

// Kernels that are not built for the SIMD code path force a vector width of 1.
#ifndef NEW_SIMD_CODE
#undef VECT_SIZE
#define VECT_SIZE 1
#endif

// Token pasting used to build OpenCL vector type names,
// e.g. VTYPE(uint, 4) expands to uint4.
#define CONCAT(a, b) a##b
#define VTYPE(type, width) CONCAT(type, width)

// uNx are the "maybe-vector" types: plain scalars when VECT_SIZE == 1,
// otherwise OpenCL vector types with VECT_SIZE lanes.
#if VECT_SIZE == 1
typedef uchar u8x;
typedef ushort u16x;
typedef uint u32x;
typedef ulong u64x;
#else
typedef VTYPE(uchar, VECT_SIZE) u8x;
typedef VTYPE(ushort, VECT_SIZE) u16x;
typedef VTYPE(uint, VECT_SIZE) u32x;
typedef VTYPE(ulong, VECT_SIZE) u64x;
#endif
35
// Low 32 bits of a 64-bit scalar.
inline u32 l32_from_64_S (u64 a)
{
  return (u32) a;
}
42
// High 32 bits of a 64-bit scalar.
inline u32 h32_from_64_S (u64 a)
{
  return (u32) (a >> 32);
}
51
// Combine two 32-bit halves into a 64-bit scalar: a is the high word,
// b the low word (lane order assumes a little-endian target, as elsewhere
// in this file).
inline u64 hl32_to_64_S (const u32 a, const u32 b)
{
  const uint2 halves = (uint2) (b, a);

  return as_ulong (halves);
}
56
// Low 32 bits of every lane of a 64-bit (maybe-vector) value.
// Lane accessors (.s0 .. .sf) only exist up to the compiled VECT_SIZE,
// hence the preprocessor-guarded per-lane assignments.
inline u32x l32_from_64 (u64x a)
{
  u32x r;

#if VECT_SIZE == 1
  r = (u32) a;
#endif

#if VECT_SIZE >= 2
  r.s0 = (u32) a.s0;
  r.s1 = (u32) a.s1;
#endif

#if VECT_SIZE >= 4
  r.s2 = (u32) a.s2;
  r.s3 = (u32) a.s3;
#endif

#if VECT_SIZE >= 8
  r.s4 = (u32) a.s4;
  r.s5 = (u32) a.s5;
  r.s6 = (u32) a.s6;
  r.s7 = (u32) a.s7;
#endif

#if VECT_SIZE >= 16
  r.s8 = (u32) a.s8;
  r.s9 = (u32) a.s9;
  r.sa = (u32) a.sa;
  r.sb = (u32) a.sb;
  r.sc = (u32) a.sc;
  r.sd = (u32) a.sd;
  r.se = (u32) a.se;
  r.sf = (u32) a.sf;
#endif

  return r;
}
95
// High 32 bits of every lane of a 64-bit (maybe-vector) value.
// The shift applies to all lanes at once; the per-lane narrowing below is
// guarded by VECT_SIZE like l32_from_64.
inline u32x h32_from_64 (u64x a)
{
  a >>= 32; // move the high word of each lane into the low position

  u32x r;

#if VECT_SIZE == 1
  r = (u32) a;
#endif

#if VECT_SIZE >= 2
  r.s0 = (u32) a.s0;
  r.s1 = (u32) a.s1;
#endif

#if VECT_SIZE >= 4
  r.s2 = (u32) a.s2;
  r.s3 = (u32) a.s3;
#endif

#if VECT_SIZE >= 8
  r.s4 = (u32) a.s4;
  r.s5 = (u32) a.s5;
  r.s6 = (u32) a.s6;
  r.s7 = (u32) a.s7;
#endif

#if VECT_SIZE >= 16
  r.s8 = (u32) a.s8;
  r.s9 = (u32) a.s9;
  r.sa = (u32) a.sa;
  r.sb = (u32) a.sb;
  r.sc = (u32) a.sc;
  r.sd = (u32) a.sd;
  r.se = (u32) a.se;
  r.sf = (u32) a.sf;
#endif

  return r;
}
136
// Combine 32-bit high (a) and low (b) lanes into 64-bit lanes; vector
// counterpart of hl32_to_64_S. The (uint2)(lo, hi) + as_ulong reinterpret
// assumes a little-endian target -- NOTE(review): consistent with the
// scalar version, confirm no big-endian device is supported.
inline u64x hl32_to_64 (const u32x a, const u32x b)
{
  u64x r;

#if VECT_SIZE == 1
  r = as_ulong ((uint2) (b, a));
#endif

#if VECT_SIZE >= 2
  r.s0 = as_ulong ((uint2) (b.s0, a.s0));
  r.s1 = as_ulong ((uint2) (b.s1, a.s1));
#endif

#if VECT_SIZE >= 4
  r.s2 = as_ulong ((uint2) (b.s2, a.s2));
  r.s3 = as_ulong ((uint2) (b.s3, a.s3));
#endif

#if VECT_SIZE >= 8
  r.s4 = as_ulong ((uint2) (b.s4, a.s4));
  r.s5 = as_ulong ((uint2) (b.s5, a.s5));
  r.s6 = as_ulong ((uint2) (b.s6, a.s6));
  r.s7 = as_ulong ((uint2) (b.s7, a.s7));
#endif

#if VECT_SIZE >= 16
  r.s8 = as_ulong ((uint2) (b.s8, a.s8));
  r.s9 = as_ulong ((uint2) (b.s9, a.s9));
  r.sa = as_ulong ((uint2) (b.sa, a.sa));
  r.sb = as_ulong ((uint2) (b.sb, a.sb));
  r.sc = as_ulong ((uint2) (b.sc, a.sc));
  r.sd = as_ulong ((uint2) (b.sd, a.sd));
  r.se = as_ulong ((uint2) (b.se, a.se));
  r.sf = as_ulong ((uint2) (b.sf, a.sf));
#endif

  return r;
}
175
176 #ifdef IS_AMD
// Byte-swap a 32-bit scalar (endianness flip) via uchar4 reinterpretation.
inline u32 swap32_S (const u32 v)
{
  return (as_uint (as_uchar4 (v).s3210));
}

// Byte-swap a 64-bit scalar via uchar8 reinterpretation.
inline u64 swap64_S (const u64 v)
{
  return (as_ulong (as_uchar8 (v).s76543210));
}
186
// Rotate right by n bits: OpenCL's rotate() rotates left, so rotate left
// by the complement.
inline u32 rotr32_S (const u32 a, const u32 n)
{
  const u32 left = 32 - n;

  return rotate (a, left);
}

// Rotate left by n bits (direct mapping onto the builtin).
inline u32 rotl32_S (const u32 a, const u32 n)
{
  return rotate (a, n);
}
196
// 64-bit rotate right built from two 32-bit funnel shifts (amd_bitalign),
// which maps better onto AMD hardware than a generic 64-bit rotate.
// amd_bitalign (hi, lo, s) yields the low 32 bits of
// ((((u64) hi) << 32) | lo) >> s (cl_amd_media_ops).
inline u64 rotr64_S (const u64 a, const u32 n)
{
  const u32 a0 = h32_from_64_S (a);
  const u32 a1 = l32_from_64_S (a);

  // For n >= 32 the two halves swap roles; otherwise they keep them.
  const u32 t0 = (n >= 32) ? amd_bitalign (a0, a1, n - 32) : amd_bitalign (a1, a0, n);
  const u32 t1 = (n >= 32) ? amd_bitalign (a1, a0, n - 32) : amd_bitalign (a0, a1, n);

  const u64 r = hl32_to_64_S (t0, t1);

  return r;
}
209
// Rotate left expressed as the complementary right rotate.
inline u64 rotl64_S (const u64 a, const u32 n)
{
  const u32 complement = 64 - n;

  return rotr64_S (a, complement);
}
214
// Byte-swap every 32-bit lane (shift-and-mask form, valid for any VECT_SIZE).
inline u32x swap32 (const u32x v)
{
  u32x r;

  r  = (v >> 24) & 0x000000ff;
  r |= (v >>  8) & 0x0000ff00;
  r |= (v <<  8) & 0x00ff0000;
  r |= (v << 24) & 0xff000000;

  return r;
}

// Byte-swap every 64-bit lane.
inline u64x swap64 (const u64x v)
{
  u64x r;

  r  = (v >> 56) & 0x00000000000000ff;
  r |= (v >> 40) & 0x000000000000ff00;
  r |= (v >> 24) & 0x0000000000ff0000;
  r |= (v >>  8) & 0x00000000ff000000;
  r |= (v <<  8) & 0x000000ff00000000;
  r |= (v << 24) & 0x0000ff0000000000;
  r |= (v << 40) & 0x00ff000000000000;
  r |= (v << 56) & 0xff00000000000000;

  return r;
}
234
// Per-lane rotate right: rotate() is a left rotate, so use the complement.
inline u32x rotr32 (const u32x a, const u32 n)
{
  const u32 left = 32 - n;

  return rotate (a, left);
}

// Per-lane rotate left (direct mapping onto the builtin).
inline u32x rotl32 (const u32x a, const u32 n)
{
  return rotate (a, n);
}
244
// Per-lane 64-bit rotate right via amd_bitalign funnel shifts; vector
// counterpart of rotr64_S above. n is scalar, so the ternaries select the
// same branch for every lane.
inline u64x rotr64 (const u64x a, const u32 n)
{
  const u32x a0 = h32_from_64 (a);
  const u32x a1 = l32_from_64 (a);

  const u32x t0 = (n >= 32) ? amd_bitalign (a0, a1, n - 32) : amd_bitalign (a1, a0, n);
  const u32x t1 = (n >= 32) ? amd_bitalign (a1, a0, n - 32) : amd_bitalign (a0, a1, n);

  const u64x r = hl32_to_64 (t0, t1);

  return r;
}
257
// Per-lane rotate left via the complementary right rotate.
inline u64x rotl64 (const u64x a, const u32 n)
{
  const u32 complement = 64 - n;

  return rotr64 (a, complement);
}
262
// Bit-field extract: c bits of a starting at bit b (hardware amd_bfe).
inline u32 __bfe (const u32 a, const u32 b, const u32 c)
{
  return amd_bfe (a, b, c);
}

// Scalar byte-align wrapper over the hardware amd_bytealign instruction.
inline u32 amd_bytealign_S (const u32 a, const u32 b, const u32 c)
{
  return amd_bytealign (a, b, c);
}
272 #endif
273
274 #ifdef IS_NV
// Byte-swap a 32-bit scalar using the PTX prmt (byte permute) instruction;
// selector 0x0123 picks the source bytes in reverse order.
inline u32 swap32_S (const u32 v)
{
  u32 r;

  asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));

  return r;
}
283
// Byte-swap a 64-bit scalar: unpack to two 32-bit registers, byte-swap each
// with prmt, then repack with the words exchanged.
inline u64 swap64_S (const u64 v)
{
  u32 il;
  u32 ir;

  // il = low word, ir = high word (PTX mov.b64 unpacks low component first).
  asm ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(v));

  u32 tl;
  u32 tr;

  asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
  asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));

  u64 r;

  // Repack with the halves swapped: the swapped high word becomes the new low.
  asm ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));

  return r;
}
303
// Rotate right by n bits: rotate() rotates left, so use the complement.
inline u32 rotr32_S (const u32 a, const u32 n)
{
  const u32 left = 32 - n;

  return rotate (a, left);
}

// Rotate left by n bits (direct mapping onto the builtin).
inline u32 rotl32_S (const u32 a, const u32 n)
{
  return rotate (a, n);
}
313
#if CUDA_ARCH >= 350
// 64-bit rotate right using the sm_35+ funnel-shift instruction
// (shf.r.wrap.b32): split into halves, funnel-shift both, recombine.
inline u64 rotr64_S (const u64 a, const u32 n)
{
  u32 il;
  u32 ir;

  // il = low word, ir = high word (PTX mov.b64 unpacks low component first).
  asm ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(a));

  u32 tl;
  u32 tr;

  if (n >= 32)
  {
    // Rotations of 32..63 bits swap the halves' roles.
    asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tl) : "r"(ir), "r"(il), "r"(n - 32));
    asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tr) : "r"(il), "r"(ir), "r"(n - 32));
  }
  else
  {
    asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tl) : "r"(il), "r"(ir), "r"(n));
    asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tr) : "r"(ir), "r"(il), "r"(n));
  }

  u64 r;

  asm ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tl), "r"(tr));

  return r;
}
#else
// Pre-sm_35 fallback: a left rotate by (64 - n) equals a right rotate by n.
inline u64 rotr64_S (const u64 a, const u32 n)
{
  return rotate (a, (u64) 64 - n);
}
#endif
348
// Rotate left expressed as the complementary right rotate.
inline u64 rotl64_S (const u64 a, const u32 n)
{
  const u32 complement = 64 - n;

  return rotr64_S (a, complement);
}
353
// PTX byte permute: builds a 32-bit result from bytes of the 8-byte pair
// {b:a}, selected by the nibble indices in c (equivalent to CUDA's
// __byte_perm intrinsic).
inline u32 __byte_perm_S (const u32 a, const u32 b, const u32 c)
{
  u32 r;

  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));

  return r;
}
362
// Byte-swap every 32-bit lane (shift-and-mask form, valid for any VECT_SIZE).
inline u32x swap32 (const u32x v)
{
  u32x out;

  out  = (v >> 24) & 0x000000ff;
  out |= (v >>  8) & 0x0000ff00;
  out |= (v <<  8) & 0x00ff0000;
  out |= (v << 24) & 0xff000000;

  return out;
}

// Byte-swap every 64-bit lane.
inline u64x swap64 (const u64x v)
{
  u64x out;

  out  = (v >> 56) & 0x00000000000000ff;
  out |= (v >> 40) & 0x000000000000ff00;
  out |= (v >> 24) & 0x0000000000ff0000;
  out |= (v >>  8) & 0x00000000ff000000;
  out |= (v <<  8) & 0x000000ff00000000;
  out |= (v << 24) & 0x0000ff0000000000;
  out |= (v << 40) & 0x00ff000000000000;
  out |= (v << 56) & 0xff00000000000000;

  return out;
}
382
// Per-lane rotate right: rotate() is a left rotate, so use the complement.
inline u32x rotr32 (const u32x a, const u32 n)
{
  const u32 left = 32 - n;

  return rotate (a, left);
}

// Per-lane rotate left (direct mapping onto the builtin).
inline u32x rotl32 (const u32x a, const u32 n)
{
  return rotate (a, n);
}
392
#if CUDA_ARCH >= 350
// Per-lane 64-bit rotate right. Each lane delegates to the scalar rotr64_S
// defined above (funnel shift via shf.r.wrap on sm_35+), replacing the
// previous sixteen hand-expanded copies of the identical inline-asm
// sequence -- same instructions after inlining, far less code to maintain.
inline u64x rotr64 (const u64x a, const u32 n)
{
  u64x r;

#if VECT_SIZE == 1
  r = rotr64_S (a, n);
#endif

#if VECT_SIZE >= 2
  r.s0 = rotr64_S (a.s0, n);
  r.s1 = rotr64_S (a.s1, n);
#endif

#if VECT_SIZE >= 4
  r.s2 = rotr64_S (a.s2, n);
  r.s3 = rotr64_S (a.s3, n);
#endif

#if VECT_SIZE >= 8
  r.s4 = rotr64_S (a.s4, n);
  r.s5 = rotr64_S (a.s5, n);
  r.s6 = rotr64_S (a.s6, n);
  r.s7 = rotr64_S (a.s7, n);
#endif

#if VECT_SIZE >= 16
  r.s8 = rotr64_S (a.s8, n);
  r.s9 = rotr64_S (a.s9, n);
  r.sa = rotr64_S (a.sa, n);
  r.sb = rotr64_S (a.sb, n);
  r.sc = rotr64_S (a.sc, n);
  r.sd = rotr64_S (a.sd, n);
  r.se = rotr64_S (a.se, n);
  r.sf = rotr64_S (a.sf, n);
#endif

  return r;
}
#else
// Pre-sm_35 fallback: a left rotate by (64 - n) equals a right rotate by n.
inline u64x rotr64 (const u64x a, const u32 n)
{
  return rotate (a, (u64) 64 - n);
}
#endif
718
// Per-lane rotate left via the complementary right rotate.
// The rotation count is a u32, matching rotr64's parameter; the previous
// "(u64) 64 - n" widened the count only to have it immediately narrowed
// back to u32 at the call -- the plain form matches the AMD and generic
// paths and is value-identical for all n in 0..64.
inline u64x rotl64 (const u64x a, const u32 n)
{
  return rotr64 (a, 64 - n);
}
723
// Per-lane PTX byte permute (see __byte_perm_S); spelled out per lane
// because inline asm operands must be scalar registers.
inline u32x __byte_perm (const u32x a, const u32x b, const u32x c)
{
  u32x r;

#if VECT_SIZE == 1
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c) );
#endif

#if VECT_SIZE >= 2
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
#endif

#if VECT_SIZE >= 4
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
#endif

#if VECT_SIZE >= 8
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(a.s4), "r"(b.s4), "r"(c.s4));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(a.s5), "r"(b.s5), "r"(c.s5));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(a.s6), "r"(b.s6), "r"(c.s6));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(a.s7), "r"(b.s7), "r"(c.s7));
#endif

#if VECT_SIZE >= 16
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(a.s8), "r"(b.s8), "r"(c.s8));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(a.s9), "r"(b.s9), "r"(c.s9));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(a.sa), "r"(b.sa), "r"(c.sa));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(a.sb), "r"(b.sb), "r"(c.sb));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(a.sc), "r"(b.sc), "r"(c.sc));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(a.sd), "r"(b.sd), "r"(c.sd));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(a.se), "r"(b.se), "r"(c.se));
  asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(a.sf), "r"(b.sf), "r"(c.sf));
#endif

  return r;
}
762
// Bit-field extract: c bits of a starting at bit b (PTX bfe.u32).
inline u32 __bfe (const u32 a, const u32 b, const u32 c)
{
  u32 r;

  asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));

  return r;
}
771
#if CUDA_ARCH >= 350
// Emulate AMD's amd_bytealign on NV: low 32 bits of
// ((((u64) a) << 32) | b) >> ((c & 3) * 8), done with the sm_35+
// funnel-shift-right instruction (b is the low word, a the high word).
inline u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
{
  u32 r;

  asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(b), "r"(a), "r"((c & 3) * 8));

  return r;
}
#else
// Pre-sm_35 fallback via byte permute: the shifted selector picks four
// consecutive byte indices of {b, a} starting at byte (c & 3).
inline u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
{
  return __byte_perm_S (b, a, (0x76543210 >> ((c & 3) * 4)) & 0xffff);
}
#endif
787
788 #endif
789
790 #ifdef IS_GENERIC
// Byte-swap a 32-bit scalar by reinterpreting it as bytes and reversing them.
inline u32 swap32_S (const u32 v)
{
  const uchar4 bytes = as_uchar4 (v);

  return as_uint (bytes.s3210);
}

// Byte-swap a 64-bit scalar.
inline u64 swap64_S (const u64 v)
{
  const uchar8 bytes = as_uchar8 (v);

  return as_ulong (bytes.s76543210);
}
800
// Rotate right by n bits: rotate() rotates left, so use the complement.
inline u32 rotr32_S (const u32 a, const u32 n)
{
  const u32 left = 32 - n;

  return rotate (a, left);
}

// Rotate left by n bits (direct mapping onto the builtin).
inline u32 rotl32_S (const u32 a, const u32 n)
{
  return rotate (a, n);
}
810
// 64-bit rotate right: a left rotate by (64 - n), computed in 64 bits.
inline u64 rotr64_S (const u64 a, const u32 n)
{
  const u64 amount = (u64) 64 - n;

  return rotate (a, amount);
}

// 64-bit rotate left (direct mapping onto the builtin).
inline u64 rotl64_S (const u64 a, const u32 n)
{
  return rotate (a, (u64) n);
}
820
// Software amd_bytealign: concatenate a (high) and b (low) into 64 bits and
// return the 32-bit window starting (c & 3) bytes up from the bottom.
inline u32 amd_bytealign_S (const u32 a, const u32 b, const u32 c)
{
  const u32 shift = (c & 3) * 8;

  const u64 ab = (((u64) a) << 32) | ((u64) b);

  return (u32) (ab >> shift);
}
827
// Byte-swap every 32-bit lane (shift-and-mask form, valid for any VECT_SIZE).
inline u32x swap32 (const u32x v)
{
  u32x swapped;

  swapped  = (v >> 24) & 0x000000ff;
  swapped |= (v >>  8) & 0x0000ff00;
  swapped |= (v <<  8) & 0x00ff0000;
  swapped |= (v << 24) & 0xff000000;

  return swapped;
}

// Byte-swap every 64-bit lane.
inline u64x swap64 (const u64x v)
{
  u64x swapped;

  swapped  = (v >> 56) & 0x00000000000000ff;
  swapped |= (v >> 40) & 0x000000000000ff00;
  swapped |= (v >> 24) & 0x0000000000ff0000;
  swapped |= (v >>  8) & 0x00000000ff000000;
  swapped |= (v <<  8) & 0x000000ff00000000;
  swapped |= (v << 24) & 0x0000ff0000000000;
  swapped |= (v << 40) & 0x00ff000000000000;
  swapped |= (v << 56) & 0xff00000000000000;

  return swapped;
}
847
// Per-lane rotate right: rotate() is a left rotate, so use the complement.
inline u32x rotr32 (const u32x a, const u32 n)
{
  const u32 left = 32 - n;

  return rotate (a, left);
}

// Per-lane rotate left (direct mapping onto the builtin).
inline u32x rotl32 (const u32x a, const u32 n)
{
  return rotate (a, n);
}
857
// Per-lane 64-bit rotate right: a left rotate by (64 - n) in 64 bits.
inline u64x rotr64 (const u64x a, const u32 n)
{
  const u64 amount = (u64) 64 - n;

  return rotate (a, amount);
}

// Per-lane 64-bit rotate left (direct mapping onto the builtin).
inline u64x rotl64 (const u64x a, const u32 n)
{
  return rotate (a, (u64) n);
}
867
// Bit-field extract: c bits of a starting at bit b (software fallback for
// the hardware amd_bfe / bfe.u32 used on the vendor paths).
inline u32 __bfe (const u32 a, const u32 b, const u32 c)
{
  #define BIT(x) (1u << (x))
  #define BIT_MASK(x) (BIT (x) - 1)
  #define BFE(x,y,z) (((x) >> (y)) & BIT_MASK (z))

  // Guard the full-width case: BIT (32) would shift by the operand width,
  // which is undefined behavior. The 1u base also avoids signed overflow
  // in BIT (31), which was undefined with the previous plain "1".
  if (c >= 32) return a >> b;

  return BFE (a, b, c);
}
876
// Software amd_bytealign, per lane: low 32 bits of
// ((((u64) a) << 32) | b) >> ((c & 3) * 8).
// Each branch widens the u32 lanes into a u64 vector via a vector literal,
// shifts, then narrows back; spelled out per VECT_SIZE because vector
// literals require an explicit component list.
inline u32x amd_bytealign (const u32x a, const u32x b, const u32 c)
{
#if VECT_SIZE == 1
  const u64x tmp = ((((u64x) (a)) << 32) | ((u64x) (b))) >> ((c & 3) * 8);

  return (u32x) (tmp);
#endif

#if VECT_SIZE == 2
  const u64x tmp = ((((u64x) (a.s0, a.s1)) << 32) | ((u64x) (b.s0, b.s1))) >> ((c & 3) * 8);

  return (u32x) (tmp.s0, tmp.s1);
#endif

#if VECT_SIZE == 4
  const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3))) >> ((c & 3) * 8);

  return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3);
#endif

#if VECT_SIZE == 8
  const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3, a.s4, a.s5, a.s6, a.s7)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3, b.s4, b.s5, b.s6, b.s7))) >> ((c & 3) * 8);

  return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3, tmp.s4, tmp.s5, tmp.s6, tmp.s7);
#endif

#if VECT_SIZE == 16
  const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3, a.s4, a.s5, a.s6, a.s7, a.s8, a.s9, a.sa, a.sb, a.sc, a.sd, a.se, a.sf)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3, b.s4, b.s5, b.s6, b.s7, b.s8, b.s9, b.sa, b.sb, b.sc, b.sd, b.se, b.sf))) >> ((c & 3) * 8);

  return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3, tmp.s4, tmp.s5, tmp.s6, tmp.s7, tmp.s8, tmp.s9, tmp.sa, tmp.sb, tmp.sc, tmp.sd, tmp.se, tmp.sf);
#endif
}
909 #endif
910
// Per-hash digest storage. The digest_buf size depends on the hash mode
// selected at compile time; exactly one mode macro is expected to be
// defined. Two later duplicate branches (_SCRYPT_ and _LOTUS8_) were
// unreachable dead code -- the first match in an #elif chain wins -- and
// the duplicate _SCRYPT_ entry even disagreed on the size ([4] vs the
// live [8]); both duplicates have been removed.
typedef struct
{
#if defined _DES_
  u32 digest_buf[4];
#elif defined _MD4_
  u32 digest_buf[4];
#elif defined _MD5_
  u32 digest_buf[4];
#elif defined _MD5H_
  u32 digest_buf[4];
#elif defined _SHA1_
  u32 digest_buf[5];
#elif defined _BCRYPT_
  u32 digest_buf[6];
#elif defined _SHA256_
  u32 digest_buf[8];
#elif defined _SHA384_
  u32 digest_buf[16];
#elif defined _SHA512_
  u32 digest_buf[16];
#elif defined _KECCAK_
  u32 digest_buf[50];
#elif defined _RIPEMD160_
  u32 digest_buf[5];
#elif defined _WHIRLPOOL_
  u32 digest_buf[16];
#elif defined _GOST_
  u32 digest_buf[8];
#elif defined _GOST2012_256_
  u32 digest_buf[8];
#elif defined _GOST2012_512_
  u32 digest_buf[16];
#elif defined _SAPB_
  u32 digest_buf[4];
#elif defined _SAPG_
  u32 digest_buf[5];
#elif defined _MYSQL323_
  u32 digest_buf[4];
#elif defined _LOTUS5_
  u32 digest_buf[4];
#elif defined _LOTUS6_
  u32 digest_buf[4];
#elif defined _SCRYPT_
  u32 digest_buf[8];
#elif defined _LOTUS8_
  u32 digest_buf[4];
#elif defined _OFFICE2007_
  u32 digest_buf[4];
#elif defined _OFFICE2010_
  u32 digest_buf[4];
#elif defined _OFFICE2013_
  u32 digest_buf[4];
#elif defined _OLDOFFICE01_
  u32 digest_buf[4];
#elif defined _OLDOFFICE34_
  u32 digest_buf[4];
#elif defined _SIPHASH_
  u32 digest_buf[4];
#elif defined _PBKDF2_MD5_
  u32 digest_buf[32];
#elif defined _PBKDF2_SHA1_
  u32 digest_buf[32];
#elif defined _PBKDF2_SHA256_
  u32 digest_buf[32];
#elif defined _PBKDF2_SHA512_
  u32 digest_buf[32];
#elif defined _PDF17L8_
  u32 digest_buf[8];
#elif defined _CRC32_
  u32 digest_buf[4];
#elif defined _SEVEN_ZIP_
  u32 digest_buf[4];
#elif defined _ANDROIDFDE_
  u32 digest_buf[4];
#elif defined _DCC2_
  u32 digest_buf[4];
#elif defined _WPA_
  u32 digest_buf[4];
#elif defined _MD5_SHA1_
  u32 digest_buf[4];
#elif defined _SHA1_MD5_
  u32 digest_buf[5];
#elif defined _NETNTLMV2_
  u32 digest_buf[4];
#elif defined _KRB5PA_
  u32 digest_buf[4];
#elif defined _CLOUDKEY_
  u32 digest_buf[8];
#elif defined _PSAFE2_
  u32 digest_buf[5];
#elif defined _RAR3_
  u32 digest_buf[4];
#elif defined _SHA256_SHA1_
  u32 digest_buf[8];
#elif defined _MS_DRSR_
  u32 digest_buf[8];
#elif defined _ANDROIDFDE_SAMSUNG_
  u32 digest_buf[8];
#elif defined _RAR5_
  u32 digest_buf[4];
#elif defined _KRB5TGS_
  u32 digest_buf[4];
#elif defined _AXCRYPT_
  u32 digest_buf[4];
#elif defined _KEEPASS_
  u32 digest_buf[4];
#endif

} digest_t;
1024
// Per-salt data plus cracking progress, shared between host and kernels.
typedef struct
{
  u32 salt_buf[16];    // raw salt words
  u32 salt_buf_pc[8];  // precomputed/derived salt words (mode specific -- verify)

  u32 salt_len;        // salt length in bytes
  u32 salt_iter;       // iteration count for iterated (slow) hash modes
  u32 salt_sign[2];    // NOTE(review): meaning is mode specific -- confirm

  u32 keccak_mdlen;    // digest length used by Keccak-based modes
  u32 truecrypt_mdlen; // digest length used by TrueCrypt modes

  u32 digests_cnt;     // number of digests bound to this salt
  u32 digests_done;    // how many of those are already cracked

  u32 digests_offset;  // offset of this salt's digests in the global digest buffer

  u32 scrypt_N;        // scrypt cost parameters (N, r, p)
  u32 scrypt_r;
  u32 scrypt_p;
  u32 scrypt_tmto;     // scrypt time-memory trade-off factor
  u32 scrypt_phy;      // NOTE(review): presumably a physical-memory hint -- confirm

} salt_t;
1049
// PDF encryption parameters (presumably from the document's /Encrypt
// dictionary: V = version, R = revision, P = permissions -- confirm).
typedef struct
{
  int V;
  int R;
  int P;

  int enc_md; // NOTE(review): encryption metadata flag -- confirm

  u32 id_buf[8];  // document /ID
  u32 u_buf[32];  // /U (user password validation) entry
  u32 o_buf[32];  // /O (owner password validation) entry

  int id_len;
  int o_len;
  int u_len;

  u32 rc4key[2];
  u32 rc4data[2];

} pdf_t;

// WPA/WPA2 handshake material.
typedef struct
{
  u32 pke[25];         // NOTE(review): precomputed key expansion input -- confirm
  u32 eapol[64];       // captured EAPOL frame
  int eapol_size;
  int keyver;          // key descriptor version selecting the MIC algorithm
  u8 orig_mac1[6];     // AP / station MAC addresses
  u8 orig_mac2[6];
  u8 orig_nonce1[32];  // handshake nonces
  u8 orig_nonce2[32];

} wpa_t;

// Bitcoin wallet (wallet.dat) crack material.
typedef struct
{
  u32 cry_master_buf[64]; // encrypted master key
  u32 ckey_buf[64];
  u32 public_key_buf[64];

  u32 cry_master_len;
  u32 ckey_len;
  u32 public_key_len;

} bitcoin_wallet_t;

// SIP digest-authentication material.
typedef struct
{
  u32 salt_buf[30];
  u32 salt_len;

  u32 esalt_buf[38];
  u32 esalt_len;

} sip_t;
1105
// Android full-disk-encryption data blob.
typedef struct
{
  u32 data[384];

} androidfde_t;

// IKE PSK (pre-shared key) crack material.
typedef struct
{
  u32 nr_buf[16];   // nonce/response data -- confirm exact contents
  u32 nr_len;

  u32 msg_buf[128]; // exchanged message data
  u32 msg_len;

} ikepsk_t;

// NetNTLM challenge/response material.
typedef struct
{
  u32 user_len;
  u32 domain_len;
  u32 srvchall_len;
  u32 clichall_len;

  u32 userdomain_buf[64]; // user + domain (order/packing mode specific -- verify)
  u32 chall_buf[256];     // server + client challenge data

} netntlm_t;

// Kerberos 5 pre-auth (AS-REQ) material.
typedef struct
{
  u32 user[16];
  u32 realm[16];
  u32 salt[32];
  u32 timestamp[16]; // encrypted timestamp blob
  u32 checksum[4];

} krb5pa_t;

// Kerberos 5 TGS-REP material.
typedef struct
{
  u32 account_info[512];
  u32 checksum[4];
  u32 edata2[2560];  // encrypted ticket data
  u32 edata2_len;

} krb5tgs_t;

// TrueCrypt header material.
typedef struct
{
  u32 salt_buf[16];
  u32 data_buf[112];    // encrypted volume header data
  u32 keyfile_buf[16];

} tc_t;

// Salts for the PBKDF2 family (sized per underlying PRF).
typedef struct
{
  u32 salt_buf[16];

} pbkdf2_md5_t;

typedef struct
{
  u32 salt_buf[16];

} pbkdf2_sha1_t;

typedef struct
{
  u32 salt_buf[16];

} pbkdf2_sha256_t;

typedef struct
{
  u32 salt_buf[32];

} pbkdf2_sha512_t;

// RAKP (IPMI 2.0) authentication salt.
typedef struct
{
  u32 salt_buf[128];
  u32 salt_len;

} rakp_t;
1191
// CloudKeychain data blob.
typedef struct
{
  u32 data_len;
  u32 data_buf[512];

} cloudkey_t;

// MS Office 2007 verifier material.
typedef struct
{
  u32 encryptedVerifier[4];
  u32 encryptedVerifierHash[5];

  u32 keySize; // key size in bits -- confirm units against host code

} office2007_t;

// MS Office 2010 verifier material.
typedef struct
{
  u32 encryptedVerifier[4];
  u32 encryptedVerifierHash[8];

} office2010_t;

// MS Office 2013 verifier material.
typedef struct
{
  u32 encryptedVerifier[4];
  u32 encryptedVerifierHash[8];

} office2013_t;

// Legacy Office (RC4/MD5 era) verifier material.
typedef struct
{
  u32 version;
  u32 encryptedVerifier[4];
  u32 encryptedVerifierHash[4];
  u32 rc4key[2];

} oldoffice01_t;

typedef struct
{
  u32 version;
  u32 encryptedVerifier[4];
  u32 encryptedVerifierHash[5];
  u32 rc4key[2];

} oldoffice34_t;

// PeopleSoft token: salt plus a precomputed partial SHA1 state.
typedef struct
{
  u32 salt_buf[128];
  u32 salt_len;

  u32 pc_digest[5]; // precomputed digest state
  u32 pc_offset;    // offset where precomputation stopped

} pstoken_t;

// KeePass database crack material (versions 1 and 2).
typedef struct
{
  u32 version;
  u32 algorithm;

  /* key-file handling */
  u32 keyfile_len;
  u32 keyfile[8];

  u32 final_random_seed[8];
  u32 transf_random_seed[8];
  u32 enc_iv[4];
  u32 contents_hash[8];

  /* specific to version 1 */
  u32 contents_len;
  u32 contents[75000];

  /* specific to version 2 */
  u32 expected_bytes[8];

} keepass_t;
1272
// Per-iteration scratch ("tmp") state for slow-hash kernels. Each struct
// holds whatever intermediate state its loop kernel carries between
// invocations.

// PDF 1.4 (RC4/MD5) loop state.
typedef struct
{
  u32 digest[4];
  u32 out[4];

} pdf14_tmp_t;

// PDF 1.7 level 8 loop state; the union aliases the digest as 32- or
// 64-bit words for the SHA-256/384/512 variants.
typedef struct
{
  union
  {
    u32 dgst32[16];
    u64 dgst64[8];
  };

  u32 dgst_len; // current digest length in bytes
  u32 W_len;    // current message-block length

} pdf17l8_tmp_t;

// phpass (iterated MD5) loop state.
typedef struct
{
  u32 digest_buf[4];

} phpass_tmp_t;

// md5crypt loop state.
typedef struct
{
  u32 digest_buf[4];

} md5crypt_tmp_t;

// sha256crypt loop state (names follow the public sha256crypt algorithm).
typedef struct
{
  u32 alt_result[8];

  u32 p_bytes[4];
  u32 s_bytes[4];

} sha256crypt_tmp_t;

// sha512crypt loop state (64-bit words).
typedef struct
{
  u64 l_alt_result[8];

  u64 l_p_bytes[2];
  u64 l_s_bytes[2];

} sha512crypt_tmp_t;

// WPA PBKDF2-HMAC-SHA1 loop state (ipad/opad = HMAC pad states).
typedef struct
{
  u32 ipad[5];
  u32 opad[5];

  u32 dgst[10];
  u32 out[10];

} wpa_tmp_t;

// Bitcoin wallet (iterated SHA-512) loop state.
typedef struct
{
  u64 dgst[8];

} bitcoin_wallet_tmp_t;

// DCC2 (MS-Cache v2, PBKDF2-HMAC-SHA1) loop state.
typedef struct
{
  u32 ipad[5];
  u32 opad[5];

  u32 dgst[5];
  u32 out[4];

} dcc2_tmp_t;

// bcrypt loop state: expanded key E plus the Blowfish P-array and S-boxes.
typedef struct
{
  u32 E[18];

  u32 P[18];

  u32 S0[256];
  u32 S1[256];
  u32 S2[256];
  u32 S3[256];

} bcrypt_tmp_t;
1361
1362 typedef struct
1363 {
1364 u32 digest[2];
1365
1366 u32 P[18];
1367
1368 u32 S0[256];
1369 u32 S1[256];
1370 u32 S2[256];
1371 u32 S3[256];
1372
1373 } pwsafe2_tmp_t;
1374
1375 typedef struct
1376 {
1377 u32 digest_buf[8];
1378
1379 } pwsafe3_tmp_t;
1380
1381 typedef struct
1382 {
1383 u32 digest_buf[5];
1384
1385 } androidpin_tmp_t;
1386
1387 typedef struct
1388 {
1389 u32 ipad[5];
1390 u32 opad[5];
1391
1392 u32 dgst[10];
1393 u32 out[10];
1394
1395 } androidfde_tmp_t;
1396
1397 typedef struct
1398 {
1399 u32 ipad[16];
1400 u32 opad[16];
1401
1402 u32 dgst[64];
1403 u32 out[64];
1404
1405 } tc_tmp_t;
1406
1407 typedef struct
1408 {
1409 u64 ipad[8];
1410 u64 opad[8];
1411
1412 u64 dgst[32];
1413 u64 out[32];
1414
1415 } tc64_tmp_t;
1416
1417 typedef struct
1418 {
1419 u32 ipad[4];
1420 u32 opad[4];
1421
1422 u32 dgst[32];
1423 u32 out[32];
1424
1425 } pbkdf2_md5_tmp_t;
1426
1427 typedef struct
1428 {
1429 u32 ipad[5];
1430 u32 opad[5];
1431
1432 u32 dgst[32];
1433 u32 out[32];
1434
1435 } pbkdf2_sha1_tmp_t;
1436
1437 typedef struct
1438 {
1439 u32 ipad[8];
1440 u32 opad[8];
1441
1442 u32 dgst[32];
1443 u32 out[32];
1444
1445 } pbkdf2_sha256_tmp_t;
1446
1447 typedef struct
1448 {
1449 u64 ipad[8];
1450 u64 opad[8];
1451
1452 u64 dgst[16];
1453 u64 out[16];
1454
1455 } pbkdf2_sha512_tmp_t;
1456
1457 typedef struct
1458 {
1459 u64 out[8];
1460
1461 } ecryptfs_tmp_t;
1462
1463 typedef struct
1464 {
1465 u64 ipad[8];
1466 u64 opad[8];
1467
1468 u64 dgst[16];
1469 u64 out[16];
1470
1471 } oraclet_tmp_t;
1472
1473 typedef struct
1474 {
1475 u32 ipad[5];
1476 u32 opad[5];
1477
1478 u32 dgst[5];
1479 u32 out[5];
1480
1481 } agilekey_tmp_t;
1482
1483 typedef struct
1484 {
1485 u32 ipad[5];
1486 u32 opad[5];
1487
1488 u32 dgst1[5];
1489 u32 out1[5];
1490
1491 u32 dgst2[5];
1492 u32 out2[5];
1493
1494 } mywallet_tmp_t;
1495
1496 typedef struct
1497 {
1498 u32 ipad[5];
1499 u32 opad[5];
1500
1501 u32 dgst[5];
1502 u32 out[5];
1503
1504 } sha1aix_tmp_t;
1505
1506 typedef struct
1507 {
1508 u32 ipad[8];
1509 u32 opad[8];
1510
1511 u32 dgst[8];
1512 u32 out[8];
1513
1514 } sha256aix_tmp_t;
1515
1516 typedef struct
1517 {
1518 u64 ipad[8];
1519 u64 opad[8];
1520
1521 u64 dgst[8];
1522 u64 out[8];
1523
1524 } sha512aix_tmp_t;
1525
1526 typedef struct
1527 {
1528 u32 ipad[8];
1529 u32 opad[8];
1530
1531 u32 dgst[8];
1532 u32 out[8];
1533
1534 } lastpass_tmp_t;
1535
1536 typedef struct
1537 {
1538 u64 digest_buf[8];
1539
1540 } drupal7_tmp_t;
1541
1542 typedef struct
1543 {
1544 u32 ipad[5];
1545 u32 opad[5];
1546
1547 u32 dgst[5];
1548 u32 out[5];
1549
1550 } lotus8_tmp_t;
1551
1552 typedef struct
1553 {
1554 u32 out[5];
1555
1556 } office2007_tmp_t;
1557
1558 typedef struct
1559 {
1560 u32 out[5];
1561
1562 } office2010_tmp_t;
1563
1564 typedef struct
1565 {
1566 u64 out[8];
1567
1568 } office2013_tmp_t;
1569
1570 typedef struct
1571 {
1572 u32 digest_buf[5];
1573
1574 } saph_sha1_tmp_t;
1575
1576 typedef struct
1577 {
1578 u32 block[16];
1579
1580 u32 dgst[8];
1581
1582 u32 block_len;
1583 u32 final_len;
1584
1585 } seven_zip_tmp_t;
1586
1587 typedef struct
1588 {
1589 u32 KEK[5];
1590
1591 u32 lsb[4];
1592 u32 cipher[4];
1593
1594 } axcrypt_tmp_t;
1595
1596 typedef struct
1597 {
1598 u32 tmp_digest[8];
1599
1600 } keepass_tmp_t;
1601
1602 typedef struct
1603 {
1604 u32 Kc[16];
1605 u32 Kd[16];
1606
1607 u32 iv[2];
1608
1609 } bsdicrypt_tmp_t;
1610
1611 typedef struct
1612 {
1613 u32 dgst[17][5];
1614
1615 } rar3_tmp_t;
1616
1617 typedef struct
1618 {
1619 u32 user[16];
1620
1621 } cram_md5_t;
1622
1623 typedef struct
1624 {
1625 u32 iv_buf[4];
1626 u32 iv_len;
1627
1628 u32 salt_buf[4];
1629 u32 salt_len;
1630
1631 u32 crc;
1632
1633 u32 data_buf[96];
1634 u32 data_len;
1635
1636 u32 unpack_size;
1637
1638 } seven_zip_t;
1639
1640 typedef struct
1641 {
1642 u32 key;
1643 u64 val;
1644
1645 } hcstat_table_t;
1646
1647 typedef struct
1648 {
1649 u32 cs_buf[0x100];
1650 u32 cs_len;
1651
1652 } cs_t;
1653
1654 typedef struct
1655 {
1656 u32 cmds[0x100];
1657
1658 } kernel_rule_t;
1659
1660 typedef struct
1661 {
1662 u32 gidvid;
1663 u32 il_pos;
1664
1665 } plain_t;
1666
1667 typedef struct
1668 {
1669 u32 i[16];
1670
1671 u32 pw_len;
1672
1673 u32 alignment_placeholder_1;
1674 u32 alignment_placeholder_2;
1675 u32 alignment_placeholder_3;
1676
1677 } pw_t;
1678
1679 typedef struct
1680 {
1681 u32 i;
1682
1683 } bf_t;
1684
1685 typedef struct
1686 {
1687 u32 i[8];
1688
1689 u32 pw_len;
1690
1691 } comb_t;
1692
1693 typedef struct
1694 {
1695 u32 b[32];
1696
1697 } bs_word_t;
1698
1699 typedef struct
1700 {
1701 uint4 P[64];
1702
1703 } scrypt_tmp_t;