/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 * License.....: MIT
 */
#include "include/constants.h"
#include "include/kernel_vendor.h"

#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"
#ifdef VECT_SIZE1
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef VECT_SIZE2
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef VECT_SIZE4
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
#endif
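
/**
 * SHA-256 round constants K[0..63]; the SHA256Cxx values are provided by
 * the constants header included above
 */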
__constant u32 k_sha256[64] =
{
  SHA256C00, SHA256C01, SHA256C02, SHA256C03,
  SHA256C04, SHA256C05, SHA256C06, SHA256C07,
  SHA256C08, SHA256C09, SHA256C0a, SHA256C0b,
  SHA256C0c, SHA256C0d, SHA256C0e, SHA256C0f,
  SHA256C10, SHA256C11, SHA256C12, SHA256C13,
  SHA256C14, SHA256C15, SHA256C16, SHA256C17,
  SHA256C18, SHA256C19, SHA256C1a, SHA256C1b,
  SHA256C1c, SHA256C1d, SHA256C1e, SHA256C1f,
  SHA256C20, SHA256C21, SHA256C22, SHA256C23,
  SHA256C24, SHA256C25, SHA256C26, SHA256C27,
  SHA256C28, SHA256C29, SHA256C2a, SHA256C2b,
  SHA256C2c, SHA256C2d, SHA256C2e, SHA256C2f,
  SHA256C30, SHA256C31, SHA256C32, SHA256C33,
  SHA256C34, SHA256C35, SHA256C36, SHA256C37,
  SHA256C38, SHA256C39, SHA256C3a, SHA256C3b,
  SHA256C3c, SHA256C3d, SHA256C3e, SHA256C3f,
};
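
/**
 * One SHA-256 compression: absorbs a single 64-byte block held in w0..w3
 * (16 big-endian 32-bit words) into digest[0..7]
 */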
static void sha256_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[8])
{
  u32x a = digest[0];
  u32x b = digest[1];
  u32x c = digest[2];
  u32x d = digest[3];
  u32x e = digest[4];
  u32x f = digest[5];
  u32x g = digest[6];
  u32x h = digest[7];

  u32x w0_t = w0[0];
  u32x w1_t = w0[1];
  u32x w2_t = w0[2];
  u32x w3_t = w0[3];
  u32x w4_t = w1[0];
  u32x w5_t = w1[1];
  u32x w6_t = w1[2];
  u32x w7_t = w1[3];
  u32x w8_t = w2[0];
  u32x w9_t = w2[1];
  u32x wa_t = w2[2];
  u32x wb_t = w2[3];
  u32x wc_t = w3[0];
  u32x wd_t = w3[1];
  u32x we_t = w3[2];
  u32x wf_t = w3[3];
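
/* message schedule: w[t] = sigma1 (w[t - 2]) + w[t - 7] + sigma0 (w[t - 15]) + w[t - 16] */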
#define ROUND_EXPAND() \
{ \
  w0_t = SHA256_EXPAND (we_t, w9_t, w1_t, w0_t); \
  w1_t = SHA256_EXPAND (wf_t, wa_t, w2_t, w1_t); \
  w2_t = SHA256_EXPAND (w0_t, wb_t, w3_t, w2_t); \
  w3_t = SHA256_EXPAND (w1_t, wc_t, w4_t, w3_t); \
  w4_t = SHA256_EXPAND (w2_t, wd_t, w5_t, w4_t); \
  w5_t = SHA256_EXPAND (w3_t, we_t, w6_t, w5_t); \
  w6_t = SHA256_EXPAND (w4_t, wf_t, w7_t, w6_t); \
  w7_t = SHA256_EXPAND (w5_t, w0_t, w8_t, w7_t); \
  w8_t = SHA256_EXPAND (w6_t, w1_t, w9_t, w8_t); \
  w9_t = SHA256_EXPAND (w7_t, w2_t, wa_t, w9_t); \
  wa_t = SHA256_EXPAND (w8_t, w3_t, wb_t, wa_t); \
  wb_t = SHA256_EXPAND (w9_t, w4_t, wc_t, wb_t); \
  wc_t = SHA256_EXPAND (wa_t, w5_t, wd_t, wc_t); \
  wd_t = SHA256_EXPAND (wb_t, w6_t, we_t, wd_t); \
  we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); \
  wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); \
}
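
/* 16 rounds per invocation; rotating the a..h argument order stands in for rotating the state variables */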
#define ROUND_STEP(i) \
{ \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha256[i + 0]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha256[i + 1]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha256[i + 2]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha256[i + 3]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha256[i + 4]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha256[i + 5]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha256[i + 6]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, k_sha256[i + 7]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha256[i + 8]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha256[i + 9]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha256[i + 10]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha256[i + 11]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha256[i + 12]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha256[i + 13]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, k_sha256[i + 14]); \
  SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha256[i + 15]); \
}
  ROUND_STEP (0);

  for (int i = 16; i < 64; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  }

  digest[0] += a;
  digest[1] += b;
  digest[2] += c;
  digest[3] += d;
  digest[4] += e;
  digest[5] += f;
  digest[6] += g;
  digest[7] += h;
}
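
/**
 * Precompute the HMAC-SHA256 inner and outer states from the 64-byte key
 * block: XOR with the 0x36 ipad pattern and hash, then flip to the 0x5c
 * opad pattern and hash again
 */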
static void hmac_sha256_pad (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[8], u32x opad[8])
{
  w0[0] = w0[0] ^ 0x36363636;
  w0[1] = w0[1] ^ 0x36363636;
  w0[2] = w0[2] ^ 0x36363636;
  w0[3] = w0[3] ^ 0x36363636;
  w1[0] = w1[0] ^ 0x36363636;
  w1[1] = w1[1] ^ 0x36363636;
  w1[2] = w1[2] ^ 0x36363636;
  w1[3] = w1[3] ^ 0x36363636;
  w2[0] = w2[0] ^ 0x36363636;
  w2[1] = w2[1] ^ 0x36363636;
  w2[2] = w2[2] ^ 0x36363636;
  w2[3] = w2[3] ^ 0x36363636;
  w3[0] = w3[0] ^ 0x36363636;
  w3[1] = w3[1] ^ 0x36363636;
  w3[2] = w3[2] ^ 0x36363636;
  w3[3] = w3[3] ^ 0x36363636;

  ipad[0] = SHA256M_A;
  ipad[1] = SHA256M_B;
  ipad[2] = SHA256M_C;
  ipad[3] = SHA256M_D;
  ipad[4] = SHA256M_E;
  ipad[5] = SHA256M_F;
  ipad[6] = SHA256M_G;
  ipad[7] = SHA256M_H;

  sha256_transform (w0, w1, w2, w3, ipad);
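
  /* 0x36 ^ 0x6a == 0x5c: turns the ipad-XORed key words into opad-XORed ones */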
  w0[0] = w0[0] ^ 0x6a6a6a6a;
  w0[1] = w0[1] ^ 0x6a6a6a6a;
  w0[2] = w0[2] ^ 0x6a6a6a6a;
  w0[3] = w0[3] ^ 0x6a6a6a6a;
  w1[0] = w1[0] ^ 0x6a6a6a6a;
  w1[1] = w1[1] ^ 0x6a6a6a6a;
  w1[2] = w1[2] ^ 0x6a6a6a6a;
  w1[3] = w1[3] ^ 0x6a6a6a6a;
  w2[0] = w2[0] ^ 0x6a6a6a6a;
  w2[1] = w2[1] ^ 0x6a6a6a6a;
  w2[2] = w2[2] ^ 0x6a6a6a6a;
  w2[3] = w2[3] ^ 0x6a6a6a6a;
  w3[0] = w3[0] ^ 0x6a6a6a6a;
  w3[1] = w3[1] ^ 0x6a6a6a6a;
  w3[2] = w3[2] ^ 0x6a6a6a6a;
  w3[3] = w3[3] ^ 0x6a6a6a6a;

  opad[0] = SHA256M_A;
  opad[1] = SHA256M_B;
  opad[2] = SHA256M_C;
  opad[3] = SHA256M_D;
  opad[4] = SHA256M_E;
  opad[5] = SHA256M_F;
  opad[6] = SHA256M_G;
  opad[7] = SHA256M_H;

  sha256_transform (w0, w1, w2, w3, opad);
}
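
/**
 * HMAC-SHA256 over one padded message block: the inner hash continues from
 * the ipad state, then the 32-byte inner digest is hashed from the opad
 * state, hence the (64 + 32) * 8 outer bit length
 */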
static void hmac_sha256_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[8], u32x opad[8], u32x digest[8])

  sha256_transform (w0, w1, w2, w3, digest);

  w3[3] = (64 + 32) * 8;

  sha256_transform (w0, w1, w2, w3, digest);
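
/**
 * Append the 8 bytes in append[2] to the 64-byte block at byte offset
 * block_len; each case shifts the two words across the affected block words
 */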
static void memcat8 (u32x block0[4], u32x block1[4], u32x block2[4], u32x block3[4], const u32 block_len, const u32 append[2])
{
  switch (block_len)
  {
    case 0:
      block0[0] = append[0];
      block0[1] = append[1];
      break;

    case 1:
      block0[0] = block0[0] | append[0] << 8;
      block0[1] = append[0] >> 24 | append[1] << 8;
      block0[2] = append[1] >> 24;
      break;

    case 2:
      block0[0] = block0[0] | append[0] << 16;
      block0[1] = append[0] >> 16 | append[1] << 16;
      block0[2] = append[1] >> 16;
      break;

    case 3:
      block0[0] = block0[0] | append[0] << 24;
      block0[1] = append[0] >> 8 | append[1] << 24;
      block0[2] = append[1] >> 8;
      break;

    case 4:
      block0[1] = append[0];
      block0[2] = append[1];
      break;

    case 5:
      block0[1] = block0[1] | append[0] << 8;
      block0[2] = append[0] >> 24 | append[1] << 8;
      block0[3] = append[1] >> 24;
      break;

    case 6:
      block0[1] = block0[1] | append[0] << 16;
      block0[2] = append[0] >> 16 | append[1] << 16;
      block0[3] = append[1] >> 16;
      break;

    case 7:
      block0[1] = block0[1] | append[0] << 24;
      block0[2] = append[0] >> 8 | append[1] << 24;
      block0[3] = append[1] >> 8;
      break;

    case 8:
      block0[2] = append[0];
      block0[3] = append[1];
      break;

    case 9:
      block0[2] = block0[2] | append[0] << 8;
      block0[3] = append[0] >> 24 | append[1] << 8;
      block1[0] = append[1] >> 24;
      break;

    case 10:
      block0[2] = block0[2] | append[0] << 16;
      block0[3] = append[0] >> 16 | append[1] << 16;
      block1[0] = append[1] >> 16;
      break;

    case 11:
      block0[2] = block0[2] | append[0] << 24;
      block0[3] = append[0] >> 8 | append[1] << 24;
      block1[0] = append[1] >> 8;
      break;

    case 12:
      block0[3] = append[0];
      block1[0] = append[1];
      break;

    case 13:
      block0[3] = block0[3] | append[0] << 8;
      block1[0] = append[0] >> 24 | append[1] << 8;
      block1[1] = append[1] >> 24;
      break;

    case 14:
      block0[3] = block0[3] | append[0] << 16;
      block1[0] = append[0] >> 16 | append[1] << 16;
      block1[1] = append[1] >> 16;
      break;

    case 15:
      block0[3] = block0[3] | append[0] << 24;
      block1[0] = append[0] >> 8 | append[1] << 24;
      block1[1] = append[1] >> 8;
      break;

    case 16:
      block1[0] = append[0];
      block1[1] = append[1];
      break;

    case 17:
      block1[0] = block1[0] | append[0] << 8;
      block1[1] = append[0] >> 24 | append[1] << 8;
      block1[2] = append[1] >> 24;
      break;

    case 18:
      block1[0] = block1[0] | append[0] << 16;
      block1[1] = append[0] >> 16 | append[1] << 16;
      block1[2] = append[1] >> 16;
      break;

    case 19:
      block1[0] = block1[0] | append[0] << 24;
      block1[1] = append[0] >> 8 | append[1] << 24;
      block1[2] = append[1] >> 8;
      break;

    case 20:
      block1[1] = append[0];
      block1[2] = append[1];
      break;

    case 21:
      block1[1] = block1[1] | append[0] << 8;
      block1[2] = append[0] >> 24 | append[1] << 8;
      block1[3] = append[1] >> 24;
      break;

    case 22:
      block1[1] = block1[1] | append[0] << 16;
      block1[2] = append[0] >> 16 | append[1] << 16;
      block1[3] = append[1] >> 16;
      break;

    case 23:
      block1[1] = block1[1] | append[0] << 24;
      block1[2] = append[0] >> 8 | append[1] << 24;
      block1[3] = append[1] >> 8;
      break;

    case 24:
      block1[2] = append[0];
      block1[3] = append[1];
      break;

    case 25:
      block1[2] = block1[2] | append[0] << 8;
      block1[3] = append[0] >> 24 | append[1] << 8;
      block2[0] = append[1] >> 24;
      break;

    case 26:
      block1[2] = block1[2] | append[0] << 16;
      block1[3] = append[0] >> 16 | append[1] << 16;
      block2[0] = append[1] >> 16;
      break;

    case 27:
      block1[2] = block1[2] | append[0] << 24;
      block1[3] = append[0] >> 8 | append[1] << 24;
      block2[0] = append[1] >> 8;
      break;

    case 28:
      block1[3] = append[0];
      block2[0] = append[1];
      break;

    case 29:
      block1[3] = block1[3] | append[0] << 8;
      block2[0] = append[0] >> 24 | append[1] << 8;
      block2[1] = append[1] >> 24;
      break;

    case 30:
      block1[3] = block1[3] | append[0] << 16;
      block2[0] = append[0] >> 16 | append[1] << 16;
      block2[1] = append[1] >> 16;
      break;

    case 31:
      block1[3] = block1[3] | append[0] << 24;
      block2[0] = append[0] >> 8 | append[1] << 24;
      block2[1] = append[1] >> 8;
      break;

    case 32:
      block2[0] = append[0];
      block2[1] = append[1];
      break;

    case 33:
      block2[0] = block2[0] | append[0] << 8;
      block2[1] = append[0] >> 24 | append[1] << 8;
      block2[2] = append[1] >> 24;
      break;

    case 34:
      block2[0] = block2[0] | append[0] << 16;
      block2[1] = append[0] >> 16 | append[1] << 16;
      block2[2] = append[1] >> 16;
      break;

    case 35:
      block2[0] = block2[0] | append[0] << 24;
      block2[1] = append[0] >> 8 | append[1] << 24;
      block2[2] = append[1] >> 8;
      break;

    case 36:
      block2[1] = append[0];
      block2[2] = append[1];
      break;

    case 37:
      block2[1] = block2[1] | append[0] << 8;
      block2[2] = append[0] >> 24 | append[1] << 8;
      block2[3] = append[1] >> 24;
      break;

    case 38:
      block2[1] = block2[1] | append[0] << 16;
      block2[2] = append[0] >> 16 | append[1] << 16;
      block2[3] = append[1] >> 16;
      break;

    case 39:
      block2[1] = block2[1] | append[0] << 24;
      block2[2] = append[0] >> 8 | append[1] << 24;
      block2[3] = append[1] >> 8;
      break;

    case 40:
      block2[2] = append[0];
      block2[3] = append[1];
      break;

    case 41:
      block2[2] = block2[2] | append[0] << 8;
      block2[3] = append[0] >> 24 | append[1] << 8;
      block3[0] = append[1] >> 24;
      break;

    case 42:
      block2[2] = block2[2] | append[0] << 16;
      block2[3] = append[0] >> 16 | append[1] << 16;
      block3[0] = append[1] >> 16;
      break;

    case 43:
      block2[2] = block2[2] | append[0] << 24;
      block2[3] = append[0] >> 8 | append[1] << 24;
      block3[0] = append[1] >> 8;
      break;

    case 44:
      block2[3] = append[0];
      block3[0] = append[1];
      break;

    case 45:
      block2[3] = block2[3] | append[0] << 8;
      block3[0] = append[0] >> 24 | append[1] << 8;
      block3[1] = append[1] >> 24;
      break;

    case 46:
      block2[3] = block2[3] | append[0] << 16;
      block3[0] = append[0] >> 16 | append[1] << 16;
      block3[1] = append[1] >> 16;
      break;

    case 47:
      block2[3] = block2[3] | append[0] << 24;
      block3[0] = append[0] >> 8 | append[1] << 24;
      block3[1] = append[1] >> 8;
      break;

    case 48:
      block3[0] = append[0];
      block3[1] = append[1];
      break;

    case 49:
      block3[0] = block3[0] | append[0] << 8;
      block3[1] = append[0] >> 24 | append[1] << 8;
      block3[2] = append[1] >> 24;
      break;

    case 50:
      block3[0] = block3[0] | append[0] << 16;
      block3[1] = append[0] >> 16 | append[1] << 16;
      block3[2] = append[1] >> 16;
      break;

    case 51:
      block3[0] = block3[0] | append[0] << 24;
      block3[1] = append[0] >> 8 | append[1] << 24;
      block3[2] = append[1] >> 8;
      break;

    case 52:
      block3[1] = append[0];
      block3[2] = append[1];
      break;

    case 53:
      block3[1] = block3[1] | append[0] << 8;
      block3[2] = append[0] >> 24 | append[1] << 8;
      block3[3] = append[1] >> 24;
      break;

    case 54:
      block3[1] = block3[1] | append[0] << 16;
      block3[2] = append[0] >> 16 | append[1] << 16;
      block3[3] = append[1] >> 16;
      break;

    case 55:
      block3[1] = block3[1] | append[0] << 24;
      block3[2] = append[0] >> 8 | append[1] << 24;
      block3[3] = append[1] >> 8;
      break;

    case 56:
      block3[2] = append[0];
      block3[3] = append[1];
      break;
  }
}
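
/**
 * 32-bit byteswap built from two masked rotates, presumably to dodge a
 * slow or missing native byteswap on some platforms
 */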
static uint4 swap_workaround (uint4 v)
{
  return (rotate ((v & 0x00FF00FF), 24u) | rotate ((v & 0xFF00FF00), 8u));
}
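
/* scrypt sizing: one BlockMix state is 2 * r * 16 u32 words (128 * r bytes); B holds p such states, V holds N */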
#define GET_SCRYPT_CNT(r,p) (2 * (r) * 16 * (p))
#define GET_SMIX_CNT(r,N)   (2 * (r) * 16 * (N))
#define GET_STATE_CNT(r)    (2 * (r) * 16)

#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
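
/* two Salsa20 rounds (column half, then row half) on the uint4 rows X0..X3; the .sXXXX swizzles realign lanes between the halves */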
#define SALSA20_2R() \
{ \
  ADD_ROTATE_XOR (X1, X0, X3, 7); \
  ADD_ROTATE_XOR (X2, X1, X0, 9); \
  ADD_ROTATE_XOR (X3, X2, X1, 13); \
  ADD_ROTATE_XOR (X0, X3, X2, 18); \
  X1 = X1.s3012; \
  X2 = X2.s2301; \
  X3 = X3.s1230; \
  ADD_ROTATE_XOR (X3, X0, X1, 7); \
  ADD_ROTATE_XOR (X2, X3, X0, 9); \
  ADD_ROTATE_XOR (X1, X2, X3, 13); \
  ADD_ROTATE_XOR (X0, X1, X2, 18); \
  X1 = X1.s1230; \
  X2 = X2.s2301; \
  X3 = X3.s3012; \
}
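
/* Salsa20/8 with feed-forward, applied to the running state XORed with the current sub-block */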
#define SALSA20_8_XOR() \

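
/**
 * scrypt BlockMix: chain Salsa20/8 through the 2 * r sub-blocks (R0..R3
 * start as the last sub-block), then interleave even and odd outputs
 */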
static void salsa_r (uint4 *T, const u32 r)
{
  const u32 state_cnt = GET_STATE_CNT (r);

  const u32 state_cnt4 = state_cnt / 4;

  uint4 R0 = T[state_cnt4 - 4];
  uint4 R1 = T[state_cnt4 - 3];
  uint4 R2 = T[state_cnt4 - 2];
  uint4 R3 = T[state_cnt4 - 1];

  for (u32 i = 0; i < state_cnt4; i += 8)
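
  /* helpers for the even/odd reordering of sub-blocks after the mix */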
  #define exchg(x,y) { const uint4 t = T[(x)]; T[(x)] = T[(y)]; T[(y)] = t; }

  #define exchg4(x,y) \
  { \
    const u32 x4 = (x) * 4; \
    const u32 y4 = (y) * 4; \
    \
    exchg (x4 + 0, y4 + 0); \
    exchg (x4 + 1, y4 + 1); \
    exchg (x4 + 2, y4 + 2); \
    exchg (x4 + 3, y4 + 3); \
  }
  for (u32 i = 1; i < r / 1; i++)

  for (u32 i = 1; i < r / 2; i++)

    const u32 xr1 = (r * 2) - 1 - x;
    const u32 yr1 = (r * 2) - 1 - y;
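
/**
 * scrypt SMix with a time-memory trade-off: only every tmto-th row of V is
 * stored, and lookups recompute the skipped BlockMix steps; V is striped
 * across phy lanes so neighbouring work-items write different rows
 */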
static void scrypt_smix (uint4 *X, uint4 *T, const u32 N, const u32 r, const u32 tmto, const u32 phy, __global uint4 *V)
{
  const u32 state_cnt = GET_STATE_CNT (r);

  const u32 state_cnt4 = state_cnt / 4;

  #define Coord(x,y,z) (((x) * zSIZE) + ((y) * zSIZE * xSIZE) + (z))
  #define CO Coord(x,y,z)

  const u32 xSIZE = phy;
  const u32 ySIZE = N / tmto;
  const u32 zSIZE = state_cnt4;

  const u32 gid = get_global_id (0);

  const u32 x = gid % xSIZE;

  for (u32 i = 0; i < state_cnt4; i += 4)
  {
    T[0] = (uint4) (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
    T[1] = (uint4) (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
    T[2] = (uint4) (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
    T[3] = (uint4) (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);

    X[i + 0] = T[0];
    X[i + 1] = T[1];
    X[i + 2] = T[2];
    X[i + 3] = T[3];
  }

  for (u32 y = 0; y < ySIZE; y++)
  {
    for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

    for (u32 i = 0; i < tmto; i++) salsa_r (X, r);
  }
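
  /* j = Integerify (X) mod N selects a row; the km = j mod tmto skipped BlockMix steps are recomputed from the stored row */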
  for (u32 i = 0; i < N; i++)
  {
    const u32 k = X[zSIZE - 4].x & (N - 1);

    const u32 y = k / tmto;

    const u32 km = k - (y * tmto);

    for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

    for (u32 i = 0; i < km; i++) salsa_r (T, r);

    for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

    salsa_r (X, r);
  }

  for (u32 i = 0; i < state_cnt4; i += 4)
  {
    T[0] = (uint4) (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
    T[1] = (uint4) (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
    T[2] = (uint4) (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
    T[3] = (uint4) (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);

    X[i + 0] = T[0];
    X[i + 1] = T[1];
    X[i + 2] = T[2];
    X[i + 3] = T[3];
  }
}
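
/**
 * m08900_init: first PBKDF2-HMAC-SHA256 pass of scrypt, deriving
 * B = PBKDF2 (password, salt, 1 iteration, p * 128 * r bytes) into tmps.P
 */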
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08900_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global scrypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global uint4 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32x w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = pws[gid].i[14];
  w3[3] = pws[gid].i[15];

  u32 salt_buf0[4];

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[3];

  u32 salt_buf1[4];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[7];

  const u32 salt_len = salt_bufs[salt_pos].salt_len;
  const u32 scrypt_r = SCRYPT_R;
  const u32 scrypt_p = SCRYPT_P;
  //const u32 scrypt_N = SCRYPT_N;

  //const u32 state_cnt = GET_STATE_CNT (scrypt_r);
  const u32 scrypt_cnt = GET_SCRYPT_CNT (scrypt_r, scrypt_p);
  //const u32 smix_cnt = GET_SMIX_CNT (scrypt_r, scrypt_N);
  /**
   * 1st pbkdf2, creates B
   */
  w0[0] = swap_workaround (w0[0]);
  w0[1] = swap_workaround (w0[1]);
  w0[2] = swap_workaround (w0[2]);
  w0[3] = swap_workaround (w0[3]);
  w1[0] = swap_workaround (w1[0]);
  w1[1] = swap_workaround (w1[1]);
  w1[2] = swap_workaround (w1[2]);
  w1[3] = swap_workaround (w1[3]);
  w2[0] = swap_workaround (w2[0]);
  w2[1] = swap_workaround (w2[1]);
  w2[2] = swap_workaround (w2[2]);
  w2[3] = swap_workaround (w2[3]);
  w3[0] = swap_workaround (w3[0]);
  w3[1] = swap_workaround (w3[1]);
  w3[2] = swap_workaround (w3[2]);
  w3[3] = swap_workaround (w3[3]);

  u32x ipad[8];
  u32x opad[8];

  hmac_sha256_pad (w0, w1, w2, w3, ipad, opad);
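
  /* with a single PBKDF2 iteration, each 32-byte output block is just T_j = HMAC (password, salt || BE32 (j + 1)) */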
  for (u32 i = 0, j = 0, k = 0; i < scrypt_cnt; i += 8, j += 1, k += 2)
  {
    w0[0] = salt_buf0[0];
    w0[1] = salt_buf0[1];
    w0[2] = salt_buf0[2];
    w0[3] = salt_buf0[3];
    w1[0] = salt_buf1[0];
    w1[1] = salt_buf1[1];
    w1[2] = salt_buf1[2];
    w1[3] = salt_buf1[3];
    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    u32 append[2];

    append[0] = swap_workaround (j + 1);
    append[1] = 0x80;

    memcat8 (w0, w1, w2, w3, salt_len, append);

    w0[0] = swap_workaround (w0[0]);
    w0[1] = swap_workaround (w0[1]);
    w0[2] = swap_workaround (w0[2]);
    w0[3] = swap_workaround (w0[3]);
    w1[0] = swap_workaround (w1[0]);
    w1[1] = swap_workaround (w1[1]);
    w1[2] = swap_workaround (w1[2]);
    w1[3] = swap_workaround (w1[3]);
    w2[0] = swap_workaround (w2[0]);
    w2[1] = swap_workaround (w2[1]);
    w2[2] = swap_workaround (w2[2]);
    w2[3] = swap_workaround (w2[3]);
    w3[0] = swap_workaround (w3[0]);
    w3[1] = swap_workaround (w3[1]);
    w3[2] = 0;
    w3[3] = (64 + salt_len + 4) * 8;

    u32x digest[8];

    hmac_sha256_run (w0, w1, w2, w3, ipad, opad, digest);

    const uint4 tmp0 = (uint4) (digest[0], digest[1], digest[2], digest[3]);
    const uint4 tmp1 = (uint4) (digest[4], digest[5], digest[6], digest[7]);

    barrier (CLK_GLOBAL_MEM_FENCE);

    tmps[gid].P[k + 0] = tmp0;
    tmps[gid].P[k + 1] = tmp1;
  }
}
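
/**
 * m08900_loop: the memory-hard phase; each 128 * r byte slice of B is
 * byte-swapped into registers, pushed through scrypt_smix against the
 * global V buffer, and written back
 */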
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08900_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global scrypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global uint4 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 scrypt_phy = salt_bufs[salt_pos].scrypt_phy;

  const u32 state_cnt  = GET_STATE_CNT  (SCRYPT_R);
  const u32 scrypt_cnt = GET_SCRYPT_CNT (SCRYPT_R, SCRYPT_P);

  const u32 state_cnt4  = state_cnt  / 4;
  const u32 scrypt_cnt4 = scrypt_cnt / 4;

  uint4 X[GET_STATE_CNT (SCRYPT_R) / 4];
  uint4 T[GET_STATE_CNT (SCRYPT_R) / 4];

  for (int z = 0; z < state_cnt4; z++) X[z] = swap_workaround (tmps[gid].P[z]);

  scrypt_smix (X, T, SCRYPT_N, SCRYPT_R, SCRYPT_TMTO, scrypt_phy, d_scryptV_buf);

  for (int z = 0; z < state_cnt4; z++) tmps[gid].P[z] = swap_workaround (X[z]);

  for (int i = state_cnt4; i < scrypt_cnt4; i += state_cnt4)
  {
    for (int z = 0; z < state_cnt4; z++) X[z] = swap_workaround (tmps[gid].P[i + z]);

    scrypt_smix (X, T, SCRYPT_N, SCRYPT_R, SCRYPT_TMTO, scrypt_phy, d_scryptV_buf);

    for (int z = 0; z < state_cnt4; z++) tmps[gid].P[i + z] = swap_workaround (X[z]);
  }
}
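
/**
 * m08900_comp: second PBKDF2 pass; the password keys HMAC again, the mixed
 * B buffer is the message, and the first 32 bytes of output form the
 * candidate digest checked by VECT_COMPARE_M
 */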
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m08900_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global scrypt_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global uint4 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32x w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = pws[gid].i[14];
  w3[3] = pws[gid].i[15];

  const u32 scrypt_r = SCRYPT_R;
  const u32 scrypt_p = SCRYPT_P;
  //const u32 scrypt_N = SCRYPT_N;

  const u32 scrypt_cnt = GET_SCRYPT_CNT (scrypt_r, scrypt_p);

  const u32 scrypt_cnt4 = scrypt_cnt / 4;
  /**
   * 2nd pbkdf2, derives the output key
   */
  w0[0] = swap_workaround (w0[0]);
  w0[1] = swap_workaround (w0[1]);
  w0[2] = swap_workaround (w0[2]);
  w0[3] = swap_workaround (w0[3]);
  w1[0] = swap_workaround (w1[0]);
  w1[1] = swap_workaround (w1[1]);
  w1[2] = swap_workaround (w1[2]);
  w1[3] = swap_workaround (w1[3]);
  w2[0] = swap_workaround (w2[0]);
  w2[1] = swap_workaround (w2[1]);
  w2[2] = swap_workaround (w2[2]);
  w2[3] = swap_workaround (w2[3]);
  w3[0] = swap_workaround (w3[0]);
  w3[1] = swap_workaround (w3[1]);
  w3[2] = swap_workaround (w3[2]);
  w3[3] = swap_workaround (w3[3]);

  u32x ipad[8];
  u32x opad[8];

  hmac_sha256_pad (w0, w1, w2, w3, ipad, opad);
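
  /* the inner hash absorbs the byte-swapped B buffer, 64 bytes (four uint4) per transform */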
  for (u32 l = 0; l < scrypt_cnt4; l += 4)
  {
    barrier (CLK_GLOBAL_MEM_FENCE);

    uint4 tmp;

    tmp = tmps[gid].P[l + 0];

    tmp = tmps[gid].P[l + 1];

    tmp = tmps[gid].P[l + 2];

    tmp = tmps[gid].P[l + 3];

    sha256_transform (w0, w1, w2, w3, ipad);
  }

  w3[3] = (64 + (scrypt_cnt * 4) + 4) * 8;
  u32x digest[8];

  hmac_sha256_run (w0, w1, w2, w3, ipad, opad, digest);

  const u32x r0 = swap_workaround (digest[DGST_R0]);
  const u32x r1 = swap_workaround (digest[DGST_R1]);
  const u32x r2 = swap_workaround (digest[DGST_R2]);
  const u32x r3 = swap_workaround (digest[DGST_R3]);
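
  /* expands to the vector-width-specific comparison code selected at the top of this file */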
  #include VECT_COMPARE_M
}