Added SIMD code for all generic PBKDF2-HMAC-* modes
include/kernel_functions.c
/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 * License.....: MIT
 */

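/**
 * Vendor-specific paths: the optimized *o variants (e.g. MD4_Fo/MD4_Go) map
 * onto bitselect() on AMD, while the NV and generic paths fall back to the
 * plain boolean expressions. Scalar helpers carry the _S suffix.
 */

/**
 * MD4
 *
 * Round functions F/G/H and the step macros. Illustrative use in a kernel
 * (the w0[] buffer and the MD4C00/MD4S00 constants are assumed to come from
 * the calling kernel, they are not defined here):
 *
 *   MD4_STEP (MD4_Fo, a, b, c, d, w0[0], MD4C00, MD4S00);
 */
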
#if defined _MD4_ || defined _DCC2_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _MS_DRSR_ || defined _KRB5TGS_

#define MD4_F_S(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G_S(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H_S(x,y,z) ((x) ^ (y) ^ (z))

#ifdef IS_NV
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD4_Fo(x,y,z) (MD4_F((x), (y), (z)))
#define MD4_Go(x,y,z) (MD4_G((x), (y), (z)))
#endif

#ifdef IS_AMD
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD4_Fo(x,y,z) (bitselect ((z), (y), (x)))
#define MD4_Go(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

#ifdef IS_GENERIC
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD4_Fo(x,y,z) (MD4_F((x), (y), (z)))
#define MD4_Go(x,y,z) (MD4_G((x), (y), (z)))
#endif

#define MD4_STEP_S(f,a,b,c,d,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32_S (a, s); \
}

#define MD4_STEP(f,a,b,c,d,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
}

#define MD4_STEP0(f,a,b,c,d,K,s) \
{ \
  a += K; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
}

#endif

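/**
 * MD5
 *
 * Round functions F/G/H/I in their select form (e.g. F = z ^ (x & (y ^ z))).
 * The AMD path maps F, G and the negation inside I onto bitselect().
 */
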
#if defined _MD5_ || defined _MD5H_ || defined _SAPB_ || defined _OLDOFFICE01_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _PBKDF2_MD5_ || defined _KRB5TGS_

#define MD5_F_S(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G_S(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H_S(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_I_S(x,y,z) ((y) ^ ((x) | ~(z)))

#ifdef IS_NV
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_I(x,y,z) ((y) ^ ((x) | ~(z)))
#define MD5_Fo(x,y,z) (MD5_F((x), (y), (z)))
#define MD5_Go(x,y,z) (MD5_G((x), (y), (z)))
#endif

#ifdef IS_AMD
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_I(x,y,z) (bitselect (0xffffffffU, (x), (z)) ^ (y))
#define MD5_Fo(x,y,z) (bitselect ((z), (y), (x)))
#define MD5_Go(x,y,z) (bitselect ((y), (x), (z)))
#endif

#ifdef IS_GENERIC
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_I(x,y,z) ((y) ^ ((x) | ~(z)))
#define MD5_Fo(x,y,z) (MD5_F((x), (y), (z)))
#define MD5_Go(x,y,z) (MD5_G((x), (y), (z)))
#endif

#define MD5_STEP_S(f,a,b,c,d,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32_S (a, s); \
  a += b; \
}

#define MD5_STEP(f,a,b,c,d,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
  a += b; \
}

#define MD5_STEP0(f,a,b,c,d,K,s) \
{ \
  a += K; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
  a += b; \
}
#endif

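/**
 * SHA1
 *
 * F0 = Ch, F1 = parity, F2 = Maj. SHA1_STEP0 drops the message word,
 * SHA1_STEPX drops the round constant, and SHA1_STEP_PE/SHA1_STEP_PB
 * together make up one full step split into two halves.
 */
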
#if defined _SHA1_ || defined _SAPG_ || defined _OFFICE2007_ || defined _OFFICE2010_ || defined _OLDOFFICE34_ || defined _ANDROIDFDE_ || defined _DCC2_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _PSAFE2_ || defined _LOTUS8_ || defined _PBKDF2_SHA1_ || defined _RAR3_ || defined _SHA256_SHA1_ || defined _ZIP2_

#ifdef IS_NV
#define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
#define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA1_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA1_F2o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

#ifdef IS_GENERIC
#define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
#define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
#endif

#define SHA1_STEP_S(f,a,b,c,d,e,x) \
{ \
  e += K; \
  e += x; \
  e += f (b, c, d); \
  e += rotl32_S (a, 5u); \
  b = rotl32_S (b, 30u); \
}

#define SHA1_STEP(f,a,b,c,d,e,x) \
{ \
  e += K; \
  e += x; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
  b = rotl32 (b, 30u); \
}

#define SHA1_STEP0(f,a,b,c,d,e,x) \
{ \
  e += K; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
  b = rotl32 (b, 30u); \
}

#define SHA1_STEPX(f,a,b,c,d,e,x) \
{ \
  e += x; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
  b = rotl32 (b, 30u); \
}

#define SHA1_STEP_PE(f,a,b,c,d,e,x) \
{ \
  e += x; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
}

#define SHA1_STEP_PB(f,a,b,c,d,e,x) \
{ \
  e += K; \
  b = rotl32 (b, 30u); \
}
#endif

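/**
 * SHA256
 *
 * S0/S1 are the message-schedule sigmas (written as left rotates), S2/S3 the
 * big sigmas used in the compression step; F0 = Maj, F1 = Ch. Illustrative
 * expansion of the first scheduled word (the w[] buffer is assumed to come
 * from the calling kernel):
 *
 *   w[16] = SHA256_EXPAND (w[14], w[9], w[1], w[0]);
 */
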
#if defined _SHA256_ || defined _PDF17L8_ || defined _SEVEN_ZIP_ || defined _ANDROIDFDE_ || defined _CLOUDKEY_ || defined _SCRYPT_ || defined _PBKDF2_SHA256_ || defined _SHA256_SHA1_ || defined _MS_DRSR_ || defined _ANDROIDFDE_SAMSUNG_ || defined _RAR5_ || defined _KEEPASS_

#define SHIFT_RIGHT_32(x,n) ((x) >> (n))

#define SHA256_S0_S(x) (rotl32_S ((x), 25u) ^ rotl32_S ((x), 14u) ^ SHIFT_RIGHT_32 ((x), 3u))
#define SHA256_S1_S(x) (rotl32_S ((x), 15u) ^ rotl32_S ((x), 13u) ^ SHIFT_RIGHT_32 ((x), 10u))
#define SHA256_S2_S(x) (rotl32_S ((x), 30u) ^ rotl32_S ((x), 19u) ^ rotl32_S ((x), 10u))
#define SHA256_S3_S(x) (rotl32_S ((x), 26u) ^ rotl32_S ((x), 21u) ^ rotl32_S ((x), 7u))

#define SHA256_S0(x) (rotl32 ((x), 25u) ^ rotl32 ((x), 14u) ^ SHIFT_RIGHT_32 ((x), 3u))
#define SHA256_S1(x) (rotl32 ((x), 15u) ^ rotl32 ((x), 13u) ^ SHIFT_RIGHT_32 ((x), 10u))
#define SHA256_S2(x) (rotl32 ((x), 30u) ^ rotl32 ((x), 19u) ^ rotl32 ((x), 10u))
#define SHA256_S3(x) (rotl32 ((x), 26u) ^ rotl32 ((x), 21u) ^ rotl32 ((x), 7u))

#ifdef IS_NV
#define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA256_F0o(x,y,z) (SHA256_F0 ((x), (y), (z)))
#define SHA256_F1o(x,y,z) (SHA256_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA256_F0o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#define SHA256_F1o(x,y,z) (bitselect ((z), (y), (x)))
#endif

#ifdef IS_GENERIC
#define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA256_F0o(x,y,z) (SHA256_F0 ((x), (y), (z)))
#define SHA256_F1o(x,y,z) (SHA256_F1 ((x), (y), (z)))
#endif

#define SHA256_STEP_S(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  h += K; \
  h += x; \
  h += SHA256_S3_S (e); \
  h += F1 (e,f,g); \
  d += h; \
  h += SHA256_S2_S (a); \
  h += F0 (a,b,c); \
}

#define SHA256_EXPAND_S(x,y,z,w) (SHA256_S1_S (x) + y + SHA256_S0_S (z) + w)

#define SHA256_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  h += K; \
  h += x; \
  h += SHA256_S3 (e); \
  h += F1 (e,f,g); \
  d += h; \
  h += SHA256_S2 (a); \
  h += F0 (a,b,c); \
}

#define SHA256_EXPAND(x,y,z,w) (SHA256_S1 (x) + y + SHA256_S0 (z) + w)

#endif

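/**
 * SHA384
 *
 * 64-bit sigmas via rotr64(): S0/S1 are the big sigmas used in the step,
 * S2/S3 the message-schedule sigmas used by SHA384_EXPAND. Note F0 = Ch and
 * F1 = Maj here, the reverse of the SHA256 naming above.
 */
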
#if defined _SHA384_ || defined _PDF17L8_

#define SHIFT_RIGHT_64(x,n) ((x) >> (n))

#define SHA384_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
#define SHA384_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
#define SHA384_S2(x) (rotr64 ((x), 1) ^ rotr64 ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA384_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA384_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA384_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))

#ifdef IS_NV
#define SHA384_F0o(x,y,z) (SHA384_F0 ((x), (y), (z)))
#define SHA384_F1o(x,y,z) (SHA384_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA384_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA384_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

#ifdef IS_GENERIC
#define SHA384_F0o(x,y,z) (SHA384_F0 ((x), (y), (z)))
#define SHA384_F1o(x,y,z) (SHA384_F1 ((x), (y), (z)))
#endif

#define SHA384_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  h += K; \
  h += x; \
  h += SHA384_S1 (e); \
  h += F0 (e, f, g); \
  d += h; \
  h += SHA384_S0 (a); \
  h += F1 (a, b, c); \
}

#define SHA384_EXPAND(x,y,z,w) (SHA384_S3 (x) + y + SHA384_S2 (z) + w)
#endif

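/**
 * SHA512
 *
 * Same structure as SHA384 above, plus scalar (_S) variants of the sigmas,
 * the step macro and the expansion macro.
 */
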
#if defined _SHA512_ || defined _CLOUDKEY_ || defined _OFFICE2013_ || defined _PDF17L8_ || defined _PBKDF2_SHA512_

#define SHIFT_RIGHT_64(x,n) ((x) >> (n))

#define SHA512_S0_S(x) (rotr64_S ((x), 28) ^ rotr64_S ((x), 34) ^ rotr64_S ((x), 39))
#define SHA512_S1_S(x) (rotr64_S ((x), 14) ^ rotr64_S ((x), 18) ^ rotr64_S ((x), 41))
#define SHA512_S2_S(x) (rotr64_S ((x), 1) ^ rotr64_S ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA512_S3_S(x) (rotr64_S ((x), 19) ^ rotr64_S ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA512_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
#define SHA512_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
#define SHA512_S2(x) (rotr64 ((x), 1) ^ rotr64 ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA512_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA512_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA512_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))

#ifdef IS_NV
#define SHA512_F0o(x,y,z) (SHA512_F0 ((x), (y), (z)))
#define SHA512_F1o(x,y,z) (SHA512_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA512_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA512_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

#ifdef IS_GENERIC
#define SHA512_F0o(x,y,z) (SHA512_F0 ((x), (y), (z)))
#define SHA512_F1o(x,y,z) (SHA512_F1 ((x), (y), (z)))
#endif

#define SHA512_STEP_S(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  h += K; \
  h += x; \
  h += SHA512_S1_S (e); \
  h += F0 (e, f, g); \
  d += h; \
  h += SHA512_S0_S (a); \
  h += F1 (a, b, c); \
}

#define SHA512_EXPAND_S(x,y,z,w) (SHA512_S3_S (x) + y + SHA512_S2_S (z) + w)

#define SHA512_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  h += K; \
  h += x; \
  h += SHA512_S1 (e); \
  h += F0 (e, f, g); \
  d += h; \
  h += SHA512_S0 (a); \
  h += F1 (a, b, c); \
}

#define SHA512_EXPAND(x,y,z,w) (SHA512_S3 (x) + y + SHA512_S2 (z) + w)
#endif

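/**
 * RIPEMD160
 *
 * Round functions F..J; the G and I selects get bitselect() variants on AMD.
 * ROTATE_LEFT_WORKAROUND_BUG spells out the rotate with shifts, presumably
 * to sidestep a compiler problem with the native rotate in that step.
 */
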
#ifdef _RIPEMD160_

#ifdef IS_NV
#define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
#define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
#define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
#define RIPEMD160_Go(x,y,z) (RIPEMD160_G ((x), (y), (z)))
#define RIPEMD160_Io(x,y,z) (RIPEMD160_I ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
#define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
#define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
#define RIPEMD160_Go(x,y,z) (bitselect ((z), (y), (x)))
#define RIPEMD160_Io(x,y,z) (bitselect ((y), (x), (z)))
#endif

#ifdef IS_GENERIC
#define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
#define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
#define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
#define RIPEMD160_Go(x,y,z) (RIPEMD160_G ((x), (y), (z)))
#define RIPEMD160_Io(x,y,z) (RIPEMD160_I ((x), (y), (z)))
#endif

#define RIPEMD160_STEP(f,a,b,c,d,e,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
  a += e; \
  c = rotl32 (c, 10u); \
}

#define ROTATE_LEFT_WORKAROUND_BUG(a,n) ((a << n) | (a >> (32 - n)))

#define RIPEMD160_STEP_WORKAROUND_BUG(f,a,b,c,d,e,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = ROTATE_LEFT_WORKAROUND_BUG (a, s); \
  a += e; \
  c = rotl32 (c, 10u); \
}

#endif