44cf1b9409db3c1183f2dbc7d8acfab281b8a361
[hashcat.git] / include / kernel_functions.c
1 /**
2 * Author......: Jens Steube <jens.steube@gmail.com>
3 * License.....: MIT
4 */
5
/* MD4 round functions (RFC 1320) and step macros.
 *
 * Naming convention used throughout this file:
 *   *_S    scalar/plain expression, always the textbook form
 *   F/G/H  per-target optimized form, selected by IS_NV / IS_AMD / IS_GENERIC
 *   *o     "outer" variant used where a target-specific selector (e.g. AMD
 *          bitselect) is profitable even when the inner form is plain
 *
 * Boolean identities (visible from the expressions below):
 *   F(x,y,z) = (x & y) | (~x & z)        -- Ch: bitwise x ? y : z
 *   G(x,y,z) = majority of x, y, z
 *   H(x,y,z) = x ^ y ^ z                 -- parity
 */
6 #if defined _MD4_ || defined _DCC2_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _MS_DRSR_
7
8 #define MD4_F_S(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
9 #define MD4_G_S(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
10 #define MD4_H_S(x,y,z) ((x) ^ (y) ^ (z))
11
/* NVIDIA: on sm_50+ use 3-input LUT helpers (lut3_xx — presumably wrappers
 * around the LOP3.LUT instruction with the hex suffix as the truth table;
 * confirm against the lut3_* definitions elsewhere in the tree). */
12 #ifdef IS_NV
13 #if CUDA_ARCH >= 500
14 #define MD4_F(x,y,z) lut3_ca ((x), (y), (z))
15 #define MD4_G(x,y,z) lut3_e8 ((x), (y), (z))
16 #define MD4_H(x,y,z) lut3_96 ((x), (y), (z))
17 #define MD4_H1(x,y,z) lut3_96 ((x), (y), (z))
18 #define MD4_H2(x,y,z) lut3_96 ((x), (y), (z))
19 #else
20 #define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
21 #define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
22 #define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
/* H1/H2 pair: caches (x ^ y) in a caller-provided `tmp2` variable so two
 * consecutive H rounds sharing an operand reuse one XOR.  Callers must
 * declare tmp2 and invoke H1 before H2. */
23 #define MD4_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
24 #define MD4_H2(x,y,z) ((x) ^ tmp2)
25 #endif
26 #define MD4_Fo(x,y,z) (MD4_F((x), (y), (z)))
27 #define MD4_Go(x,y,z) (MD4_G((x), (y), (z)))
28 #endif
29
/* AMD: bitselect(a,b,c) = (a & ~c) | (b & c).
 *   Fo: bitselect(z,y,x) == Ch(x,y,z)
 *   Go: bitselect(x,y,x^z) == Maj(x,y,z)  (standard identity) */
30 #ifdef IS_AMD
31 #define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
32 #define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
33 #define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
34 #define MD4_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
35 #define MD4_H2(x,y,z) ((x) ^ tmp2)
36 #define MD4_Fo(x,y,z) (bitselect ((z), (y), (x)))
37 #define MD4_Go(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
38 #endif
39
/* Generic fallback: plain expressions everywhere; H1/H2 degrade to H
 * (no tmp2 caching). */
40 #ifdef IS_GENERIC
41 #define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
42 #define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
43 #define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
44 #define MD4_H1(x,y,z) (MD4_H((x), (y), (z)))
45 #define MD4_H2(x,y,z) (MD4_H((x), (y), (z)))
46 #define MD4_Fo(x,y,z) (MD4_F((x), (y), (z)))
47 #define MD4_Go(x,y,z) (MD4_G((x), (y), (z)))
48 #endif
49
/* One MD4 step: a = rotl32(a + f(b,c,d) + x + K, s).
 * MD4_STEP0 is the same step with the message word omitted (x == 0),
 * saving one add for zero words. */
50 #define MD4_STEP(f,a,b,c,d,x,K,s) \
51 { \
52 a += K; \
53 a += x; \
54 a += f (b, c, d); \
55 a = rotl32 (a, s); \
56 }
57
58 #define MD4_STEP0(f,a,b,c,d,K,s) \
59 { \
60 a += K; \
61 a += f (b, c, d); \
62 a = rotl32 (a, s); \
63 }
64
65 #endif
66
/* MD5 round functions (RFC 1321) and step macros.
 * Plain forms (the *_S scalar variants):
 *   F(x,y,z) = z ^ (x & (y ^ z))   -- Ch: bitwise x ? y : z
 *   G(x,y,z) = y ^ (z & (x ^ y))   -- bitwise z ? x : y
 *   H(x,y,z) = x ^ y ^ z           -- parity
 *   I(x,y,z) = y ^ (x | ~z)
 * Per-target selection mirrors the MD4 section above: NV sm_50+ uses
 * lut3_* 3-input LUT helpers (hex suffix presumably the LOP3 truth
 * table — confirm against the lut3_* definitions), AMD uses bitselect. */
67 #if defined _MD5_ || defined _MD5H_ || defined _SAPB_ || defined _OLDOFFICE01_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _PBKDF2_MD5_
68
69 #define MD5_F_S(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
70 #define MD5_G_S(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
71 #define MD5_H_S(x,y,z) ((x) ^ (y) ^ (z))
72 #define MD5_I_S(x,y,z) ((y) ^ ((x) | ~(z)))
73
74 #ifdef IS_NV
75 #if CUDA_ARCH >= 500
76 #define MD5_F(x,y,z) lut3_ca ((x), (y), (z))
77 #define MD5_G(x,y,z) lut3_e4 ((x), (y), (z))
78 #define MD5_H(x,y,z) lut3_96 ((x), (y), (z))
79 #define MD5_H1(x,y,z) lut3_96 ((x), (y), (z))
80 #define MD5_H2(x,y,z) lut3_96 ((x), (y), (z))
81 #define MD5_I(x,y,z) lut3_39 ((x), (y), (z))
82 #else
83 #define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
84 #define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
85 #define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
/* H1/H2 pair: caches (x ^ y) in a caller-provided `tmp2` so two adjacent
 * H rounds reuse one XOR; caller declares tmp2 and calls H1 before H2. */
86 #define MD5_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
87 #define MD5_H2(x,y,z) ((x) ^ tmp2)
88 #define MD5_I(x,y,z) ((y) ^ ((x) | ~(z)))
89 #endif
90 #define MD5_Fo(x,y,z) (MD5_F((x), (y), (z)))
91 #define MD5_Go(x,y,z) (MD5_G((x), (y), (z)))
92 #endif
93
/* AMD: bitselect(a,b,c) = (a & ~c) | (b & c).
 *   I:  bitselect(0xffffffff, x, z) == (x | ~z), then ^ y
 *   Fo: bitselect(z,y,x) == Ch(x,y,z)
 *   Go: bitselect(y,x,z) == bitwise z ? x : y == G(x,y,z) */
94 #ifdef IS_AMD
95 #define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
96 #define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
97 #define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
98 #define MD5_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
99 #define MD5_H2(x,y,z) ((x) ^ tmp2)
100 #define MD5_I(x,y,z) (bitselect (0xffffffffU, (x), (z)) ^ (y))
101 #define MD5_Fo(x,y,z) (bitselect ((z), (y), (x)))
102 #define MD5_Go(x,y,z) (bitselect ((y), (x), (z)))
103 #endif
104
/* Generic fallback: plain expressions; H1/H2 degrade to H (no caching). */
105 #ifdef IS_GENERIC
106 #define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
107 #define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
108 #define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
109 #define MD5_H1(x,y,z) (MD5_H((x), (y), (z)))
110 #define MD5_H2(x,y,z) (MD5_H((x), (y), (z)))
111 #define MD5_I(x,y,z) ((y) ^ ((x) | ~(z)))
112 #define MD5_Fo(x,y,z) (MD5_F((x), (y), (z)))
113 #define MD5_Go(x,y,z) (MD5_G((x), (y), (z)))
114 #endif
115
/* One MD5 step: a = b + rotl32(a + f(b,c,d) + x + K, s).
 * MD5_STEP0 omits the message word (x == 0), saving one add. */
116 #define MD5_STEP(f,a,b,c,d,x,K,s) \
117 { \
118 a += K; \
119 a += x; \
120 a += f (b, c, d); \
121 a = rotl32 (a, s); \
122 a += b; \
123 }
124
125 #define MD5_STEP0(f,a,b,c,d,K,s) \
126 { \
127 a += K; \
128 a += f (b, c, d); \
129 a = rotl32 (a, s); \
130 a += b; \
131 }
132 #endif
133
/* SHA-1 round functions (FIPS 180-4) and step macros.
 *   F0(x,y,z) = Ch(x,y,z)  = z ^ (x & (y ^ z))   rounds  0..19
 *   F1(x,y,z) = parity     = x ^ y ^ z           rounds 20..39, 60..79
 *   F2(x,y,z) = Maj(x,y,z)                       rounds 40..59
 * Per-target selection as in the MD4/MD5 sections: NV sm_50+ uses lut3_*
 * 3-input LUT helpers, AMD uses bitselect() for the *o variants. */
134 #if defined _SHA1_ || defined _SAPG_ || defined _OFFICE2007_ || defined _OFFICE2010_ || defined _OLDOFFICE34_ || defined _ANDROIDFDE_ || defined _DCC2_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _PSAFE2_ || defined _LOTUS8_ || defined _PBKDF2_SHA1_ || defined _RAR3_ || defined _SHA256_SHA1_
135
136 #ifdef IS_NV
137 #if CUDA_ARCH >= 500
138 #define SHA1_F0(x,y,z) lut3_ca ((x), (y), (z))
139 #define SHA1_F1(x,y,z) lut3_96 ((x), (y), (z))
140 #define SHA1_F2(x,y,z) lut3_e8 ((x), (y), (z))
141 #else
142 #define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
143 #define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
144 #define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
145 #endif
146 #define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
147 #define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
148 #endif
149
/* AMD: bitselect(z,y,x) == Ch(x,y,z); bitselect(x,y,x^z) == Maj(x,y,z). */
150 #ifdef IS_AMD
151 #define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
152 #define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
153 #define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
154 #define SHA1_F0o(x,y,z) (bitselect ((z), (y), (x)))
155 #define SHA1_F2o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
156 #endif
157
158 #ifdef IS_GENERIC
159 #define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
160 #define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
161 #define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
162 #define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
163 #define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
164 #endif
165
/* One SHA-1 step: e += rotl32(a,5) + f(b,c,d) + x + K; b = rotl32(b,30).
 * K is the per-round constant, expected to be in scope at the call site.
 * Variants:
 *   STEP0   message word omitted (x == 0)
 *   STEPX   round constant omitted (K == 0 or folded in elsewhere)
 *   STEP_PE partial step without K and without the b-rotate
 *   STEP_PB the remainder: adds K to e and rotates b
 * PE/PB appear to split one step across two call sites (presumably for
 * early-exit / precompute tricks in the callers — confirm at call sites). */
166 #define SHA1_STEP(f,a,b,c,d,e,x) \
167 { \
168 e += K; \
169 e += x; \
170 e += f (b, c, d); \
171 e += rotl32 (a, 5u); \
172 b = rotl32 (b, 30u); \
173 }
174
175 #define SHA1_STEP0(f,a,b,c,d,e,x) \
176 { \
177 e += K; \
178 e += f (b, c, d); \
179 e += rotl32 (a, 5u); \
180 b = rotl32 (b, 30u); \
181 }
182
183 #define SHA1_STEPX(f,a,b,c,d,e,x) \
184 { \
185 e += x; \
186 e += f (b, c, d); \
187 e += rotl32 (a, 5u); \
188 b = rotl32 (b, 30u); \
189 }
190
191 #define SHA1_STEP_PE(f,a,b,c,d,e,x) \
192 { \
193 e += x; \
194 e += f (b, c, d); \
195 e += rotl32 (a, 5u); \
196 }
197
198 #define SHA1_STEP_PB(f,a,b,c,d,e,x) \
199 { \
200 e += K; \
201 b = rotl32 (b, 30u); \
202 }
203 #endif
204
/* SHA-256 (FIPS 180-4) sigma functions, round functions, step and
 * message-schedule macros.  Right-rotations are expressed as left
 * rotations by (32 - n):
 *   S0 = sigma0 = rotr7  ^ rotr18 ^ shr3    (rotl25 ^ rotl14 ^ shr3)
 *   S1 = sigma1 = rotr17 ^ rotr19 ^ shr10   (rotl15 ^ rotl13 ^ shr10)
 *   S2 = Sigma0 = rotr2  ^ rotr13 ^ rotr22  (rotl30 ^ rotl19 ^ rotl10)
 *   S3 = Sigma1 = rotr6  ^ rotr11 ^ rotr25  (rotl26 ^ rotl21 ^ rotl7)
 *   F0 = Maj(x,y,z), F1 = Ch(x,y,z) */
205 #if defined _SHA256_ || defined _PDF17L8_ || defined _SEVEN_ZIP_ || defined _ANDROIDFDE_ || defined _CLOUDKEY_ || defined _SCRYPT_ || defined _PBKDF2_SHA256_ || defined _SHA256_SHA1_ || defined _MS_DRSR_ || defined _ANDROIDFDE_SAMSUNG_ || defined _RAR5_
206
207 #define SHIFT_RIGHT_32(x,n) ((x) >> (n))
208
209 #define SHA256_S0(x) (rotl32 ((x), 25u) ^ rotl32 ((x), 14u) ^ SHIFT_RIGHT_32 ((x), 3u))
210 #define SHA256_S1(x) (rotl32 ((x), 15u) ^ rotl32 ((x), 13u) ^ SHIFT_RIGHT_32 ((x), 10u))
211 #define SHA256_S2(x) (rotl32 ((x), 30u) ^ rotl32 ((x), 19u) ^ rotl32 ((x), 10u))
212 #define SHA256_S3(x) (rotl32 ((x), 26u) ^ rotl32 ((x), 21u) ^ rotl32 ((x), 7u))
213
214 #ifdef IS_NV
215 #if CUDA_ARCH >= 500
216 #define SHA256_F0(x,y,z) lut3_e8 ((x), (y), (z))
217 #define SHA256_F1(x,y,z) lut3_ca ((x), (y), (z))
218 #else
219 #define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
220 #define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
221 #endif
222 #define SHA256_F0o(x,y,z) (SHA256_F0 ((x), (y), (z)))
223 #define SHA256_F1o(x,y,z) (SHA256_F1 ((x), (y), (z)))
224 #endif
225
/* AMD: bitselect(x,y,x^z) == Maj(x,y,z); bitselect(z,y,x) == Ch(x,y,z). */
226 #ifdef IS_AMD
227 #define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
228 #define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
229 #define SHA256_F0o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
230 #define SHA256_F1o(x,y,z) (bitselect ((z), (y), (x)))
231 #endif
232
233 #ifdef IS_GENERIC
234 #define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
235 #define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
236 #define SHA256_F0o(x,y,z) (SHA256_F0 ((x), (y), (z)))
237 #define SHA256_F1o(x,y,z) (SHA256_F1 ((x), (y), (z)))
238 #endif
239
/* One SHA-256 step (register-rotation unrolled form):
 *   t1 accumulates in h: h += K + x + Sigma1(e) + Ch(e,f,g)
 *   d += h            (d becomes the new e)
 *   h += Sigma0(a) + Maj(a,b,c)   (h becomes the new a) */
240 #define SHA256_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
241 { \
242 h += K; \
243 h += x; \
244 h += SHA256_S3 (e); \
245 h += F1 (e,f,g); \
246 d += h; \
247 h += SHA256_S2 (a); \
248 h += F0 (a,b,c); \
249 }
250
/* Message schedule: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */
251 #define SHA256_EXPAND(x,y,z,w) (SHA256_S1 (x) + y + SHA256_S0 (z) + w)
252
253 #endif
254
/* SHA-384 (FIPS 180-4).  SHA-384 uses the SHA-512 round functions, so
 * these match the SHA-512 section below:
 *   S0 = Sigma0 = rotr28 ^ rotr34 ^ rotr39
 *   S1 = Sigma1 = rotr14 ^ rotr18 ^ rotr41
 *   S2 = sigma0 = rotr1  ^ rotr8  ^ shr7
 *   S3 = sigma1 = rotr19 ^ rotr61 ^ shr6
 *   F0 = Ch(x,y,z), F1 = Maj(x,y,z)
 * Note the F0/F1 naming is swapped relative to the SHA-256 section
 * (here F0 is Ch, applied to e,f,g in the step macro). */
255 #if defined _SHA384_ || defined _PDF17L8_
256
257 #define SHIFT_RIGHT_64(x,n) ((x) >> (n))
258
259 #define SHA384_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
260 #define SHA384_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
261 #define SHA384_S2(x) (rotr64 ((x), 1) ^ rotr64 ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
262 #define SHA384_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))
263
264 #define SHA384_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
265 #define SHA384_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
266
267 #ifdef IS_NV
268 #define SHA384_F0o(x,y,z) (SHA384_F0 ((x), (y), (z)))
269 #define SHA384_F1o(x,y,z) (SHA384_F1 ((x), (y), (z)))
270 #endif
271
/* AMD: bitselect(z,y,x) == Ch(x,y,z); bitselect(x,y,x^z) == Maj(x,y,z). */
272 #ifdef IS_AMD
273 #define SHA384_F0o(x,y,z) (bitselect ((z), (y), (x)))
274 #define SHA384_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
275 #endif
276
277 #ifdef IS_GENERIC
278 #define SHA384_F0o(x,y,z) (SHA384_F0 ((x), (y), (z)))
279 #define SHA384_F1o(x,y,z) (SHA384_F1 ((x), (y), (z)))
280 #endif
281
/* One SHA-384 step using a local 64-bit temporary:
 *   temp0 = K + x + h + Sigma1(e) + Ch(e,f,g)   (= T1)
 *   d += temp0                                  (new e)
 *   h  = Sigma0(a) + Maj(a,b,c) + temp0         (new a)
 * u64x is the project's (possibly vectorized) 64-bit word type. */
282 #define SHA384_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
283 { \
284 u64x temp0; \
285 temp0 = K; \
286 temp0 += x; \
287 temp0 += h; \
288 temp0 += SHA384_S1 (e); \
289 temp0 += F0 (e, f, g); \
290 d += temp0; \
291 h = SHA384_S0 (a); \
292 h += F1 (a, b, c); \
293 h += temp0; \
294 }
295
/* Message schedule: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */
296 #define SHA384_EXPAND(x,y,z,w) (SHA384_S3 (x) + y + SHA384_S2 (z) + w)
297 #endif
298
/* SHA-512 (FIPS 180-4).  Same structure as the SHA-384 section above:
 *   S0 = Sigma0, S1 = Sigma1, S2 = sigma0, S3 = sigma1
 *   F0 = Ch(x,y,z), F1 = Maj(x,y,z)
 * SHIFT_RIGHT_64 is intentionally redefined identically here so this
 * section also works when the SHA-384 section is not compiled in. */
299 #if defined _SHA512_ || defined _CLOUDKEY_ || defined _OFFICE2013_ || defined _PDF17L8_ || defined _PBKDF2_SHA512_
300
301 #define SHIFT_RIGHT_64(x,n) ((x) >> (n))
302
303 #define SHA512_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
304 #define SHA512_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
305 #define SHA512_S2(x) (rotr64 ((x), 1) ^ rotr64 ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
306 #define SHA512_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))
307
308 #define SHA512_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
309 #define SHA512_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
310
311 #ifdef IS_NV
312 #define SHA512_F0o(x,y,z) (SHA512_F0 ((x), (y), (z)))
313 #define SHA512_F1o(x,y,z) (SHA512_F1 ((x), (y), (z)))
314 #endif
315
/* AMD: bitselect(z,y,x) == Ch(x,y,z); bitselect(x,y,x^z) == Maj(x,y,z). */
316 #ifdef IS_AMD
317 #define SHA512_F0o(x,y,z) (bitselect ((z), (y), (x)))
318 #define SHA512_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
319 #endif
320
321 #ifdef IS_GENERIC
322 #define SHA512_F0o(x,y,z) (SHA512_F0 ((x), (y), (z)))
323 #define SHA512_F1o(x,y,z) (SHA512_F1 ((x), (y), (z)))
324 #endif
325
/* One SHA-512 step using a local 64-bit temporary:
 *   temp0 = K + x + h + Sigma1(e) + Ch(e,f,g)   (= T1)
 *   d += temp0                                  (new e)
 *   h  = Sigma0(a) + Maj(a,b,c) + temp0         (new a) */
326 #define SHA512_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
327 { \
328 u64x temp0; \
329 temp0 = K; \
330 temp0 += x; \
331 temp0 += h; \
332 temp0 += SHA512_S1 (e); \
333 temp0 += F0 (e, f, g); \
334 d += temp0; \
335 h = SHA512_S0 (a); \
336 h += F1 (a, b, c); \
337 h += temp0; \
338 }
339
/* Message schedule: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */
340 #define SHA512_EXPAND(x,y,z,w) (SHA512_S3 (x) + y + SHA512_S2 (z) + w)
341
/* *_WO variants: same sigma0/sigma1 but expressed with OpenCL rotate()
 * (left-rotate by 64-n == rotr n) instead of rotr64 — presumably a
 * workaround for targets where rotr64 compiles poorly; confirm against
 * the rotr64 definition. */
342 #define SHA512_S2_WO(x) (rotate ((x), 64- 1ull) ^ rotate ((x), 64- 8ull) ^ SHIFT_RIGHT_64 ((x), 7))
343 #define SHA512_S3_WO(x) (rotate ((x), 64-19ull) ^ rotate ((x), 64-61ull) ^ SHIFT_RIGHT_64 ((x), 6))
344
345 #define SHA512_EXPAND_WO(x,y,z,w) (SHA512_S3_WO (x) + y + SHA512_S2_WO (z) + w)
346 #endif
347
/* RIPEMD-160 round functions and step macros.
 *   F(x,y,z) = x ^ y ^ z              -- parity
 *   G(x,y,z) = z ^ (x & (y ^ z))      -- bitwise x ? y : z
 *   H(x,y,z) = (x | ~y) ^ z
 *   I(x,y,z) = y ^ (z & (x ^ y))      -- bitwise z ? x : y
 *   J(x,y,z) = x ^ (y | ~z)
 * Per-target selection as in earlier sections: NV sm_50+ uses lut3_*
 * 3-input LUT helpers, AMD uses bitselect() for the *o variants. */
348 #ifdef _RIPEMD160_
349
350 #ifdef IS_NV
351 #if CUDA_ARCH >= 500
352 #define RIPEMD160_F(x,y,z) lut3_96 ((x), (y), (z))
353 #define RIPEMD160_G(x,y,z) lut3_ca ((x), (y), (z))
354 #define RIPEMD160_H(x,y,z) lut3_59 ((x), (y), (z))
355 #define RIPEMD160_I(x,y,z) lut3_e4 ((x), (y), (z))
356 #define RIPEMD160_J(x,y,z) lut3_2d ((x), (y), (z))
357 #else
358 #define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
359 #define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
360 #define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
361 #define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
362 #define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
363 #endif
364 #define RIPEMD160_Go(x,y,z) (RIPEMD160_G ((x), (y), (z)))
365 #define RIPEMD160_Io(x,y,z) (RIPEMD160_I ((x), (y), (z)))
366 #endif
367
/* AMD: bitselect(z,y,x) == G(x,y,z); bitselect(y,x,z) == I(x,y,z). */
368 #ifdef IS_AMD
369 #define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
370 #define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
371 #define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
372 #define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
373 #define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
374 #define RIPEMD160_Go(x,y,z) (bitselect ((z), (y), (x)))
375 #define RIPEMD160_Io(x,y,z) (bitselect ((y), (x), (z)))
376 #endif
377
378 #ifdef IS_GENERIC
379 #define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
380 #define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
381 #define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
382 #define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
383 #define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
384 #define RIPEMD160_Go(x,y,z) (RIPEMD160_G ((x), (y), (z)))
385 #define RIPEMD160_Io(x,y,z) (RIPEMD160_I ((x), (y), (z)))
386 #endif
387
/* One RIPEMD-160 step:
 *   a = rotl32(a + f(b,c,d) + x + K, s) + e;  c = rotl32(c, 10). */
388 #define RIPEMD160_STEP(f,a,b,c,d,e,x,K,s) \
389 { \
390 a += K; \
391 a += x; \
392 a += f (b, c, d); \
393 a = rotl32 (a, s); \
394 a += e; \
395 c = rotl32 (c, 10u); \
396 }
397
/* Shift-based left rotate, identical in result to rotl32 for 0 < n < 32.
 * The _WORKAROUND_BUG names suggest this dodges a broken rotate on some
 * compiler/driver — the specific platform is not visible here; confirm
 * in commit history before removing. */
398 #define ROTATE_LEFT_WORKAROUND_BUG(a,n) ((a << n) | (a >> (32 - n)))
399
400 #define RIPEMD160_STEP_WORKAROUND_BUG(f,a,b,c,d,e,x,K,s) \
401 { \
402 a += K; \
403 a += x; \
404 a += f (b, c, d); \
405 a = ROTATE_LEFT_WORKAROUND_BUG (a, s); \
406 a += e; \
407 c = rotl32 (c, 10u); \
408 }
409
410 #endif