/* hashcat -- include/kernel_functions.c (commit: "Fix use of LOP3") */
/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 * License.....: MIT
 */

#if defined _MD4_ || defined _DCC2_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _MS_DRSR_

/**
 * MD4 round functions (RFC 1320):
 *   F(x,y,z) = (x & y) | (~x & z)   -- bit selection ("if x then y else z")
 *   G(x,y,z) = majority of x, y, z
 *   H(x,y,z) = x ^ y ^ z            -- parity
 * MD4_H1/MD4_H2 split two consecutive H applications so the shared
 * subexpression (x ^ y) is cached in tmp2 and reused by the next step;
 * the calling kernel must have a u32 variable named tmp2 in scope.
 */

#ifdef IS_NV
/* sm_50+ provides LOP3.LUT (arbitrary 3-input boolean in one instruction).
 * The hex suffix of each lut3_XX helper equals the 3-input truth table
 * (with x=0xF0, y=0xCC, z=0xAA) of the fallback expression it replaces:
 * 0xca = F, 0xe8 = G (majority), 0x96 = H (xor3). The lut3_* helpers are
 * presumably inline-PTX LOP3 wrappers defined elsewhere -- TODO confirm. */
#if CUDA_ARCH >= 500
#define MD4_F(x,y,z)  lut3_ca ((x), (y), (z))
#define MD4_G(x,y,z)  lut3_e8 ((x), (y), (z))
#define MD4_H(x,y,z)  lut3_96 ((x), (y), (z))
#define MD4_H1(x,y,z) lut3_96 ((x), (y), (z))
#define MD4_H2(x,y,z) lut3_96 ((x), (y), (z))
#else
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD4_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD4_H2(x,y,z) ((x) ^ tmp2)
#endif
/* "o" (optimized) variants: plain aliases on NV; bitselect on AMD below. */
#define MD4_Fo(x,y,z) (MD4_F((x), (y), (z)))
#define MD4_Go(x,y,z) (MD4_G((x), (y), (z)))
#endif

#ifdef IS_AMD
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD4_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD4_H2(x,y,z) ((x) ^ tmp2)
/* OpenCL bitselect(a,b,c) picks b where c has 1-bits, a where it has 0-bits:
 * bitselect(z,y,x) == F; bitselect(x,y,x^z) is the standard majority identity. */
#define MD4_Fo(x,y,z) (bitselect ((z), (y), (x)))
#define MD4_Go(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

/* One MD4 step: a = rotl32 (a + f(b,c,d) + x + K, s). */
#define MD4_STEP(f,a,b,c,d,x,K,s) \
{                                 \
  a += K;                         \
  a += x;                         \
  a += f (b, c, d);               \
  a = rotl32 (a, s);              \
}

/* Same step but without a message word (for x == 0). */
#define MD4_STEP0(f,a,b,c,d,K,s)  \
{                                 \
  a += K;                         \
  a += f (b, c, d);               \
  a = rotl32 (a, s);              \
}

#endif
52
#if defined _MD5_ || defined _MD5H_ || defined _SAPB_ || defined _OLDOFFICE01_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _PBKDF2_MD5_

/**
 * MD5 round functions (RFC 1321), in the xor-masked form:
 *   F(x,y,z) = z ^ (x & (y ^ z))   == "x ? y : z" (selection)
 *   G(x,y,z) = y ^ (z & (x ^ y))   == "z ? x : y"
 *   H(x,y,z) = x ^ y ^ z           (parity)
 *   I(x,y,z) = y ^ (x | ~z)
 * MD5_H1/MD5_H2 cache (x ^ y) in tmp2 across two consecutive H steps;
 * callers must declare a u32 tmp2.
 */

#ifdef IS_NV
/* lut3_XX hex suffix = truth table (x=0xF0, y=0xCC, z=0xAA) of the fallback
 * below: 0xca = F, 0xe4 = G, 0x96 = H, 0x39 = I. Presumably LOP3.LUT
 * wrappers on sm_50+ -- TODO confirm against lut3_* definitions. */
#if CUDA_ARCH >= 500
#define MD5_F(x,y,z)  lut3_ca ((x), (y), (z))
#define MD5_G(x,y,z)  lut3_e4 ((x), (y), (z))
#define MD5_H(x,y,z)  lut3_96 ((x), (y), (z))
#define MD5_H1(x,y,z) lut3_96 ((x), (y), (z))
#define MD5_H2(x,y,z) lut3_96 ((x), (y), (z))
#define MD5_I(x,y,z)  lut3_39 ((x), (y), (z))
#else
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD5_H2(x,y,z) ((x) ^ tmp2)
#define MD5_I(x,y,z) ((y) ^ ((x) | ~(z)))
#endif
/* "o" (optimized) variants: aliases on NV; bitselect forms on AMD. */
#define MD5_Fo(x,y,z) (MD5_F((x), (y), (z)))
#define MD5_Go(x,y,z) (MD5_G((x), (y), (z)))
#endif

#ifdef IS_AMD
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD5_H2(x,y,z) ((x) ^ tmp2)
/* bitselect(0xffffffff, x, z) == x | ~z, so this is I; bitselect(y,x,z)
 * picks x where z is set == G; bitselect(z,y,x) == F. */
#define MD5_I(x,y,z) (bitselect (0xffffffffU, (x), (z)) ^ (y))
#define MD5_Fo(x,y,z) (bitselect ((z), (y), (x)))
#define MD5_Go(x,y,z) (bitselect ((y), (x), (z)))
#endif

/* One MD5 step: a = b + rotl32 (a + f(b,c,d) + x + K, s). */
#define MD5_STEP(f,a,b,c,d,x,K,s) \
{                                 \
  a += K;                         \
  a += x;                         \
  a += f (b, c, d);               \
  a = rotl32 (a, s);              \
  a += b;                         \
}

/* Same step but without a message word (for x == 0). */
#define MD5_STEP0(f,a,b,c,d,K,s)  \
{                                 \
  a += K;                         \
  a += f (b, c, d);               \
  a = rotl32 (a, s);              \
  a += b;                         \
}
#endif
103
#if defined _SHA1_ || defined _SAPG_ || defined _OFFICE2007_ || defined _OFFICE2010_ || defined _OLDOFFICE34_ || defined _ANDROIDFDE_ || defined _DCC2_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _PSAFE2_ || defined _LOTUS8_ || defined _PBKDF2_SHA1_ || defined _RAR3_ || defined _SHA256_SHA1_

/**
 * SHA1 Functions (FIPS 180-4):
 *   F0 = Ch (selection), F1 = parity (xor3), F2 = Maj (majority).
 * The STEP macros reference a free variable K (the per-round constant);
 * the calling kernel must define K before use.
 */

#ifdef IS_NV
/* lut3_XX hex suffix = truth table (x=0xF0, y=0xCC, z=0xAA) of the fallback
 * expression: 0xca = Ch, 0x96 = xor3, 0xe8 = Maj. Presumably LOP3.LUT
 * wrappers on sm_50+ -- TODO confirm. */
#if CUDA_ARCH >= 500
#define SHA1_F0(x,y,z)  lut3_ca ((x), (y), (z))
#define SHA1_F1(x,y,z)  lut3_96 ((x), (y), (z))
#define SHA1_F2(x,y,z)  lut3_e8 ((x), (y), (z))
#else
#define SHA1_F0(x,y,z)  ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z)  ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z)  (((x) & (y)) | ((z) & ((x) ^ (y))))
#endif
/* "o" (optimized) variants: aliases on NV; bitselect forms on AMD. */
#define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
#define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA1_F0(x,y,z)  ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z)  ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z)  (((x) & (y)) | ((z) & ((x) ^ (y))))
/* bitselect(z,y,x) == Ch; bitselect(x,y,x^z) == Maj (standard identity). */
#define SHA1_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA1_F2o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

/* Full SHA1 step: e += K + x + f(b,c,d) + rotl32(a,5); b = rotl32(b,30). */
#define SHA1_STEP(f,a,b,c,d,e,x)  \
{                                 \
  e += K;                         \
  e += x;                         \
  e += f (b, c, d);               \
  e += rotl32 (a, 5u);            \
  b = rotl32 (b, 30u);            \
}

/* Step without a message word (for x == 0). */
#define SHA1_STEP0(f,a,b,c,d,e,x) \
{                                 \
  e += K;                         \
  e += f (b, c, d);               \
  e += rotl32 (a, 5u);            \
  b = rotl32 (b, 30u);            \
}

/* Step without adding K (K presumably folded into x by the caller). */
#define SHA1_STEPX(f,a,b,c,d,e,x) \
{                                 \
  e += x;                         \
  e += f (b, c, d);               \
  e += rotl32 (a, 5u);            \
  b = rotl32 (b, 30u);            \
}

/* Partial-step pair: PE does the data half (no K, no b-rotate), PB does the
 * remaining K addition and b-rotate; f and x are unused in PB. Together they
 * make one full step -- presumably used to interleave/reorder work. */
#define SHA1_STEP_PE(f,a,b,c,d,e,x) \
{                                   \
  e += x;                           \
  e += f (b, c, d);                 \
  e += rotl32 (a, 5u);              \
}

#define SHA1_STEP_PB(f,a,b,c,d,e,x) \
{                                   \
  e += K;                           \
  b = rotl32 (b, 30u);              \
}
#endif
170
#if defined _SHA256_ || defined _PDF17L8_ || defined _SEVEN_ZIP_ || defined _ANDROIDFDE_ || defined _CLOUDKEY_ || defined _SCRYPT_ || defined _PBKDF2_SHA256_ || defined _SHA256_SHA1_ || defined _MS_DRSR_

/**
 * SHA256 functions (FIPS 180-4). Right-rotates are expressed as left-rotates
 * by (32 - n):
 *   S0 = sigma0 (rotr 7,18, shr 3)   -- message schedule
 *   S1 = sigma1 (rotr 17,19, shr 10) -- message schedule
 *   S2 = Sigma0 (rotr 2,13,22)       -- compression
 *   S3 = Sigma1 (rotr 6,11,25)       -- compression
 */

#define SHIFT_RIGHT_32(x,n) ((x) >> (n))

#define SHA256_S0(x) (rotl32 ((x), 25u) ^ rotl32 ((x), 14u) ^ SHIFT_RIGHT_32 ((x), 3u))
#define SHA256_S1(x) (rotl32 ((x), 15u) ^ rotl32 ((x), 13u) ^ SHIFT_RIGHT_32 ((x), 10u))
#define SHA256_S2(x) (rotl32 ((x), 30u) ^ rotl32 ((x), 19u) ^ rotl32 ((x), 10u))
#define SHA256_S3(x) (rotl32 ((x), 26u) ^ rotl32 ((x), 21u) ^ rotl32 ((x), 7u))

/* Note: here F0 = Maj (applied to a,b,c) and F1 = Ch (applied to e,f,g). */
#ifdef IS_NV
/* lut3_XX hex suffix = truth table (x=0xF0, y=0xCC, z=0xAA) of the fallback:
 * 0xe8 = Maj, 0xca = Ch. Presumably LOP3.LUT wrappers on sm_50+ -- TODO confirm. */
#if CUDA_ARCH >= 500
#define SHA256_F0(x,y,z)  lut3_e8 ((x), (y), (z))
#define SHA256_F1(x,y,z)  lut3_ca ((x), (y), (z))
#else
#define SHA256_F0(x,y,z)  (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z)  ((z) ^ ((x) & ((y) ^ (z))))
#endif
/* "o" (optimized) variants: aliases on NV; bitselect forms on AMD. */
#define SHA256_F0o(x,y,z) (SHA256_F0 ((x), (y), (z)))
#define SHA256_F1o(x,y,z) (SHA256_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA256_F0(x,y,z)  (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z)  ((z) ^ ((x) & ((y) ^ (z))))
/* bitselect(x,y,x^z) == Maj; bitselect(z,y,x) == Ch. */
#define SHA256_F0o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#define SHA256_F1o(x,y,z) (bitselect ((z), (y), (x)))
#endif

/* One SHA256 step: h accumulates T1 = h + K + x + Sigma1(e) + Ch(e,f,g),
 * d += T1, then h += T2 = Sigma0(a) + Maj(a,b,c). */
#define SHA256_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{                                              \
  h += K;                                      \
  h += x;                                      \
  h += SHA256_S3 (e);                          \
  h += F1 (e,f,g);                             \
  d += h;                                      \
  h += SHA256_S2 (a);                          \
  h += F0 (a,b,c);                             \
}

/* Message schedule: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */
#define SHA256_EXPAND(x,y,z,w) (SHA256_S1 (x) + y + SHA256_S0 (z) + w)

#endif
213
#if defined _SHA384_ || defined _PDF17L8_

/**
 * SHA384 functions (FIPS 180-4, 64-bit state):
 *   S0 = Sigma0 (rotr 28,34,39), S1 = Sigma1 (rotr 14,18,41) -- compression
 *   S2 = sigma0 (rotr 1,8, shr 7), S3 = sigma1 (rotr 19,61, shr 6) -- schedule
 *   F0 = Ch (applied to e,f,g), F1 = Maj (applied to a,b,c).
 */

#define SHIFT_RIGHT_64(x,n) ((x) >> (n))

#define SHA384_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
#define SHA384_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
#define SHA384_S2(x) (rotr64 ((x),  1) ^ rotr64 ((x),  8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA384_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA384_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA384_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))

/* No 64-bit LOP3 path: "o" variants alias the boolean forms on NV. */
#ifdef IS_NV
#define SHA384_F0o(x,y,z) (SHA384_F0 ((x), (y), (z)))
#define SHA384_F1o(x,y,z) (SHA384_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
/* bitselect(z,y,x) == Ch; bitselect(x,y,x^z) == Maj. */
#define SHA384_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA384_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

/* One SHA384 step: temp0 = T1 = K + x + h + Sigma1(e) + Ch(e,f,g);
 * d += T1; h = T2 + T1 where T2 = Sigma0(a) + Maj(a,b,c). */
#define SHA384_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{                                              \
  u64 temp0;                                   \
  temp0  = K;                                  \
  temp0 += x;                                  \
  temp0 += h;                                  \
  temp0 += SHA384_S1 (e);                      \
  temp0 += F0 (e, f, g);                       \
  d += temp0;                                  \
  h  = SHA384_S0 (a);                          \
  h += F1 (a, b, c);                           \
  h += temp0;                                  \
}

/* Message schedule: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */
#define SHA384_EXPAND(x,y,z,w) (SHA384_S3 (x) + y + SHA384_S2 (z) + w)
#endif
252
#if defined _SHA512_ || defined _CLOUDKEY_ || defined _OFFICE2013_ || defined _PDF17L8_ || defined _PBKDF2_SHA512_
/**
 * SHA512 Functions (FIPS 180-4, 64-bit state):
 *   S0 = Sigma0 (rotr 28,34,39), S1 = Sigma1 (rotr 14,18,41) -- compression
 *   S2 = sigma0 (rotr 1,8, shr 7), S3 = sigma1 (rotr 19,61, shr 6) -- schedule
 *   F0 = Ch (applied to e,f,g), F1 = Maj (applied to a,b,c).
 * Note: SHIFT_RIGHT_64 may also be defined by the SHA384 section (e.g. under
 * _PDF17L8_); the redefinition is identical and therefore legal.
 */

#define SHIFT_RIGHT_64(x,n) ((x) >> (n))

#define SHA512_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
#define SHA512_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
#define SHA512_S2(x) (rotr64 ((x),  1) ^ rotr64 ((x),  8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA512_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA512_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA512_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))

/* No 64-bit LOP3 path: "o" variants alias the boolean forms on NV. */
#ifdef IS_NV
#define SHA512_F0o(x,y,z) (SHA512_F0 ((x), (y), (z)))
#define SHA512_F1o(x,y,z) (SHA512_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
/* bitselect(z,y,x) == Ch; bitselect(x,y,x^z) == Maj. */
#define SHA512_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA512_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

/* One SHA512 step: temp0 = T1 = K + x + h + Sigma1(e) + Ch(e,f,g);
 * d += T1; h = T2 + T1 where T2 = Sigma0(a) + Maj(a,b,c). */
#define SHA512_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{                                              \
  u64 temp0;                                   \
  temp0  = K;                                  \
  temp0 += x;                                  \
  temp0 += h;                                  \
  temp0 += SHA512_S1 (e);                      \
  temp0 += F0 (e, f, g);                       \
  d += temp0;                                  \
  h  = SHA512_S0 (a);                          \
  h += F1 (a, b, c);                           \
  h += temp0;                                  \
}

/* Message schedule: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */
#define SHA512_EXPAND(x,y,z,w) (SHA512_S3 (x) + y + SHA512_S2 (z) + w)

/* "_WO" variants express the schedule sigmas via the OpenCL rotate() built-in
 * (left-rotate by 64-n, equivalent to rotr by n) -- presumably a workaround
 * where rotr64 is unavailable or slow; TODO confirm intent of the suffix. */
#define SHA512_S2_WO(x) (rotate ((x), 64- 1ull) ^ rotate ((x), 64- 8ull) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA512_S3_WO(x) (rotate ((x), 64-19ull) ^ rotate ((x), 64-61ull) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA512_EXPAND_WO(x,y,z,w) (SHA512_S3_WO (x) + y + SHA512_S2_WO (z) + w)
#endif
299
#ifdef _RIPEMD160_

/**
 * RIPEMD160 round functions:
 *   F = x ^ y ^ z (parity), G = "x ? y : z" (selection),
 *   H = (x | ~y) ^ z, I = "z ? x : y", J = x ^ (y | ~z).
 * The calling kernel supplies per-round constant K via the STEP macro args.
 */

#ifdef IS_NV
/* lut3_XX hex suffix = truth table (x=0xF0, y=0xCC, z=0xAA) of the fallback
 * expression: 0x96 = F, 0xca = G, 0x59 = H, 0xe4 = I, 0x2d = J. Presumably
 * LOP3.LUT wrappers on sm_50+ -- TODO confirm. */
#if CUDA_ARCH >= 500
#define RIPEMD160_F(x,y,z)  lut3_96 ((x), (y), (z))
#define RIPEMD160_G(x,y,z)  lut3_ca ((x), (y), (z))
#define RIPEMD160_H(x,y,z)  lut3_59 ((x), (y), (z))
#define RIPEMD160_I(x,y,z)  lut3_e4 ((x), (y), (z))
#define RIPEMD160_J(x,y,z)  lut3_2d ((x), (y), (z))
#else
#define RIPEMD160_F(x,y,z)  ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z)  ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z)  (((x) | ~(y)) ^ (z))
#define RIPEMD160_I(x,y,z)  ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
#define RIPEMD160_J(x,y,z)  ((x) ^ ((y) | ~(z)))
#endif
/* "o" (optimized) variants: aliases on NV; bitselect forms on AMD. */
#define RIPEMD160_Go(x,y,z) (RIPEMD160_G ((x), (y), (z)))
#define RIPEMD160_Io(x,y,z) (RIPEMD160_I ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define RIPEMD160_F(x,y,z)  ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z)  ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z)  (((x) | ~(y)) ^ (z))
#define RIPEMD160_I(x,y,z)  ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
#define RIPEMD160_J(x,y,z)  ((x) ^ ((y) | ~(z)))
/* bitselect(z,y,x) == G (x ? y : z); bitselect(y,x,z) == I (z ? x : y). */
#define RIPEMD160_Go(x,y,z) (bitselect ((z), (y), (x)))
#define RIPEMD160_Io(x,y,z) (bitselect ((y), (x), (z)))
#endif

/* One RIPEMD160 step:
 * a = rotl32 (a + f(b,c,d) + x + K, s) + e; c = rotl32 (c, 10). */
#define RIPEMD160_STEP(f,a,b,c,d,e,x,K,s) \
{                                         \
  a += K;                                 \
  a += x;                                 \
  a += f (b, c, d);                       \
  a = rotl32 (a, s);                      \
  a += e;                                 \
  c = rotl32 (c, 10u);                    \
}

/* Manual rotate-left, used instead of rotl32 where the platform's rotate is
 * buggy (hence the name). Arguments are fully parenthesized so compound
 * expressions expand correctly (the original `a << n` / `32 - n` mis-expanded
 * e.g. `a | b` or `s + t` due to operator precedence). Requires 1 <= n <= 31:
 * n == 0 would shift by 32, which is undefined behavior on u32. */
#define ROTATE_LEFT_WORKAROUND_BUG(a,n) (((a) << (n)) | ((a) >> (32 - (n))))
341
/* Same as RIPEMD160_STEP, but rotates via ROTATE_LEFT_WORKAROUND_BUG for the
 * variable-amount rotate (platform rotate workaround); the fixed rotl32(c,10)
 * is kept as-is. */
#define RIPEMD160_STEP_WORKAROUND_BUG(f,a,b,c,d,e,x,K,s) \
{                                                        \
  a += K;                                                \
  a += x;                                                \
  a += f (b, c, d);                                      \
  a = ROTATE_LEFT_WORKAROUND_BUG (a, s);                 \
  a += e;                                                \
  c = rotl32 (c, 10u);                                   \
}

#endif