Fix -m 1800 for NV
[hashcat.git] / include / kernel_functions.c
/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 * License.....: MIT
 */

#if defined _MD4_ || defined _DCC2_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _MS_DRSR_

#ifdef IS_NV
#if CUDA_ARCH >= 500
#define MD4_F(x,y,z) lut3_ca ((x), (y), (z))
#define MD4_G(x,y,z) lut3_e8 ((x), (y), (z))
#define MD4_H(x,y,z) lut3_96 ((x), (y), (z))
#define MD4_H1(x,y,z) lut3_96 ((x), (y), (z))
#define MD4_H2(x,y,z) lut3_96 ((x), (y), (z))
#else
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD4_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD4_H2(x,y,z) ((x) ^ tmp2)
#endif
#define MD4_Fo(x,y,z) (MD4_F((x), (y), (z)))
#define MD4_Go(x,y,z) (MD4_G((x), (y), (z)))
#endif
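
/*
 * Editor's note (not part of the original source): on sm_50+ the lut3_xx
 * helpers are expected to map to the LOP3.LUT instruction, and the two-digit
 * hex suffix is the 8-bit truth table of the three-input function, indexed
 * per bit as (x << 2) | (y << 1) | z. A plain-C sketch of that convention
 * (illustrative only, assuming the u32 type from the shared kernel headers):
 */

#if 0
static u32 lut3_ref (const u32 x, const u32 y, const u32 z, const u32 tt)
{
  u32 r = 0;

  for (u32 i = 0; i < 32; i++)
  {
    const u32 idx = (((x >> i) & 1) << 2)   // x selects bit 2 of the index
                  | (((y >> i) & 1) << 1)   // y selects bit 1
                  | (((z >> i) & 1) << 0);  // z selects bit 0

    r |= ((tt >> idx) & 1) << i;            // look the result bit up in tt
  }

  return r; // lut3_ref (x, y, z, 0xca) == MD4_F, 0xe8 == MD4_G, 0x96 == MD4_H
}
#endif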

#ifdef IS_AMD
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD4_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD4_H2(x,y,z) ((x) ^ tmp2)
#define MD4_Fo(x,y,z) (bitselect ((z), (y), (x)))
#define MD4_Go(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif
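
/*
 * Editor's note: OpenCL's bitselect (a, b, c) returns the bits of a where c
 * is 0 and the bits of b where c is 1, i.e. (a & ~c) | (b & c). Hence
 * bitselect ((z), (y), (x)) is exactly MD4_F, and
 * bitselect ((x), (y), ((x) ^ (z))) is the MD4_G majority function above.
 */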

#define MD4_STEP(f,a,b,c,d,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
}

#define MD4_STEP0(f,a,b,c,d,K,s) \
{ \
  a += K; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
}

#endif
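
/*
 * Usage sketch (editorial, not part of the original file): the kernels chain
 * these macros directly on the running state a, b, c, d. The lines below
 * sketch the start of MD4 round 1, assuming w0[] holds the message words and
 * MD4C00 / MD4S00..MD4S03 are the round constant and shift amounts defined by
 * the per-mode kernels:
 *
 *   MD4_STEP (MD4_Fo, a, b, c, d, w0[0], MD4C00, MD4S00);
 *   MD4_STEP (MD4_Fo, d, a, b, c, w0[1], MD4C00, MD4S01);
 *   MD4_STEP (MD4_Fo, c, d, a, b, w0[2], MD4C00, MD4S02);
 *   MD4_STEP (MD4_Fo, b, c, d, a, w0[3], MD4C00, MD4S03);
 *
 * MD4_STEP0 is the same step minus the "a += x" and is meant for positions
 * where the message word is known to be zero. MD4_H1 / MD4_H2 cache x ^ y in
 * a caller-declared tmp2 variable so that consecutive H steps share one XOR.
 */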

#if defined _MD5_ || defined _MD5H_ || defined _SAPB_ || defined _OLDOFFICE01_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _NETNTLMV2_ || defined _KRB5PA_ || defined _PBKDF2_MD5_

#ifdef IS_NV
#if CUDA_ARCH >= 500
#define MD5_F(x,y,z) lut3_ca ((x), (y), (z))
#define MD5_G(x,y,z) lut3_e4 ((x), (y), (z))
#define MD5_H(x,y,z) lut3_96 ((x), (y), (z))
#define MD5_H1(x,y,z) lut3_96 ((x), (y), (z))
#define MD5_H2(x,y,z) lut3_96 ((x), (y), (z))
#define MD5_I(x,y,z) lut3_39 ((x), (y), (z))
#else
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD5_H2(x,y,z) ((x) ^ tmp2)
#define MD5_I(x,y,z) ((y) ^ ((x) | ~(z)))
#endif
#define MD5_Fo(x,y,z) (MD5_F((x), (y), (z)))
#define MD5_Go(x,y,z) (MD5_G((x), (y), (z)))
#endif

#ifdef IS_AMD
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_H1(x,y,z) ((tmp2 = (x) ^ (y)) ^ (z))
#define MD5_H2(x,y,z) ((x) ^ tmp2)
#define MD5_I(x,y,z) (bitselect (0xffffffffU, (x), (z)) ^ (y))
#define MD5_Fo(x,y,z) (bitselect ((z), (y), (x)))
#define MD5_Go(x,y,z) (bitselect ((y), (x), (z)))
#endif

#define MD5_STEP(f,a,b,c,d,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
  a += b; \
}

#define MD5_STEP0(f,a,b,c,d,K,s) \
{ \
  a += K; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
  a += b; \
}
#endif
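
/*
 * Sketch (editorial): MD5_STEP differs from MD4_STEP only in the final
 * "a += b" after the rotate, matching RFC 1321. Written out as a plain
 * function (illustrative only, assuming u32 and rotl32 () from the shared
 * kernel headers):
 */

#if 0
static u32 md5_step_ref (u32 a, const u32 b, const u32 c, const u32 d, const u32 x, const u32 K, const u32 s)
{
  a += K;               // per-step additive constant
  a += x;               // message word for this step
  a += MD5_F (b, c, d); // round-dependent boolean function
  a  = rotl32 (a, s);   // per-step rotate amount
  a += b;               // the extra add MD4 does not have

  return a;
}
#endif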

#if defined _SHA1_ || defined _SAPG_ || defined _OFFICE2007_ || defined _OFFICE2010_ || defined _OLDOFFICE34_ || defined _ANDROIDFDE_ || defined _DCC2_ || defined _WPA_ || defined _MD5_SHA1_ || defined _SHA1_MD5_ || defined _PSAFE2_ || defined _LOTUS8_ || defined _PBKDF2_SHA1_ || defined _RAR3_ || defined _SHA256_SHA1_

#ifdef IS_NV
#if CUDA_ARCH >= 500
#define SHA1_F0(x,y,z) lut3_ca ((x), (y), (z))
#define SHA1_F1(x,y,z) lut3_96 ((x), (y), (z))
#define SHA1_F2(x,y,z) lut3_e8 ((x), (y), (z))
#else
#define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#endif
#define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
#define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA1_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA1_F2o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

#define SHA1_STEP(f,a,b,c,d,e,x) \
{ \
  e += K; \
  e += x; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
  b = rotl32 (b, 30u); \
}

#define SHA1_STEP0(f,a,b,c,d,e,x) \
{ \
  e += K; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
  b = rotl32 (b, 30u); \
}

#define SHA1_STEPX(f,a,b,c,d,e,x) \
{ \
  e += x; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
  b = rotl32 (b, 30u); \
}

#define SHA1_STEP_PE(f,a,b,c,d,e,x) \
{ \
  e += x; \
  e += f (b, c, d); \
  e += rotl32 (a, 5u); \
}

#define SHA1_STEP_PB(f,a,b,c,d,e,x) \
{ \
  e += K; \
  b = rotl32 (b, 30u); \
}
#endif
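
/*
 * Usage sketch (editorial): SHA1_STEP reads the round constant from a name K
 * that the caller provides and redefines between the four SHA-1 rounds; the
 * constant and schedule-word names below (SHA1C00, SHA1C01, w0_t, ...) are
 * assumptions about the per-mode kernels, shown for illustration only:
 *
 *   #define K SHA1C00                           // rounds  0..19, Ch
 *   SHA1_STEP (SHA1_F0o, a, b, c, d, e, w0_t);
 *   SHA1_STEP (SHA1_F0o, e, a, b, c, d, w1_t);
 *   #undef  K
 *
 *   #define K SHA1C01                           // rounds 20..39, parity
 *   SHA1_STEP (SHA1_F1,  a, b, c, d, e, w4_t);
 *   #undef  K
 *
 * SHA1_STEPX is the same step without the K add, and SHA1_STEP_PE /
 * SHA1_STEP_PB split one step into an early part and a late part.
 */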

#if defined _SHA256_ || defined _PDF17L8_ || defined _SEVEN_ZIP_ || defined _ANDROIDFDE_ || defined _CLOUDKEY_ || defined _SCRYPT_ || defined _PBKDF2_SHA256_ || defined _SHA256_SHA1_ || defined _MS_DRSR_

#define SHIFT_RIGHT_32(x,n) ((x) >> (n))

#define SHA256_S0(x) (rotl32 ((x), 25u) ^ rotl32 ((x), 14u) ^ SHIFT_RIGHT_32 ((x), 3u))
#define SHA256_S1(x) (rotl32 ((x), 15u) ^ rotl32 ((x), 13u) ^ SHIFT_RIGHT_32 ((x), 10u))
#define SHA256_S2(x) (rotl32 ((x), 30u) ^ rotl32 ((x), 19u) ^ rotl32 ((x), 10u))
#define SHA256_S3(x) (rotl32 ((x), 26u) ^ rotl32 ((x), 21u) ^ rotl32 ((x), 7u))

#ifdef IS_NV
#if CUDA_ARCH >= 500
#define SHA256_F0(x,y,z) lut3_e8 ((x), (y), (z))
#define SHA256_F1(x,y,z) lut3_ca ((x), (y), (z))
#else
#define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#endif
#define SHA256_F0o(x,y,z) (SHA256_F0 ((x), (y), (z)))
#define SHA256_F1o(x,y,z) (SHA256_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA256_F0o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#define SHA256_F1o(x,y,z) (bitselect ((z), (y), (x)))
#endif

#define SHA256_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  h += K; \
  h += x; \
  h += SHA256_S3 (e); \
  h += F1 (e,f,g); \
  d += h; \
  h += SHA256_S2 (a); \
  h += F0 (a,b,c); \
}

#define SHA256_EXPAND(x,y,z,w) (SHA256_S1 (x) + y + SHA256_S0 (z) + w)

#endif
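
/*
 * Usage sketch (editorial): the compression loop alternates schedule
 * expansion and rounds. The names below (w0_t..we_t for the 16 schedule
 * words, SHA256C10 for the round-16 constant) are assumptions about the
 * per-mode kernels:
 *
 *   // W[16] = s1(W[14]) + W[9] + s0(W[1]) + W[0]
 *   w0_t = SHA256_EXPAND (we_t, w9_t, w1_t, w0_t);
 *
 *   SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, SHA256C10);
 *
 * Inside SHA256_STEP, F1 is the Ch function applied to (e,f,g) and F0 is the
 * Maj function applied to (a,b,c); "d += h" runs before the S2/Maj terms are
 * folded in, so at that point h holds only the T1 part of the round.
 */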

#if defined _SHA384_ || defined _PDF17L8_

#define SHIFT_RIGHT_64(x,n) ((x) >> (n))

#define SHA384_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
#define SHA384_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
#define SHA384_S2(x) (rotr64 ((x), 1) ^ rotr64 ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA384_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA384_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA384_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))

#ifdef IS_NV
#define SHA384_F0o(x,y,z) (SHA384_F0 ((x), (y), (z)))
#define SHA384_F1o(x,y,z) (SHA384_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA384_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA384_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

#define SHA384_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  u64 temp0; \
  temp0 = K; \
  temp0 += x; \
  temp0 += h; \
  temp0 += SHA384_S1 (e); \
  temp0 += F0 (e, f, g); \
  d += temp0; \
  h = SHA384_S0 (a); \
  h += F1 (a, b, c); \
  h += temp0; \
}

#define SHA384_EXPAND(x,y,z,w) (SHA384_S3 (x) + y + SHA384_S2 (z) + w)
#endif
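
/*
 * Editor's note: SHA-384 shares the SHA-512 round function, so these are the
 * same 64-bit rotates and boolean functions under a different prefix. In
 * SHA384_STEP, temp0 is the usual T1 = h + S1(e) + Ch(e,f,g) + K + x; d gets
 * T1 added, and h is rebuilt as S0(a) + Maj(a,b,c) + T1.
 */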

#if defined _SHA512_ || defined _CLOUDKEY_ || defined _OFFICE2013_ || defined _PDF17L8_ || defined _PBKDF2_SHA512_

#define SHIFT_RIGHT_64(x,n) ((x) >> (n))

#define SHA512_S0(x) (rotr64 ((x), 28) ^ rotr64 ((x), 34) ^ rotr64 ((x), 39))
#define SHA512_S1(x) (rotr64 ((x), 14) ^ rotr64 ((x), 18) ^ rotr64 ((x), 41))
#define SHA512_S2(x) (rotr64 ((x), 1) ^ rotr64 ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA512_S3(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA512_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA512_F1(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))

#ifdef IS_NV
#define SHA512_F0o(x,y,z) (SHA512_F0 ((x), (y), (z)))
#define SHA512_F1o(x,y,z) (SHA512_F1 ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define SHA512_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA512_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif

#define SHA512_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
  u64 temp0; \
  temp0 = K; \
  temp0 += x; \
  temp0 += h; \
  temp0 += SHA512_S1 (e); \
  temp0 += F0 (e, f, g); \
  d += temp0; \
  h = SHA512_S0 (a); \
  h += F1 (a, b, c); \
  h += temp0; \
}

#define SHA512_EXPAND(x,y,z,w) (SHA512_S3 (x) + y + SHA512_S2 (z) + w)

#define SHA512_S2_WO(x) (rotr64 ((x), 1) ^ rotr64 ((x), 8) ^ SHIFT_RIGHT_64 ((x), 7))
#define SHA512_S3_WO(x) (rotr64 ((x), 19) ^ rotr64 ((x), 61) ^ SHIFT_RIGHT_64 ((x), 6))

#define SHA512_EXPAND_WO(x,y,z,w) (SHA512_S3_WO (x) + y + SHA512_S2_WO (z) + w)
#endif
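
/*
 * Editor's note: SHA512_STEP mirrors SHA384_STEP on u64 words. The _WO
 * variants of S2/S3 and SHA512_EXPAND_WO are textually identical to the plain
 * ones; per the commit title ("Fix -m 1800 for NV") they appear to have been
 * added for the sha512crypt kernels on NV, though the exact reason is not
 * stated in this file.
 */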

#ifdef _RIPEMD160_

#ifdef IS_NV
#if CUDA_ARCH >= 500
#define RIPEMD160_F(x,y,z) lut3_96 ((x), (y), (z))
#define RIPEMD160_G(x,y,z) lut3_ca ((x), (y), (z))
#define RIPEMD160_H(x,y,z) lut3_59 ((x), (y), (z))
#define RIPEMD160_I(x,y,z) lut3_e4 ((x), (y), (z))
#define RIPEMD160_J(x,y,z) lut3_2d ((x), (y), (z))
#else
#define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
#define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
#define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
#endif
#define RIPEMD160_Go(x,y,z) (RIPEMD160_G ((x), (y), (z)))
#define RIPEMD160_Io(x,y,z) (RIPEMD160_I ((x), (y), (z)))
#endif

#ifdef IS_AMD
#define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))
#define RIPEMD160_I(x,y,z) ((y) ^ ((z) & ((x) ^ (y)))) /* z ? x : y */
#define RIPEMD160_J(x,y,z) ((x) ^ ((y) | ~(z)))
#define RIPEMD160_Go(x,y,z) (bitselect ((z), (y), (x)))
#define RIPEMD160_Io(x,y,z) (bitselect ((y), (x), (z)))
#endif

#define RIPEMD160_STEP(f,a,b,c,d,e,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = rotl32 (a, s); \
  a += e; \
  c = rotl32 (c, 10u); \
}

#define ROTATE_LEFT_WORKAROUND_BUG(a,n) ((a << n) | (a >> (32 - n)))

#define RIPEMD160_STEP_WORKAROUND_BUG(f,a,b,c,d,e,x,K,s) \
{ \
  a += K; \
  a += x; \
  a += f (b, c, d); \
  a = ROTATE_LEFT_WORKAROUND_BUG (a, s); \
  a += e; \
  c = rotl32 (c, 10u); \
}

#endif
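
/*
 * Editor's note: RIPEMD-160 drives two parallel lines through the same step
 * macro; besides the data-dependent rotate, every step also folds e in and
 * rotates c left by a fixed 10 bits. RIPEMD160_STEP_WORKAROUND_BUG is the
 * same step with the rotate spelled as a plain shift/or, presumably to dodge
 * the rotate-related compiler/driver bug its name refers to. A plain-function
 * sketch of one step (illustrative only, assuming u32 and rotl32 () from the
 * shared kernel headers; c is passed by pointer because the macro updates it
 * in place):
 */

#if 0
static u32 ripemd160_step_ref (u32 a, const u32 b, u32 *c, const u32 d, const u32 e, const u32 x, const u32 K, const u32 s)
{
  a += K;                      // per-round additive constant
  a += x;                      // message word for this step
  a += RIPEMD160_F (b, *c, d); // round-dependent boolean function
  a  = rotl32 (a, s);          // per-step rotate amount
  a += e;

  *c = rotl32 (*c, 10u);       // the fixed rotate of c every step

  return a;
}
#endif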