// hashcat: OpenCL/simd.c
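// SIMD compare helpers shared by the hashcat OpenCL kernels. The macro set is
// chosen at compile time by VECT_SIZE (1, 2, 4 or 8):
//
//   MATCHES_ONE_VV / MATCHES_ONE_VS - true if any vector lane matches a
//                                     vector / scalar right-hand side
//   COMPARE_S_SIMD                  - single-hash mode: test each lane against
//                                     the target digest in search[]
//   COMPARE_M_SIMD                  - multi-hash mode: bitmap pre-filter, then
//                                     look the digest up with find_hash ()
//
// The macros expect kernel-scope identifiers such as search[], digests_buf,
// digests_offset, hashes_shown, plains_buf, d_return_buf, gid, lid and il_pos
// to be available at the call site.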

// vliw1

#if VECT_SIZE == 1

#define MATCHES_ONE_VV(a,b) ((a) == (b))
#define MATCHES_ONE_VS(a,b) ((a) == (b))

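// Single-hash comparison: the candidate digest words h0..h3 are tested against
// the one target digest in search[]; the first work-item to claim the slot via
// atomic_add () records the hit with mark_hash () and flags d_return_buf[lid].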
#define COMPARE_S_SIMD(h0,h1,h2,h3) \
{ \
  if (((h0) == search[0]) && ((h1) == search[1]) && ((h2) == search[2]) && ((h3) == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
}

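// Multi-hash comparison: the candidate digest is first run through the
// two-level bitmap pre-filter (check ()); only on a bitmap hit is the digest
// list searched with find_hash (), and a match is then claimed atomically
// exactly as in COMPARE_S_SIMD above.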
#define COMPARE_M_SIMD(h0,h1,h2,h3) \
{ \
  const u32 digest_tp0[4] = { h0, h1, h2, h3 }; \
\
  if (check (digest_tp0, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp0, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
}

#endif
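
// Minimal call-site sketch (illustrative only, not part of this file): a
// kernel computes its four result words and hands them to the matching macro.
// The names r0..r3 are assumptions for the example; any u32/u32x expressions
// work.
//
//   COMPARE_M_SIMD (r0, r1, r2, r3);   // multi-hash kernels
//   COMPARE_S_SIMD (r0, r1, r2, r3);   // single-hash kernels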

// vliw2

#if VECT_SIZE == 2

#define MATCHES_ONE_VV(a,b) (((a).s0 == (b).s0) || ((a).s1 == (b).s1))
#define MATCHES_ONE_VS(a,b) (((a).s0 == (b)   ) || ((a).s1 == (b)   ))

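// With VECT_SIZE > 1 each .sN vector lane is an independent candidate; the
// lane index is added to il_pos so mark_hash () can report exactly which
// candidate produced the match. The blocks below repeat the scalar logic once
// per lane.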
#define COMPARE_S_SIMD(h0,h1,h2,h3) \
{ \
  if (((h0).s0 == search[0]) && ((h1).s0 == search[1]) && ((h2).s0 == search[2]) && ((h3).s0 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 0); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s1 == search[0]) && ((h1).s1 == search[1]) && ((h2).s1 == search[2]) && ((h3).s1 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 1); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
}

#define COMPARE_M_SIMD(h0,h1,h2,h3) \
{ \
  const u32 digest_tp0[4] = { h0.s0, h1.s0, h2.s0, h3.s0 }; \
  const u32 digest_tp1[4] = { h0.s1, h1.s1, h2.s1, h3.s1 }; \
\
  if (check (digest_tp0, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp0, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 0); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp1, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp1, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 1); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
}

#endif

// vliw4

#if VECT_SIZE == 4

#define MATCHES_ONE_VV(a,b) (((a).s0 == (b).s0) || ((a).s1 == (b).s1) || ((a).s2 == (b).s2) || ((a).s3 == (b).s3))
#define MATCHES_ONE_VS(a,b) (((a).s0 == (b)   ) || ((a).s1 == (b)   ) || ((a).s2 == (b)   ) || ((a).s3 == (b)   ))

#define COMPARE_S_SIMD(h0,h1,h2,h3) \
{ \
  if (((h0).s0 == search[0]) && ((h1).s0 == search[1]) && ((h2).s0 == search[2]) && ((h3).s0 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 0); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s1 == search[0]) && ((h1).s1 == search[1]) && ((h2).s1 == search[2]) && ((h3).s1 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 1); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s2 == search[0]) && ((h1).s2 == search[1]) && ((h2).s2 == search[2]) && ((h3).s2 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 2); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s3 == search[0]) && ((h1).s3 == search[1]) && ((h2).s3 == search[2]) && ((h3).s3 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 3); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
}

#define COMPARE_M_SIMD(h0,h1,h2,h3) \
{ \
  const u32 digest_tp0[4] = { h0.s0, h1.s0, h2.s0, h3.s0 }; \
  const u32 digest_tp1[4] = { h0.s1, h1.s1, h2.s1, h3.s1 }; \
  const u32 digest_tp2[4] = { h0.s2, h1.s2, h2.s2, h3.s2 }; \
  const u32 digest_tp3[4] = { h0.s3, h1.s3, h2.s3, h3.s3 }; \
\
  if (check (digest_tp0, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp0, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 0); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp1, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp1, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 1); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp2, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp2, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 2); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp3, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp3, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 3); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
}

#endif

// vliw8

#if VECT_SIZE == 8

#define MATCHES_ONE_VV(a,b) (((a).s0 == (b).s0) || ((a).s1 == (b).s1) || ((a).s2 == (b).s2) || ((a).s3 == (b).s3) || ((a).s4 == (b).s4) || ((a).s5 == (b).s5) || ((a).s6 == (b).s6) || ((a).s7 == (b).s7))
#define MATCHES_ONE_VS(a,b) (((a).s0 == (b)   ) || ((a).s1 == (b)   ) || ((a).s2 == (b)   ) || ((a).s3 == (b)   ) || ((a).s4 == (b)   ) || ((a).s5 == (b)   ) || ((a).s6 == (b)   ) || ((a).s7 == (b)   ))

#define COMPARE_S_SIMD(h0,h1,h2,h3) \
{ \
  if (((h0).s0 == search[0]) && ((h1).s0 == search[1]) && ((h2).s0 == search[2]) && ((h3).s0 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 0); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s1 == search[0]) && ((h1).s1 == search[1]) && ((h2).s1 == search[2]) && ((h3).s1 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 1); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s2 == search[0]) && ((h1).s2 == search[1]) && ((h2).s2 == search[2]) && ((h3).s2 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 2); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s3 == search[0]) && ((h1).s3 == search[1]) && ((h2).s3 == search[2]) && ((h3).s3 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 3); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s4 == search[0]) && ((h1).s4 == search[1]) && ((h2).s4 == search[2]) && ((h3).s4 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 4); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s5 == search[0]) && ((h1).s5 == search[1]) && ((h2).s5 == search[2]) && ((h3).s5 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 5); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s6 == search[0]) && ((h1).s6 == search[1]) && ((h2).s6 == search[2]) && ((h3).s6 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 6); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
\
  if (((h0).s7 == search[0]) && ((h1).s7 == search[1]) && ((h2).s7 == search[2]) && ((h3).s7 == search[3])) \
  { \
    const u32 final_hash_pos = digests_offset + 0; \
\
    if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
    { \
      mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 7); \
\
      d_return_buf[lid] = 1; \
    } \
  } \
}

#define COMPARE_M_SIMD(h0,h1,h2,h3) \
{ \
  const u32 digest_tp0[4] = { h0.s0, h1.s0, h2.s0, h3.s0 }; \
  const u32 digest_tp1[4] = { h0.s1, h1.s1, h2.s1, h3.s1 }; \
  const u32 digest_tp2[4] = { h0.s2, h1.s2, h2.s2, h3.s2 }; \
  const u32 digest_tp3[4] = { h0.s3, h1.s3, h2.s3, h3.s3 }; \
  const u32 digest_tp4[4] = { h0.s4, h1.s4, h2.s4, h3.s4 }; \
  const u32 digest_tp5[4] = { h0.s5, h1.s5, h2.s5, h3.s5 }; \
  const u32 digest_tp6[4] = { h0.s6, h1.s6, h2.s6, h3.s6 }; \
  const u32 digest_tp7[4] = { h0.s7, h1.s7, h2.s7, h3.s7 }; \
\
  if (check (digest_tp0, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp0, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 0); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp1, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp1, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 1); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp2, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp2, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 2); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp3, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp3, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 3); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp4, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp4, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 4); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp5, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp5, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 5); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp6, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp6, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 6); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
\
  if (check (digest_tp7, \
             bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, \
             bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, \
             bitmap_mask, \
             bitmap_shift1, \
             bitmap_shift2)) \
  { \
    int hash_pos = find_hash (digest_tp7, digests_cnt, &digests_buf[digests_offset]); \
\
    if (hash_pos != -1) \
    { \
      const u32 final_hash_pos = digests_offset + hash_pos; \
\
      if (atomic_add (&hashes_shown[final_hash_pos], 1) == 0) \
      { \
        mark_hash (plains_buf, hashes_shown, final_hash_pos, gid, il_pos + 7); \
\
        d_return_buf[lid] = 1; \
      } \
    } \
  } \
}

#endif

#define MATCHES_NONE_VV(a,b) !(MATCHES_ONE_VV ((a), (b)))
#define MATCHES_NONE_VS(a,b) !(MATCHES_ONE_VS ((a), (b)))
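
// MATCHES_NONE_* are the early-exit counterparts, e.g. (illustrative sketch
// only; the variable name r0 is an assumption for the example):
//
//   if (MATCHES_NONE_VS (r0, search[0])) continue;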