#define DGST_R3 3
#include "include/kernel_functions.c"
-#include "types_ocl.c"
-#include "common.c"
+#include "OpenCL/types_ocl.c"
+#include "OpenCL/common.c"
-#ifdef VECT_SIZE1
-#define COMPARE_M "check_multi_vect1_comp4.c"
-#endif
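+// comparison includes: COMPARE_S for the single-hash path, COMPARE_M for multi-hash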
+#define COMPARE_S "OpenCL/check_single_comp4.c"
+#define COMPARE_M "OpenCL/check_multi_comp4.c"
__constant u32 te0[256] =
{
u32 g = digest[6];
u32 h = digest[7];
- u32 w0_t = swap_workaround (w[ 0]);
- u32 w1_t = swap_workaround (w[ 1]);
- u32 w2_t = swap_workaround (w[ 2]);
- u32 w3_t = swap_workaround (w[ 3]);
- u32 w4_t = swap_workaround (w[ 4]);
- u32 w5_t = swap_workaround (w[ 5]);
- u32 w6_t = swap_workaround (w[ 6]);
- u32 w7_t = swap_workaround (w[ 7]);
- u32 w8_t = swap_workaround (w[ 8]);
- u32 w9_t = swap_workaround (w[ 9]);
- u32 wa_t = swap_workaround (w[10]);
- u32 wb_t = swap_workaround (w[11]);
- u32 wc_t = swap_workaround (w[12]);
- u32 wd_t = swap_workaround (w[13]);
- u32 we_t = swap_workaround (w[14]);
- u32 wf_t = swap_workaround (w[15]);
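+ // swap32 (formerly swap_workaround) byte-swaps each word into the
+ // big-endian order SHA-256 expects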
+ u32 w0_t = swap32 (w[ 0]);
+ u32 w1_t = swap32 (w[ 1]);
+ u32 w2_t = swap32 (w[ 2]);
+ u32 w3_t = swap32 (w[ 3]);
+ u32 w4_t = swap32 (w[ 4]);
+ u32 w5_t = swap32 (w[ 5]);
+ u32 w6_t = swap32 (w[ 6]);
+ u32 w7_t = swap32 (w[ 7]);
+ u32 w8_t = swap32 (w[ 8]);
+ u32 w9_t = swap32 (w[ 9]);
+ u32 wa_t = swap32 (w[10]);
+ u32 wb_t = swap32 (w[11]);
+ u32 wc_t = swap32 (w[12]);
+ u32 wd_t = swap32 (w[13]);
+ u32 we_t = swap32 (w[14]);
+ u32 wf_t = swap32 (w[15]);
#define ROUND_EXPAND() \
{ \
const u32 s = a >> 8;
- #ifdef VECT_SIZE1
- a = crc32tab[k];
- #endif
-
- #ifdef VECT_SIZE2
- a.s0 = crc32tab[k.s0];
- a.s1 = crc32tab[k.s1];
- #endif
-
- #ifdef VECT_SIZE4
- a.s0 = crc32tab[k.s0];
- a.s1 = crc32tab[k.s1];
- a.s2 = crc32tab[k.s2];
- a.s3 = crc32tab[k.s3];
- #endif
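+ // the unified kernels are scalar, so one table lookup replaces the
+ // per-component VECT_SIZE variants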
+ a = crc32tab[k];
a ^= s;
u32 tmp1;
u32 tmp2;
+ #ifdef IS_NV
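+ // __byte_perm copies one byte per selector nibble from the 64-bit (hi:lo)
+ // pair, so shifting 0x76543210 right by offset_minus_4 nibbles produces a
+ // selector that realigns the append words to the block offset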
+ const int offset_minus_4 = 4 - (block_len & 3);
+
+ const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
+
+ tmp0 = __byte_perm (        0, append[0], selector);
+ tmp1 = __byte_perm (append[0], append[1], selector);
+ tmp2 = __byte_perm (append[1],         0, selector);
+ #endif
+
+ #if defined IS_AMD || defined IS_GENERIC
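+ // amd_bytealign shifts the 64-bit concatenation of its first two arguments
+ // right by (offset_minus_4 & 3) bytes, the same realignment as the NV path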
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
tmp1 = tmp2;
tmp2 = 0;
}
+ #endif
u32 carry[2] = { 0, 0 };
u32 tmp7;
u32 tmp8;
+ #ifdef IS_NV
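+ // same selector-based realignment as above, widened to an 8-word append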
+ const int offset_minus_4 = 4 - (block_len & 3);
+
+ const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
+
+ tmp0 = __byte_perm (        0, append[0], selector);
+ tmp1 = __byte_perm (append[0], append[1], selector);
+ tmp2 = __byte_perm (append[1], append[2], selector);
+ tmp3 = __byte_perm (append[2], append[3], selector);
+ tmp4 = __byte_perm (append[3], append[4], selector);
+ tmp5 = __byte_perm (append[4], append[5], selector);
+ tmp6 = __byte_perm (append[5], append[6], selector);
+ tmp7 = __byte_perm (append[6], append[7], selector);
+ tmp8 = __byte_perm (append[7],         0, selector);
+ #endif
+
+ #if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
tmp7 = tmp8;
tmp8 = 0;
}
+ #endif
u32 carry[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
return new_len;
}
-__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11600_init (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global seven_zip_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global seven_zip_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void m11600_init (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global seven_zip_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global seven_zip_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
/**
* base
tmps[gid].final_len = final_len;
}
-__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11600_loop (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global seven_zip_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global seven_zip_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void m11600_loop (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global seven_zip_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global seven_zip_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
/**
* base
tmps[gid].final_len = final_len;
}
-__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m11600_comp (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global seven_zip_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global seven_zip_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+__kernel void m11600_comp (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global seven_zip_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global seven_zip_t *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
/**
* base
const u32 gid = get_global_id (0);
const u32 lid = get_local_id (0);
+ const u32 lsz = get_local_size (0);
/**
* aes shared
*/
- const u32 lid4 = lid * 4;
-
__local u32 s_td0[256];
__local u32 s_td1[256];
__local u32 s_td2[256];
__local u32 s_td3[256];
__local u32 s_td4[256];

__local u32 s_te0[256];
__local u32 s_te1[256];
__local u32 s_te2[256];
__local u32 s_te3[256];
__local u32 s_te4[256];
- s_td0[lid4 + 0] = td0[lid4 + 0];
- s_td0[lid4 + 1] = td0[lid4 + 1];
- s_td0[lid4 + 2] = td0[lid4 + 2];
- s_td0[lid4 + 3] = td0[lid4 + 3];
-
- s_td1[lid4 + 0] = td1[lid4 + 0];
- s_td1[lid4 + 1] = td1[lid4 + 1];
- s_td1[lid4 + 2] = td1[lid4 + 2];
- s_td1[lid4 + 3] = td1[lid4 + 3];
-
- s_td2[lid4 + 0] = td2[lid4 + 0];
- s_td2[lid4 + 1] = td2[lid4 + 1];
- s_td2[lid4 + 2] = td2[lid4 + 2];
- s_td2[lid4 + 3] = td2[lid4 + 3];
-
- s_td3[lid4 + 0] = td3[lid4 + 0];
- s_td3[lid4 + 1] = td3[lid4 + 1];
- s_td3[lid4 + 2] = td3[lid4 + 2];
- s_td3[lid4 + 3] = td3[lid4 + 3];
-
- s_td4[lid4 + 0] = td4[lid4 + 0];
- s_td4[lid4 + 1] = td4[lid4 + 1];
- s_td4[lid4 + 2] = td4[lid4 + 2];
- s_td4[lid4 + 3] = td4[lid4 + 3];
-
- s_te0[lid4 + 0] = te0[lid4 + 0];
- s_te0[lid4 + 1] = te0[lid4 + 1];
- s_te0[lid4 + 2] = te0[lid4 + 2];
- s_te0[lid4 + 3] = te0[lid4 + 3];
-
- s_te1[lid4 + 0] = te1[lid4 + 0];
- s_te1[lid4 + 1] = te1[lid4 + 1];
- s_te1[lid4 + 2] = te1[lid4 + 2];
- s_te1[lid4 + 3] = te1[lid4 + 3];
-
- s_te2[lid4 + 0] = te2[lid4 + 0];
- s_te2[lid4 + 1] = te2[lid4 + 1];
- s_te2[lid4 + 2] = te2[lid4 + 2];
- s_te2[lid4 + 3] = te2[lid4 + 3];
-
- s_te3[lid4 + 0] = te3[lid4 + 0];
- s_te3[lid4 + 1] = te3[lid4 + 1];
- s_te3[lid4 + 2] = te3[lid4 + 2];
- s_te3[lid4 + 3] = te3[lid4 + 3];
-
- s_te4[lid4 + 0] = te4[lid4 + 0];
- s_te4[lid4 + 1] = te4[lid4 + 1];
- s_te4[lid4 + 2] = te4[lid4 + 2];
- s_te4[lid4 + 3] = te4[lid4 + 3];
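+ // reqd_work_group_size (64, 1, 1) is gone, so a strided loop copies the
+ // 256-entry AES tables into local memory for any workgroup size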
+ for (u32 i = lid; i < 256; i += lsz)
+ {
+ s_td0[i] = td0[i];
+ s_td1[i] = td1[i];
+ s_td2[i] = td2[i];
+ s_td3[i] = td3[i];
+ s_td4[i] = td4[i];
+
+ s_te0[i] = te0[i];
+ s_te1[i] = te1[i];
+ s_te2[i] = te2[i];
+ s_te3[i] = te3[i];
+ s_te4[i] = te4[i];
+ }
barrier (CLK_LOCAL_MEM_FENCE);
u32 block_len = tmps[gid].block_len;
u32 final_len = tmps[gid].final_len;
- append_0x80_4 (block, block_len);
+ append_0x80_1x16 (block, block_len);
if (block_len >= 56)
{
bzero16 (block);
}
- block[15] = swap_workaround (final_len * 8);
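+ // SHA-256 padding: the message length in bits, big-endian, fills the last
+ // word of the final block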
+ block[15] = swap32 (final_len * 8);
sha256_transform (block, dgst);
{
u32 data[4];
- data[0] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 0]);
- data[1] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 1]);
- data[2] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 2]);
- data[3] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 3]);
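+ // next 16-byte ciphertext block, byte-swapped into the word order the AES
+ // decryption code operates on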
+ data[0] = swap32 (esalt_bufs[salt_pos].data_buf[j + 0]);
+ data[1] = swap32 (esalt_bufs[salt_pos].data_buf[j + 1]);
+ data[2] = swap32 (esalt_bufs[salt_pos].data_buf[j + 2]);
+ data[3] = swap32 (esalt_bufs[salt_pos].data_buf[j + 3]);
u32 out[4];
iv[2] = data[2];
iv[3] = data[3];
- out[0] = swap_workaround (out[0]);
- out[1] = swap_workaround (out[1]);
- out[2] = swap_workaround (out[2]);
- out[3] = swap_workaround (out[3]);
+ out[0] = swap32 (out[0]);
+ out[1] = swap32 (out[1]);
+ out[2] = swap32 (out[2]);
+ out[3] = swap32 (out[3]);
crc = crc32 (out, 16, crc);
}
u32 data[4];
- data[0] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 0]);
- data[1] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 1]);
- data[2] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 2]);
- data[3] = swap_workaround (esalt_bufs[salt_pos].data_buf[j + 3]);
+ data[0] = swap32 (esalt_bufs[salt_pos].data_buf[j + 0]);
+ data[1] = swap32 (esalt_bufs[salt_pos].data_buf[j + 1]);
+ data[2] = swap32 (esalt_bufs[salt_pos].data_buf[j + 2]);
+ data[3] = swap32 (esalt_bufs[salt_pos].data_buf[j + 3]);
u32 out[4];
iv[2] = data[2];
iv[3] = data[3];
- out[0] = swap_workaround (out[0]);
- out[1] = swap_workaround (out[1]);
- out[2] = swap_workaround (out[2]);
- out[3] = swap_workaround (out[3]);
+ out[0] = swap32 (out[0]);
+ out[1] = swap32 (out[1]);
+ out[2] = swap32 (out[2]);
+ out[3] = swap32 (out[3]);
const u32 margin = data_len - unpack_size;