#define _SHA256_
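+// NEW_SIMD_CODE marks this kernel as ported to the vectorized (u32x /
+// VECT_SIZE) framework (assumption: without this define, the SIMD headers
+// fall back to a vector width of 1).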
+#define NEW_SIMD_CODE
+
#include "include/constants.h"
#include "include/kernel_vendor.h"
#include "OpenCL/common.c"
#include "include/rp_kernel.h"
#include "OpenCL/rp.c"
-
-#define COMPARE_S "OpenCL/check_single_comp4.c"
-#define COMPARE_M "OpenCL/check_multi_comp4.c"
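+// simd.c supplies the COMPARE_M_SIMD / COMPARE_S_SIMD helpers that replace
+// the two check_*_comp4.c includes removed above.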
+#include "OpenCL/simd.c"
__constant u32 k_sha256[64] =
{
SHA256C3c, SHA256C3d, SHA256C3e, SHA256C3f,
};
-static void sha256_transform (u32 digest[8], const u32 w[16])
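+// Scalar (_S) helpers for the salt precomputation below: the salt blocks are
+// identical across vector lanes, so they are expanded once in plain u32 math.
+// Note rotl32_S (x, 25) == rotr (x, 7) and rotl32_S (x, 14) == rotr (x, 18),
+// i.e. these match the SHA-256 small sigma0/sigma1 exactly.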
+#define SHA256_S0_S(x) (rotl32_S ((x), 25u) ^ rotl32_S ((x), 14u) ^ SHIFT_RIGHT_32 ((x), 3u))
+#define SHA256_S1_S(x) (rotl32_S ((x), 15u) ^ rotl32_S ((x), 13u) ^ SHIFT_RIGHT_32 ((x), 10u))
+
+#define SHA256_EXPAND_S(x,y,z,w) (SHA256_S1_S (x) + y + SHA256_S0_S (z) + w)
+
+static void sha256_transform (u32x digest[8], const u32x w[16])
{
- u32 a = digest[0];
- u32 b = digest[1];
- u32 c = digest[2];
- u32 d = digest[3];
- u32 e = digest[4];
- u32 f = digest[5];
- u32 g = digest[6];
- u32 h = digest[7];
-
- u32 w0_t = w[ 0];
- u32 w1_t = w[ 1];
- u32 w2_t = w[ 2];
- u32 w3_t = w[ 3];
- u32 w4_t = w[ 4];
- u32 w5_t = w[ 5];
- u32 w6_t = w[ 6];
- u32 w7_t = w[ 7];
- u32 w8_t = w[ 8];
- u32 w9_t = w[ 9];
- u32 wa_t = w[10];
- u32 wb_t = w[11];
- u32 wc_t = w[12];
- u32 wd_t = w[13];
- u32 we_t = w[14];
- u32 wf_t = w[15];
+ u32x a = digest[0];
+ u32x b = digest[1];
+ u32x c = digest[2];
+ u32x d = digest[3];
+ u32x e = digest[4];
+ u32x f = digest[5];
+ u32x g = digest[6];
+ u32x h = digest[7];
+
+ u32x w0_t = w[ 0];
+ u32x w1_t = w[ 1];
+ u32x w2_t = w[ 2];
+ u32x w3_t = w[ 3];
+ u32x w4_t = w[ 4];
+ u32x w5_t = w[ 5];
+ u32x w6_t = w[ 6];
+ u32x w7_t = w[ 7];
+ u32x w8_t = w[ 8];
+ u32x w9_t = w[ 9];
+ u32x wa_t = w[10];
+ u32x wb_t = w[11];
+ u32x wc_t = w[12];
+ u32x wd_t = w[13];
+ u32x we_t = w[14];
+ u32x wf_t = w[15];
#define ROUND_EXPAND() \
{ \
digest[7] += h;
}
-static void sha256_transform_z (u32 digest[8])
+static void sha256_transform_z (u32x digest[8])
{
- u32 a = digest[0];
- u32 b = digest[1];
- u32 c = digest[2];
- u32 d = digest[3];
- u32 e = digest[4];
- u32 f = digest[5];
- u32 g = digest[6];
- u32 h = digest[7];
+ u32x a = digest[0];
+ u32x b = digest[1];
+ u32x c = digest[2];
+ u32x d = digest[3];
+ u32x e = digest[4];
+ u32x f = digest[5];
+ u32x g = digest[6];
+ u32x h = digest[7];
#define ROUND_STEP_Z(i) \
{ \
digest[7] += h;
}
-static void sha256_transform_s (u32 digest[8], __local u32 *w)
+static void sha256_transform_s (u32x digest[8], __local u32 *w)
{
- u32 a = digest[0];
- u32 b = digest[1];
- u32 c = digest[2];
- u32 d = digest[3];
- u32 e = digest[4];
- u32 f = digest[5];
- u32 g = digest[6];
- u32 h = digest[7];
+ u32x a = digest[0];
+ u32x b = digest[1];
+ u32x c = digest[2];
+ u32x d = digest[3];
+ u32x e = digest[4];
+ u32x f = digest[5];
+ u32x g = digest[6];
+ u32x h = digest[7];
#define ROUND_STEP_S(i) \
{ \
* salt
*/
- const u32 salt_buf0 = swap32 (salt_bufs[salt_pos].salt_buf[ 0]);
- const u32 salt_buf1 = swap32 (salt_bufs[salt_pos].salt_buf[ 1]);
- const u32 salt_buf2 = swap32 (salt_bufs[salt_pos].salt_buf[ 2]); // 0x80
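+ // The salt is uniform across lanes, so the scalar swap32_S byte-swap is
+ // enough here; the vector swap32 is only needed for per-lane data.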
+ const u32 salt_buf0 = swap32_S (salt_bufs[salt_pos].salt_buf[ 0]);
+ const u32 salt_buf1 = swap32_S (salt_bufs[salt_pos].salt_buf[ 1]);
+ const u32 salt_buf2 = swap32_S (salt_bufs[salt_pos].salt_buf[ 2]); // 0x80
/**
* precompute final msg blocks
#pragma unroll
for (int i = 16; i < 64; i++)
{
- w_s1[i] = SHA256_EXPAND (w_s1[i - 2], w_s1[i - 7], w_s1[i - 15], w_s1[i - 16]);
+ w_s1[i] = SHA256_EXPAND_S (w_s1[i - 2], w_s1[i - 7], w_s1[i - 15], w_s1[i - 16]);
}
w_s2[ 0] = salt_buf0 << 16 | salt_buf1 >> 16;
#pragma unroll
for (int i = 16; i < 64; i++)
{
- w_s2[i] = SHA256_EXPAND (w_s2[i - 2], w_s2[i - 7], w_s2[i - 15], w_s2[i - 16]);
+ w_s2[i] = SHA256_EXPAND_S (w_s2[i - 2], w_s2[i - 7], w_s2[i - 15], w_s2[i - 16]);
}
}
* loop
*/
- for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
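+ // Step by VECT_SIZE: each iteration now consumes VECT_SIZE rules at once,
+ // one per lane of the u32x registers declared below.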
+ for (u32 il_pos = 0; il_pos < rules_cnt; il_pos += VECT_SIZE)
{
- u32 w0[4];
- u32 w1[4];
- u32 w2[4];
- u32 w3[4];
-
- w0[0] = pw_buf0[0];
- w0[1] = pw_buf0[1];
- w0[2] = pw_buf0[2];
- w0[3] = pw_buf0[3];
- w1[0] = pw_buf1[0];
- w1[1] = pw_buf1[1];
- w1[2] = pw_buf1[2];
- w1[3] = pw_buf1[3];
- w2[0] = 0;
- w2[1] = 0;
- w2[2] = 0;
- w2[3] = 0;
- w3[0] = 0;
- w3[1] = 0;
- w3[2] = 0;
- w3[3] = 0;
-
- const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);
-
- u32 w0_t[4];
- u32 w1_t[4];
- u32 w2_t[4];
- u32 w3_t[4];
+ u32x w0[4] = { 0 };
+ u32x w1[4] = { 0 };
+ u32x w2[4] = { 0 };
+ u32x w3[4] = { 0 };
+
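+ // apply_rules_vect applies rule il_pos + lane to a copy of the base
+ // password in each lane; lengths may differ per lane, hence u32x out_len.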
+ const u32x out_len = apply_rules_vect (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w0, w1);
+
+ u32x w0_t[4];
+ u32x w1_t[4];
+ u32x w2_t[4];
+ u32x w3_t[4];
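+ // make_unicode zero-extends each password byte to UTF-16LE, doubling the
+ // length: w0/w1 (16 bytes) expand into w0_t..w3_t (32 bytes).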
make_unicode (w0, w0_t, w1_t);
make_unicode (w1, w2_t, w3_t);
- u32 w_t[16];
+ u32x w_t[16];
w_t[ 0] = swap32 (w0_t[0]);
w_t[ 1] = swap32 (w0_t[1]);
w_t[14] = w_t[14] >> 8;
w_t[15] = w_t[15] >> 8;
- u32 digest[8];
+ u32x digest[8];
digest[0] = SHA256M_A;
digest[1] = SHA256M_B;
sha256_transform_s (digest, w_s1); // 448 - 512
sha256_transform_s (digest, w_s2); // 512 - 576
- const u32 r0 = digest[3];
- const u32 r1 = digest[7];
- const u32 r2 = digest[2];
- const u32 r3 = digest[6];
-
- #include COMPARE_M
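+ // Multi-hash path: COMPARE_M_SIMD tests every lane of the digest quadruple
+ // against the loaded hashes, replacing the old #include-based comparison.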
+ COMPARE_M_SIMD (digest[3], digest[7], digest[2], digest[6]);
}
}
* salt
*/
- const u32 salt_buf0 = swap32 (salt_bufs[salt_pos].salt_buf[ 0]);
- const u32 salt_buf1 = swap32 (salt_bufs[salt_pos].salt_buf[ 1]);
- const u32 salt_buf2 = swap32 (salt_bufs[salt_pos].salt_buf[ 2]); // 0x80
+ const u32 salt_buf0 = swap32_S (salt_bufs[salt_pos].salt_buf[ 0]);
+ const u32 salt_buf1 = swap32_S (salt_bufs[salt_pos].salt_buf[ 1]);
+ const u32 salt_buf2 = swap32_S (salt_bufs[salt_pos].salt_buf[ 2]); // 0x80
/**
* precompute final msg blocks
#pragma unroll
for (int i = 16; i < 64; i++)
{
- w_s1[i] = SHA256_EXPAND (w_s1[i - 2], w_s1[i - 7], w_s1[i - 15], w_s1[i - 16]);
+ w_s1[i] = SHA256_EXPAND_S (w_s1[i - 2], w_s1[i - 7], w_s1[i - 15], w_s1[i - 16]);
}
w_s2[ 0] = salt_buf0 << 16 | salt_buf1 >> 16;
#pragma unroll
for (int i = 16; i < 64; i++)
{
- w_s2[i] = SHA256_EXPAND (w_s2[i - 2], w_s2[i - 7], w_s2[i - 15], w_s2[i - 16]);
+ w_s2[i] = SHA256_EXPAND_S (w_s2[i - 2], w_s2[i - 7], w_s2[i - 15], w_s2[i - 16]);
}
}
* loop
*/
- for (u32 il_pos = 0; il_pos < rules_cnt; il_pos++)
+ for (u32 il_pos = 0; il_pos < rules_cnt; il_pos += VECT_SIZE)
{
- u32 w0[4];
- u32 w1[4];
- u32 w2[4];
- u32 w3[4];
-
- w0[0] = pw_buf0[0];
- w0[1] = pw_buf0[1];
- w0[2] = pw_buf0[2];
- w0[3] = pw_buf0[3];
- w1[0] = pw_buf1[0];
- w1[1] = pw_buf1[1];
- w1[2] = pw_buf1[2];
- w1[3] = pw_buf1[3];
- w2[0] = 0;
- w2[1] = 0;
- w2[2] = 0;
- w2[3] = 0;
- w3[0] = 0;
- w3[1] = 0;
- w3[2] = 0;
- w3[3] = 0;
-
- const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);
-
- u32 w0_t[4];
- u32 w1_t[4];
- u32 w2_t[4];
- u32 w3_t[4];
+ u32x w0[4] = { 0 };
+ u32x w1[4] = { 0 };
+ u32x w2[4] = { 0 };
+ u32x w3[4] = { 0 };
+
+ const u32x out_len = apply_rules_vect (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w0, w1);
+
+ u32x w0_t[4];
+ u32x w1_t[4];
+ u32x w2_t[4];
+ u32x w3_t[4];
make_unicode (w0, w0_t, w1_t);
make_unicode (w1, w2_t, w3_t);
- u32 w_t[16];
+ u32x w_t[16];
w_t[ 0] = swap32 (w0_t[0]);
w_t[ 1] = swap32 (w0_t[1]);
w_t[14] = w_t[14] >> 8;
w_t[15] = w_t[15] >> 8;
- u32 digest[8];
+ u32x digest[8];
digest[0] = SHA256M_A;
digest[1] = SHA256M_B;
sha256_transform_s (digest, w_s1); // 448 - 512
sha256_transform_s (digest, w_s2); // 512 - 576
- const u32 r0 = digest[3];
- const u32 r1 = digest[7];
- const u32 r2 = digest[2];
- const u32 r3 = digest[6];
-
- #include COMPARE_S
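+ // Single-hash path: COMPARE_S_SIMD tests each lane against the one target
+ // digest.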
+ COMPARE_S_SIMD (digest[3], digest[7], digest[2], digest[6]);
}
}