* tuning tools
*/
-#define GET_ACCEL(x) GPU_ACCEL_AMD_ ## x
-#define GET_LOOPS(x) GPU_LOOPS_AMD_ ## x
+#define GET_ACCEL(x) KERNEL_ACCEL_ ## x
+#define GET_LOOPS(x) KERNEL_LOOPS_ ## x
/**
* bit rotate
return b2->val - b1->val;
}
-void sp_setup_tbl (const char *install_dir, char *hcstat, uint disable, uint classic, hcstat_table_t *root_table_buf, hcstat_table_t *markov_table_buf)
+void sp_setup_tbl (const char *shared_dir, char *hcstat, uint disable, uint classic, hcstat_table_t *root_table_buf, hcstat_table_t *markov_table_buf)
{
uint i;
uint j;
memset (hcstat_tmp, 0, sizeof (hcstat_tmp));
- snprintf (hcstat_tmp, sizeof (hcstat_tmp) - 1, "%s/%s", install_dir, SP_HCSTAT);
+ snprintf (hcstat_tmp, sizeof (hcstat_tmp) - 1, "%s/%s", shared_dir, SP_HCSTAT);
hcstat = hcstat_tmp;
}
for (uint i = 0; USAGE_BIG[i] != NULL; i++) log_info (USAGE_BIG[i], progname);
}
+// Return the absolute path of the running executable in a heap buffer
+// allocated with mymalloc (); the caller owns (and must free) the result.
+// Linux: resolves the /proc/<pid>/exe symlink. Windows: GetModuleFileName ().
+// On other platforms, or if the lookup fails, an empty string is returned
+// instead of terminating at an uninitialized (or negative) offset.
+char *get_exec_path ()
+{
+  const int exec_path_len = 1024;
+
+  char *exec_path = (char *) mymalloc (exec_path_len);
+
+  int len = 0;
+
+  #ifdef LINUX
+
+  char tmp[32];
+
+  snprintf (tmp, sizeof (tmp), "/proc/%d/exe", getpid ());
+
+  // readlink () does not NUL-terminate and returns -1 on error
+
+  len = readlink (tmp, exec_path, exec_path_len - 1);
+
+  if (len == -1) len = 0;
+
+  #endif
+
+  #ifdef WIN
+
+  len = GetModuleFileName (NULL, exec_path, exec_path_len - 1);
+
+  #endif
+
+  exec_path[len] = 0;
+
+  return exec_path;
+}
+
char *get_install_dir (const char *progname)
{
char *install_dir = mystrdup (progname);
return profile_dir;
}
-char *get_session_dir (const char *profile_dir, const char *session)
+// Return "<profile_dir>/sessions" in a buffer allocated with mymalloc ();
+// the caller owns (and must free) the result.
+char *get_session_dir (const char *profile_dir)
{
-  char *session_dir = (char *) mymalloc (strlen (profile_dir) + 1 + strlen (session) + 1);
+  #define SESSIONS_FOLDER "sessions"
+
+  // +1 for the '/' separator, +1 for the terminating NUL
+  char *session_dir = (char *) mymalloc (strlen (profile_dir) + 1 + strlen (SESSIONS_FOLDER) + 1);
-  sprintf (session_dir, "%s/%s", profile_dir, session);
+  sprintf (session_dir, "%s/%s", profile_dir, SESSIONS_FOLDER);
  return session_dir;
}
return memcmp (r1, r2, sizeof (cpu_rule_t));
}
-int sort_by_gpu_rule (const void *p1, const void *p2)
+// qsort () comparator for kernel_rule_t entries: orders them by their raw
+// struct bytes via memcmp (no field-level semantics).
+int sort_by_kernel_rule (const void *p1, const void *p2)
{
-  const gpu_rule_t *r1 = (const gpu_rule_t *) p1;
-  const gpu_rule_t *r2 = (const gpu_rule_t *) p2;
+  const kernel_rule_t *r1 = (const kernel_rule_t *) p1;
+  const kernel_rule_t *r2 = (const kernel_rule_t *) p2;
-  return memcmp (r1, r2, sizeof (gpu_rule_t));
+  return memcmp (r1, r2, sizeof (kernel_rule_t));
}
int sort_by_stringptr (const void *p1, const void *p2)
if (weak_hash_found == 1) myfree (pot_right_ptr);
}
-uint devices_to_devicemask (char *gpu_devices)
+// Convert a comma-separated list of device ids (e.g. "1,3,5") into a bitmask
+// with bit (id - 1) set for each listed device. Valid ids are 1..8; an
+// invalid id aborts the process via log_error () / exit (). A NULL argument
+// yields an empty mask (0).
+uint devices_to_devicemask (char *opencl_devices)
{
-  uint gpu_devicemask = 0;
+  uint opencl_devicemask = 0;
-  if (gpu_devices)
+  if (opencl_devices)
  {
-    char *devices = strdup (gpu_devices);
+    // work on a copy: strtok () mutates its input
+    char *devices = strdup (opencl_devices);
+    // NOTE(review): strtok () is not reentrant -- fine only if this runs
+    // single-threaded; confirm no concurrent strtok () users
    char *next = strtok (devices, ",");
    do
    {
-      uint gpu_id = atoi (next);
+      uint device_id = atoi (next);
-      if (gpu_id < 1 || gpu_id > 8)
+      if (device_id < 1 || device_id > 8)
      {
-        log_error ("ERROR: invalid gpu_id %u specified", gpu_id);
+        log_error ("ERROR: invalid device_id %u specified", device_id);
        exit (-1);
      }
-      gpu_devicemask |= 1 << (gpu_id - 1);
+      opencl_devicemask |= 1 << (device_id - 1);
    } while ((next = strtok (NULL, ",")) != NULL);
    free (devices);
  }
-  return gpu_devicemask;
+  return opencl_devicemask;
}
uint get_random_num (uint min, uint max)
case 12600: return ((char *) HT_12600); break;
case 12700: return ((char *) HT_12700); break;
case 12800: return ((char *) HT_12800); break;
+ case 12900: return ((char *) HT_12900); break;
}
return ((char *) "Unknown");
byte_swap_32 (digest_buf[7])
);
}
+ else if (hash_mode == 12900)
+ {
+ snprintf (out_buf, len-1, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
+ salt.salt_buf[ 4],
+ salt.salt_buf[ 5],
+ salt.salt_buf[ 6],
+ salt.salt_buf[ 7],
+ salt.salt_buf[ 8],
+ salt.salt_buf[ 9],
+ salt.salt_buf[10],
+ salt.salt_buf[11],
+ byte_swap_32 (digest_buf[0]),
+ byte_swap_32 (digest_buf[1]),
+ byte_swap_32 (digest_buf[2]),
+ byte_swap_32 (digest_buf[3]),
+ byte_swap_32 (digest_buf[4]),
+ byte_swap_32 (digest_buf[5]),
+ byte_swap_32 (digest_buf[6]),
+ byte_swap_32 (digest_buf[7]),
+ salt.salt_buf[ 0],
+ salt.salt_buf[ 1],
+ salt.salt_buf[ 2],
+ salt.salt_buf[ 3]
+ );
+ }
else
{
if (hash_type == HASH_TYPE_MD4)
if (words_done < words_cur) words_cur = words_done;
}
- // It's possible that a GPU's workload isn't finished right after a restore-case.
+ // It's possible that a device's workload isn't finished right after a restore-case.
// In that case, this function would return 0 and overwrite the real restore point
// There's also data.words_cur which is set to rd->words_cur but it changes while
// the attack is running therefore we should stick to rd->words_cur.
* adjustments
*/
-uint set_gpu_accel (uint hash_mode)
+uint set_kernel_accel (uint hash_mode)
{
switch (hash_mode)
{
case 12600: return GET_ACCEL (12600);
case 12700: return GET_ACCEL (12700);
case 12800: return GET_ACCEL (12800);
+ case 12900: return GET_ACCEL (12900);
}
return 0;
}
-uint set_gpu_loops (uint hash_mode)
+uint set_kernel_loops (uint hash_mode)
{
switch (hash_mode)
{
case 12600: return GET_LOOPS (12600);
case 12700: return GET_LOOPS (12700);
case 12800: return GET_LOOPS (12800);
+ case 12900: return GET_LOOPS (12900);
}
return 0;
}
// SAP user names cannot be longer than 12 characters
+  // this is kinda buggy: if the username is encoded in UTF-8, its byte length can be up to 12 * 3
+  // so far nobody complained, so we keep it this way because it helps with optimization
+  // the final string can have a max size of 32 (password) + 10 * 5 (lengthMagicArray) + 12 (max salt) + 1 (the 0x80)
+
if (user_len > 12) return (PARSER_SALT_LENGTH);
// SAP user name cannot start with ! or ?
return (PARSER_OK);
}
+// Parse an Android FDE (Samsung DEK) hash line (-m 12900) into hash_buf.
+// Expected layout of the input, hex characters with no separators:
+//   [  0.. 63]  64 hex chars -> salt_buf[4..11] (second salt part)
+//   [ 64..127]  64 hex chars -> digest[0..7]
+//   [128..159]  32 hex chars -> salt_buf[0..3]  (first salt part)
+// Length bounds are enforced via DISPLAY_LEN_MIN/MAX_12900; returns
+// PARSER_GLOBAL_LENGTH on violation, PARSER_OK otherwise.
+// NOTE(review): the characters themselves are not validated as hex here --
+// presumably hex_to_uint () tolerates garbage; confirm upstream checks.
+int androidfde_samsung_parse_hash (char *input_buf, uint input_len, hash_t *hash_buf)
+{
+  if ((input_len < DISPLAY_LEN_MIN_12900) || (input_len > DISPLAY_LEN_MAX_12900)) return (PARSER_GLOBAL_LENGTH);
+
+  uint32_t *digest = (uint32_t *) hash_buf->digest;
+
+  salt_t *salt = hash_buf->salt;
+
+  /**
+   * parse line
+   */
+
+  char *hash_pos  = input_buf + 64;
+  char *salt1_pos = input_buf + 128;
+  char *salt2_pos = input_buf;
+
+  /**
+   * salt
+   */
+
+  salt->salt_buf[ 0] = hex_to_uint (&salt1_pos[ 0]);
+  salt->salt_buf[ 1] = hex_to_uint (&salt1_pos[ 8]);
+  salt->salt_buf[ 2] = hex_to_uint (&salt1_pos[16]);
+  salt->salt_buf[ 3] = hex_to_uint (&salt1_pos[24]);
+
+  salt->salt_buf[ 4] = hex_to_uint (&salt2_pos[ 0]);
+  salt->salt_buf[ 5] = hex_to_uint (&salt2_pos[ 8]);
+  salt->salt_buf[ 6] = hex_to_uint (&salt2_pos[16]);
+  salt->salt_buf[ 7] = hex_to_uint (&salt2_pos[24]);
+
+  salt->salt_buf[ 8] = hex_to_uint (&salt2_pos[32]);
+  salt->salt_buf[ 9] = hex_to_uint (&salt2_pos[40]);
+  salt->salt_buf[10] = hex_to_uint (&salt2_pos[48]);
+  salt->salt_buf[11] = hex_to_uint (&salt2_pos[56]);
+
+  // 12 uints * 4 bytes = 48 bytes of salt material
+
+  salt->salt_len = 48;
+
+  // iteration count convention here: stored as rounds - 1
+
+  salt->salt_iter = ROUNDS_ANDROIDFDE_SAMSUNG - 1;
+
+  /**
+   * digest buf
+   */
+
+  digest[0] = hex_to_uint (&hash_pos[ 0]);
+  digest[1] = hex_to_uint (&hash_pos[ 8]);
+  digest[2] = hex_to_uint (&hash_pos[16]);
+  digest[3] = hex_to_uint (&hash_pos[24]);
+  digest[4] = hex_to_uint (&hash_pos[32]);
+  digest[5] = hex_to_uint (&hash_pos[40]);
+  digest[6] = hex_to_uint (&hash_pos[48]);
+  digest[7] = hex_to_uint (&hash_pos[56]);
+
+  return (PARSER_OK);
+}
+
/**
* parallel running threads
*/
}
/**
- * GPU rules
+ * device rules
*/
#define INCR_POS if (++rule_pos == rule_len) return (-1)
#define SET_NAME(rule,val) (rule)->cmds[rule_cnt] = ((val) & 0xff) << 0
#define SET_P0(rule,val) INCR_POS; (rule)->cmds[rule_cnt] |= ((val) & 0xff) << 8
#define SET_P1(rule,val) INCR_POS; (rule)->cmds[rule_cnt] |= ((val) & 0xff) << 16
-#define MAX_GPU_RULES 14
+#define MAX_KERNEL_RULES 255
#define GET_NAME(rule) rule_cmd = (((rule)->cmds[rule_cnt] >> 0) & 0xff)
#define GET_P0(rule) INCR_POS; rule_buf[rule_pos] = (((rule)->cmds[rule_cnt] >> 8) & 0xff)
#define GET_P1(rule) INCR_POS; rule_buf[rule_pos] = (((rule)->cmds[rule_cnt] >> 16) & 0xff)
#define GET_P0_CONV(rule) INCR_POS; rule_buf[rule_pos] = conv_itoc (((rule)->cmds[rule_cnt] >> 8) & 0xff)
#define GET_P1_CONV(rule) INCR_POS; rule_buf[rule_pos] = conv_itoc (((rule)->cmds[rule_cnt] >> 16) & 0xff)
-int cpu_rule_to_gpu_rule (char rule_buf[BUFSIZ], uint rule_len, gpu_rule_t *rule)
+int cpu_rule_to_kernel_rule (char rule_buf[BUFSIZ], uint rule_len, kernel_rule_t *rule)
{
uint rule_pos;
uint rule_cnt;
- for (rule_pos = 0, rule_cnt = 0; rule_pos < rule_len && rule_cnt < MAX_GPU_RULES; rule_pos++, rule_cnt++)
+ for (rule_pos = 0, rule_cnt = 0; rule_pos < rule_len && rule_cnt < MAX_KERNEL_RULES; rule_pos++, rule_cnt++)
{
switch (rule_buf[rule_pos])
{
return (0);
}
-int gpu_rule_to_cpu_rule (char rule_buf[BUFSIZ], gpu_rule_t *rule)
+int kernel_rule_to_cpu_rule (char rule_buf[BUFSIZ], kernel_rule_t *rule)
{
uint rule_cnt;
uint rule_pos;
char rule_cmd;
- for (rule_cnt = 0, rule_pos = 0; rule_pos < rule_len && rule_cnt < MAX_GPU_RULES; rule_pos++, rule_cnt++)
+ for (rule_cnt = 0, rule_pos = 0; rule_pos < rule_len && rule_cnt < MAX_KERNEL_RULES; rule_pos++, rule_cnt++)
{
GET_NAME (rule);