* License.....: MIT
*/
+#ifdef OSX
+#include <stdio.h>
+#endif
+
#include <common.h>
#include <shared.h>
-#include <rp_gpu_on_cpu.h>
+#include <rp_kernel_on_cpu.h>
#include <getopt.h>
const char *PROGNAME = "oclHashcat";
-const char *VERSION_TXT = "2.01";
-const uint VERSION_BIN = 201;
-const uint RESTORE_MIN = 201;
+const char *VERSION_TXT = "2.10";
+const uint VERSION_BIN = 210;
+const uint RESTORE_MIN = 210;
#define INCR_RULES 10000
#define INCR_SALTS 100000
#define GPU_TEMP_ABORT 90
#define GPU_TEMP_RETAIN 80
#define WORKLOAD_PROFILE 2
-#define GPU_ACCEL 0
-#define GPU_LOOPS 0
-#define GPU_RULES 1024
-#define GPU_COMBS 1024
-#define GPU_BFS 1024
-#define GPU_THREADS 64
+#define KERNEL_ACCEL 0
+#define KERNEL_LOOPS 0
+#define KERNEL_RULES 1024
+#define KERNEL_COMBS 1024
+#define KERNEL_BFS 1024
+#define KERNEL_THREADS 64
#define POWERTUNE_ENABLE 0
#define LOGFILE_DISABLE 0
#define SCRYPT_TMTO 0
+#define OPENCL_VECTOR_WIDTH 0
#define WL_MODE_STDIN 1
#define WL_MODE_FILE 2
#define ATTACK_KERN_BF 3
#define ATTACK_KERN_NONE 100
-#define ATTACK_EXEC_ON_CPU 10
-#define ATTACK_EXEC_ON_GPU 11
+#define ATTACK_EXEC_OUTSIDE_KERNEL 10
+#define ATTACK_EXEC_INSIDE_KERNEL 11
-#define COMBINATOR_MODE_BASE_LEFT 10001
-#define COMBINATOR_MODE_BASE_RIGHT 10002
+#define COMBINATOR_MODE_BASE_LEFT 10001
+#define COMBINATOR_MODE_BASE_RIGHT 10002
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MAX_DICTSTAT 10000
-#define NUM_DEFAULT_BENCHMARK_ALGORITHMS 128
+#define NUM_DEFAULT_BENCHMARK_ALGORITHMS 130
#define global_free(attr) \
{ \
1000,
1100,
2100,
- 12800,
+ 12800,
1500,
12400,
500,
133,
11600,
12500,
+ 13000,
6211,
6221,
6231,
6241,
8800,
+ 12900,
12200,
9700,
9710,
* types
*/
-static void (*get_next_word_func) (char *, uint32_t, uint32_t *, uint32_t *);
+static void (*get_next_word_func) (char *, u32, u32 *, u32 *);
/**
* globals
" --induction-dir=FOLDER Specify induction directory to use, default is $session.induct",
" --outfile-check-dir=FOLDER Specify the outfile directory which should be monitored, default is $session.outfiles",
" --logfile-disable Disable the logfile",
- " --truecrypt-keyfiles=FILE Keyfiles used, seperate with comma",
+ " --truecrypt-keyfiles=FILE Keyfiles used, separate with comma",
"",
"* Resources:",
"",
" -c, --segment-size=NUM Size in MB to cache from the wordfile",
" --bitmap-min=NUM Minimum number of bits allowed for bitmaps",
" --bitmap-max=NUM Maximum number of bits allowed for bitmaps",
- " --cpu-affinity=STR Locks to CPU devices, seperate with comma",
- " -d, --gpu-devices=STR Devices to use, separate with comma",
+ #ifndef OSX
+ " --cpu-affinity=STR Locks to CPU devices, separate with comma",
+ #else
+ " --cpu-affinity=STR Locks to CPU devices, separate with comma (disabled on OSX)",
+ #endif
+ " --opencl-platforms=STR OpenCL platforms to use, separate with comma",
+ " -d, --opencl-devices=STR OpenCL devices to use, separate with comma",
+ " --opencl-device-types=STR OpenCL device-types to use, separate with comma, see references below",
+ " --opencl-vector-width=NUM OpenCL vector-width (either 1, 2, 4 or 8), overrides value from device query",
" -w, --workload-profile=NUM Enable a specific workload profile, see references below",
- " -n, --gpu-accel=NUM Workload tuning: 1, 8, 40, 80, 160",
- " -u, --gpu-loops=NUM Workload fine-tuning: 8 - 1024",
+ " -n, --kernel-accel=NUM Workload tuning: 1, 8, 40, 80, 160",
+ " -u, --kernel-loops=NUM Workload fine-tuning: 8 - 1024",
+ #ifdef HAVE_HWMON
" --gpu-temp-disable Disable temperature and fanspeed readings and triggers",
" --gpu-temp-abort=NUM Abort session if GPU temperature reaches NUM degrees celsius",
" --gpu-temp-retain=NUM Try to retain GPU temperature at NUM degrees celsius (AMD only)",
+ #ifdef HAVE_ADL
" --powertune-enable Enable automatic power tuning option (AMD OverDrive 6 only)",
+ #endif
+ #endif
" --scrypt-tmto=NUM Manually override automatically calculated TMTO value for scrypt",
"",
"* Distributed:",
" 0 = Manual Tuning",
" 1 = Performance Tuning, default",
"",
+ "* OpenCL device-types:",
+ "",
+ " 1 = CPU devices",
+ " 2 = GPU devices",
+ " 3 = Accelerator devices (FPGA, CELL Blade, etc.)",
+ "",
"* Outfile Formats:",
"",
" 1 = hash[:salt]",
" 12300 = Oracle T: Type (Oracle 12+)",
" 8000 = Sybase ASE",
"",
- "[[ HTTP, SMTP, LDAP Server]]",
+ "[[ HTTP, SMTP, LDAP Server ]]",
"",
" 141 = EPiServer 6.x < v4",
" 1441 = EPiServer 6.x > v4",
"",
" 11600 = 7-Zip",
" 12500 = RAR3-hp",
+ " 13000 = RAR5",
"",
"[[ Full-Disk encryptions (FDE) ]]",
"",
" Y = 2 = XTS 1024 bit (Ciphers: AES or Serpent or Twofish or AES-Twofish or Serpent-AES or Twofish-Serpent)",
" Y = 3 = XTS 1536 bit (Ciphers: All)",
" 8800 = Android FDE < v4.3",
+ " 12900 = Android FDE (Samsung DEK)",
" 12200 = eCryptfs",
"",
"[[ Documents ]]",
{
hc_device_param_t *device_param = &data.devices_param[device_id];
- uint64_t speed_cnt = 0;
- float speed_ms = 0;
+ if (device_param->skipped) continue;
+
+ u64 speed_cnt = 0;
+ float speed_ms = 0;
for (int i = 0; i < SPEED_CACHE; i++)
{
* words_cur
*/
- uint64_t words_cur = get_lowest_words_done ();
+ u64 words_cur = get_lowest_words_done ();
fprintf (out, "CURKU\t%llu\t", (unsigned long long int) words_cur);
if (salts_left == 0) salts_left = 1;
- uint64_t progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * salts_left;
- uint64_t all_done = 0;
- uint64_t all_rejected = 0;
- uint64_t all_restored = 0;
+ u64 all_done = 0;
+ u64 all_rejected = 0;
+ u64 all_restored = 0;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
all_restored += data.words_progress_restored[salt_pos];
}
- uint64_t progress_cur = all_restored + all_done + all_rejected;
- uint64_t progress_end = progress_total;
+ u64 progress_cur = all_restored + all_done + all_rejected;
+ u64 progress_end = progress_total;
- uint64_t progress_skip = 0;
+ u64 progress_skip = 0;
if (data.skip)
{
progress_skip = MIN (data.skip, data.words_base) * salts_left;
- if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_skip *= data.gpu_rules_cnt;
+ if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_skip *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_skip *= data.combs_cnt;
else if (data.attack_kern == ATTACK_KERN_BF) progress_skip *= data.bfs_cnt;
}
{
progress_end = MIN (data.limit, data.words_base) * salts_left;
- if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_end *= data.gpu_rules_cnt;
+ if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_end *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_end *= data.combs_cnt;
else if (data.attack_kern == ATTACK_KERN_BF) progress_end *= data.bfs_cnt;
}
- uint64_t progress_cur_relative_skip = progress_cur - progress_skip;
- uint64_t progress_end_relative_skip = progress_end - progress_skip;
+ u64 progress_cur_relative_skip = progress_cur - progress_skip;
+ u64 progress_end_relative_skip = progress_end - progress_skip;
fprintf (out, "PROGRESS\t%llu\t%llu\t", (unsigned long long int) progress_cur_relative_skip, (unsigned long long int) progress_end_relative_skip);
* temperature
*/
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
fprintf (out, "TEMP\t");
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- int temp = hm_get_temperature_with_device_id (i);
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ int temp = hm_get_temperature_with_device_id (device_id);
fprintf (out, "%d\t", temp);
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
#ifdef _WIN
fputc ('\r', out);
* speed new
*/
- uint64_t speed_cnt[DEVICES_MAX];
- float speed_ms[DEVICES_MAX];
+ u64 speed_cnt[DEVICES_MAX];
+ float speed_ms[DEVICES_MAX];
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &data.devices_param[device_id];
- // we need to clear values (set to 0) because in case the gpu does
+ if (device_param->skipped) continue;
+
+ // we need to clear values (set to 0) because in case the device does
// not get new candidates it idles around but speed display would
// show it as working.
// if we instantly set it to 0 after reading it happens that the
- // speed can be shown as zero if the users refreshs to fast.
+ // speed can be shown as zero if the users refreshes too fast.
// therefore, we add a timestamp when a stat was recorded and if its
- // to old we will not use it
+ // too old we will not use it
speed_cnt[device_id] = 0;
speed_ms[device_id] = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
hashes_dev_ms[device_id] = 0;
if (speed_ms[device_id])
if (salts_left == 0) salts_left = 1;
- uint64_t progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * salts_left;
- uint64_t all_done = 0;
- uint64_t all_rejected = 0;
- uint64_t all_restored = 0;
+ u64 all_done = 0;
+ u64 all_rejected = 0;
+ u64 all_restored = 0;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
all_restored += data.words_progress_restored[salt_pos];
}
- uint64_t progress_cur = all_restored + all_done + all_rejected;
- uint64_t progress_end = progress_total;
+ u64 progress_cur = all_restored + all_done + all_rejected;
+ u64 progress_end = progress_total;
- uint64_t progress_skip = 0;
+ u64 progress_skip = 0;
if (data.skip)
{
progress_skip = MIN (data.skip, data.words_base) * salts_left;
- if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_skip *= data.gpu_rules_cnt;
+ if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_skip *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_skip *= data.combs_cnt;
else if (data.attack_kern == ATTACK_KERN_BF) progress_skip *= data.bfs_cnt;
}
{
progress_end = MIN (data.limit, data.words_base) * salts_left;
- if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_end *= data.gpu_rules_cnt;
+ if (data.attack_kern == ATTACK_KERN_STRAIGHT) progress_end *= data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) progress_end *= data.combs_cnt;
else if (data.attack_kern == ATTACK_KERN_BF) progress_end *= data.bfs_cnt;
}
- uint64_t progress_cur_relative_skip = progress_cur - progress_skip;
- uint64_t progress_end_relative_skip = progress_end - progress_skip;
+ u64 progress_cur_relative_skip = progress_cur - progress_skip;
+ u64 progress_end_relative_skip = progress_end - progress_skip;
- float speed_ms_real = ms_running - ms_paused;
- uint64_t speed_plains_real = all_done;
+ float speed_ms_real = ms_running - ms_paused;
+ u64 speed_plains_real = all_done;
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
if (data.devices_status != STATUS_CRACKED)
{
- uint64_t words_per_ms = 0;
+ u64 words_per_ms = 0;
if (speed_plains_real && speed_ms_real)
{
if (words_per_ms)
{
- uint64_t progress_left_relative_skip = progress_end_relative_skip - progress_cur_relative_skip;
+ u64 progress_left_relative_skip = progress_end_relative_skip - progress_cur_relative_skip;
- uint64_t ms_left = progress_left_relative_skip / words_per_ms;
+ u64 ms_left = progress_left_relative_skip / words_per_ms;
sec_etc = ms_left / 1000;
}
{
log_info ("Time.Estimated.: 0 secs");
}
- else if ((uint64_t) sec_etc > ETC_MAX)
+ else if ((u64) sec_etc > ETC_MAX)
{
log_info ("Time.Estimated.: > 10 Years");
}
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- char display_dev_cur[16];
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- memset (display_dev_cur, 0, sizeof (display_dev_cur));
+ char display_dev_cur[16] = { 0 };
strncpy (display_dev_cur, "0.00", 4);
format_speed_display (hashes_dev_ms[device_id] * 1000, display_dev_cur, sizeof (display_dev_cur));
- log_info ("Speed.GPU.#%d...: %9sH/s", device_id + 1, display_dev_cur);
+ log_info ("Speed.Dev.#%d...: %9sH/s", device_id + 1, display_dev_cur);
}
- char display_all_cur[16];
-
- memset (display_all_cur, 0, sizeof (display_all_cur));
+ char display_all_cur[16] = { 0 };
strncpy (display_all_cur, "0.00", 4);
format_speed_display (hashes_all_ms * 1000, display_all_cur, sizeof (display_all_cur));
- if (data.devices_cnt > 1) log_info ("Speed.GPU.#*...: %9sH/s", display_all_cur);
+ if (data.devices_active > 1) log_info ("Speed.Dev.#*...: %9sH/s", display_all_cur);
const float digests_percent = (float) data.digests_done / data.digests_cnt;
const float salts_percent = (float) data.salts_done / data.salts_cnt;
// Restore point
- uint64_t restore_point = get_lowest_words_done ();
+ u64 restore_point = get_lowest_words_done ();
- uint64_t restore_total = data.words_base;
+ u64 restore_total = data.words_base;
float percent_restore = 0;
{
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
- log_info ("Progress.......: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
- log_info ("Rejected.......: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
+ log_info ("Progress.......: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
+ log_info ("Rejected.......: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
if (data.restore_disable == 0)
{
- log_info ("Restore.Point..: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
+ log_info ("Restore.Point..: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
}
}
else
}
}
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if (data.hm_device[i].fan_supported == 1)
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ #define HM_STR_BUF_SIZE 255
+
+ if (data.hm_device[device_id].fan_supported == 1)
{
- const int temperature = hm_get_temperature_with_device_id (i);
- const int utilization = hm_get_utilization_with_device_id (i);
- const int fanspeed = hm_get_fanspeed_with_device_id (i);
+ char utilization[HM_STR_BUF_SIZE];
+ char temperature[HM_STR_BUF_SIZE];
+ char fanspeed[HM_STR_BUF_SIZE];
+
+ hm_device_val_to_str ((char *) utilization, HM_STR_BUF_SIZE, "%", hm_get_utilization_with_device_id (device_id));
+ hm_device_val_to_str ((char *) temperature, HM_STR_BUF_SIZE, "c", hm_get_temperature_with_device_id (device_id));
- if (data.vendor_id == VENDOR_ID_AMD)
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
- log_info ("HWMon.GPU.#%d...: %2d%% Util, %2dc Temp, %2d%% Fan", i + 1, utilization, temperature, fanspeed);
+ hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "%", hm_get_fanspeed_with_device_id (device_id));
}
-
- if (data.vendor_id == VENDOR_ID_NV)
+ else if (device_param->vendor_id == VENDOR_ID_NV)
{
#ifdef LINUX
- log_info ("HWMon.GPU.#%d...: %2d%% Util, %2dc Temp, %2d%% Fan", i + 1, utilization, temperature, fanspeed);
+ hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "%", hm_get_fanspeed_with_device_id (device_id));
#else
- log_info ("HWMon.GPU.#%d...: %2d%% Util, %2dc Temp, %2drpm Fan", i + 1, utilization, temperature, fanspeed);
+ hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "rpm", hm_get_fanspeed_with_device_id (device_id));
#endif
}
+
+ log_info ("HWMon.GPU.#%d...: %s Util, %s Temp, %s Fan", device_id + 1, utilization, temperature, fanspeed);
}
else
{
- const int temperature = hm_get_temperature_with_device_id (i);
- const int utilization = hm_get_utilization_with_device_id (i);
+ char utilization[HM_STR_BUF_SIZE];
+ char temperature[HM_STR_BUF_SIZE];
- log_info ("HWMon.GPU.#%d...: %2d%% Util, %2dc Temp, N/A Fan", i + 1, utilization, temperature);
+ hm_device_val_to_str ((char *) utilization, HM_STR_BUF_SIZE, "%", hm_get_utilization_with_device_id (device_id));
+ hm_device_val_to_str ((char *) temperature, HM_STR_BUF_SIZE, "c", hm_get_temperature_with_device_id (device_id));
+
+ log_info ("HWMon.GPU.#%d...: %s Util, %s Temp, N/A Fan", device_id + 1, utilization, temperature);
}
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
}
static void status_benchmark ()
if (data.words_cnt == 0) return;
- uint64_t speed_cnt[DEVICES_MAX];
- float speed_ms[DEVICES_MAX];
-
- uint device_id;
+ u64 speed_cnt[DEVICES_MAX];
+ float speed_ms[DEVICES_MAX];
- for (device_id = 0; device_id < data.devices_cnt; device_id++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
speed_cnt[device_id] = 0;
speed_ms[device_id] = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
hashes_dev_ms[device_id] = 0;
if (speed_ms[device_id])
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- char display_dev_cur[16];
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- memset (display_dev_cur, 0, sizeof (display_dev_cur));
+ if (device_param->skipped) continue;
+
+ char display_dev_cur[16] = { 0 };
strncpy (display_dev_cur, "0.00", 4);
format_speed_display (hashes_dev_ms[device_id] * 1000, display_dev_cur, sizeof (display_dev_cur));
- log_info ("Speed.GPU.#%d.: %9sH/s", device_id + 1, display_dev_cur);
+ log_info ("Speed.Dev.#%d.: %9sH/s", device_id + 1, display_dev_cur);
}
- char display_all_cur[16];
-
- memset (display_all_cur, 0, sizeof (display_all_cur));
+ char display_all_cur[16] = { 0 };
strncpy (display_all_cur, "0.00", 4);
format_speed_display (hashes_all_ms * 1000, display_all_cur, sizeof (display_all_cur));
- if (data.devices_cnt > 1) log_info ("Speed.GPU.#*.: %9sH/s", display_all_cur);
+ if (data.devices_active > 1) log_info ("Speed.Dev.#*.: %9sH/s", display_all_cur);
}
/**
* oclHashcat -only- functions
*/
-static void generate_source_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *install_dir, char *source_file)
+static void generate_source_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *shared_dir, char *source_file)
{
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (attack_kern == ATTACK_KERN_STRAIGHT)
- snprintf (source_file, 255, "%s/OpenCL/m%05d_a0.cl", install_dir, (int) kern_type);
+ snprintf (source_file, 255, "%s/OpenCL/m%05d_a0.cl", shared_dir, (int) kern_type);
else if (attack_kern == ATTACK_KERN_COMBI)
- snprintf (source_file, 255, "%s/OpenCL/m%05d_a1.cl", install_dir, (int) kern_type);
+ snprintf (source_file, 255, "%s/OpenCL/m%05d_a1.cl", shared_dir, (int) kern_type);
else if (attack_kern == ATTACK_KERN_BF)
- snprintf (source_file, 255, "%s/OpenCL/m%05d_a3.cl", install_dir, (int) kern_type);
+ snprintf (source_file, 255, "%s/OpenCL/m%05d_a3.cl", shared_dir, (int) kern_type);
}
else
- snprintf (source_file, 255, "%s/OpenCL/m%05d.cl", install_dir, (int) kern_type);
+ snprintf (source_file, 255, "%s/OpenCL/m%05d.cl", shared_dir, (int) kern_type);
}
-static void generate_cached_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *install_dir, char *device_name, char *device_version, char *driver_version, int vendor_id, char *cached_file)
+static void generate_cached_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *profile_dir, char *device_name_chksum, char *cached_file)
{
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (attack_kern == ATTACK_KERN_STRAIGHT)
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d_a0.%s_%s_%s_%d.kernel", install_dir, vendor_id, (int) kern_type, device_name, device_version, driver_version, COMPTIME);
+ snprintf (cached_file, 255, "%s/kernels/m%05d_a0.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
else if (attack_kern == ATTACK_KERN_COMBI)
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d_a1.%s_%s_%s_%d.kernel", install_dir, vendor_id, (int) kern_type, device_name, device_version, driver_version, COMPTIME);
+ snprintf (cached_file, 255, "%s/kernels/m%05d_a1.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
else if (attack_kern == ATTACK_KERN_BF)
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d_a3.%s_%s_%s_%d.kernel", install_dir, vendor_id, (int) kern_type, device_name, device_version, driver_version, COMPTIME);
+ snprintf (cached_file, 255, "%s/kernels/m%05d_a3.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
}
else
{
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d.%s_%s_%s_%d.kernel", install_dir, vendor_id, (int) kern_type, device_name, device_version, driver_version, COMPTIME);
+ snprintf (cached_file, 255, "%s/kernels/m%05d.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
}
}
-static void generate_source_kernel_mp_filename (const uint opti_type, const uint opts_type, char *install_dir, char *source_file)
+static void generate_source_kernel_mp_filename (const uint opti_type, const uint opts_type, char *shared_dir, char *source_file)
{
if ((opti_type & OPTI_TYPE_BRUTE_FORCE) && (opts_type & OPTS_TYPE_PT_GENERATE_BE))
{
- snprintf (source_file, 255, "%s/OpenCL/markov_be.cl", install_dir);
+ snprintf (source_file, 255, "%s/OpenCL/markov_be.cl", shared_dir);
}
else
{
- snprintf (source_file, 255, "%s/OpenCL/markov_le.cl", install_dir);
+ snprintf (source_file, 255, "%s/OpenCL/markov_le.cl", shared_dir);
}
}
-static void generate_cached_kernel_mp_filename (const uint opti_type, const uint opts_type, char *install_dir, char *device_name, char *device_version, char *driver_version, int vendor_id, char *cached_file)
+static void generate_cached_kernel_mp_filename (const uint opti_type, const uint opts_type, char *profile_dir, char *device_name_chksum, char *cached_file)
{
if ((opti_type & OPTI_TYPE_BRUTE_FORCE) && (opts_type & OPTS_TYPE_PT_GENERATE_BE))
{
- snprintf (cached_file, 255, "%s/kernels/%d/markov_be.%s_%s_%s_%d.kernel", install_dir, vendor_id, device_name, device_version, driver_version, COMPTIME);
+ snprintf (cached_file, 255, "%s/kernels/markov_be.%s.kernel", profile_dir, device_name_chksum);
}
else
{
- snprintf (cached_file, 255, "%s/kernels/%d/markov_le.%s_%s_%s_%d.kernel", install_dir, vendor_id, device_name, device_version, driver_version, COMPTIME);
+ snprintf (cached_file, 255, "%s/kernels/markov_le.%s.kernel", profile_dir, device_name_chksum);
}
}
-static void generate_source_kernel_amp_filename (const uint attack_kern, char *install_dir, char *source_file)
+static void generate_source_kernel_amp_filename (const uint attack_kern, char *shared_dir, char *source_file)
{
- snprintf (source_file, 255, "%s/OpenCL/amp_a%d.cl", install_dir, attack_kern);
+ snprintf (source_file, 255, "%s/OpenCL/amp_a%d.cl", shared_dir, attack_kern);
}
-static void generate_cached_kernel_amp_filename (const uint attack_kern, char *install_dir, char *device_name, char *device_version, char *driver_version, int vendor_id, char *cached_file)
+static void generate_cached_kernel_amp_filename (const uint attack_kern, char *profile_dir, char *device_name_chksum, char *cached_file)
{
- snprintf (cached_file, 255, "%s/kernels/%d/amp_a%d.%s_%s_%s_%d.kernel", install_dir, vendor_id, attack_kern, device_name, device_version, driver_version, COMPTIME);
+ snprintf (cached_file, 255, "%s/kernels/amp_a%d.%s.kernel", profile_dir, attack_kern, device_name_chksum);
}
static uint convert_from_hex (char *line_buf, const uint line_len)
for (i = 0, j = 0; j < line_len; i += 1, j += 2)
{
- line_buf[i] = hex_to_char (&line_buf[j]);
+ line_buf[i] = hex_to_u8 ((const u8 *) &line_buf[j]);
}
memset (line_buf + i, 0, line_len - i);
for (i = 0, j = 5; j < line_len - 1; i += 1, j += 2)
{
- line_buf[i] = hex_to_char (&line_buf[j]);
+ line_buf[i] = hex_to_u8 ((const u8 *) &line_buf[j]);
}
memset (line_buf + i, 0, line_len - i);
{
uint cnt = 0;
- char *buf = (char *) mymalloc (BUFSIZ);
+ char *buf = (char *) mymalloc (BUFSIZ + 1);
size_t nread_tmp = 0;
fflush (stdout);
}
-static void gidd_to_pw_t (hc_device_param_t *device_param, const uint64_t gidd, pw_t *pw)
+static void gidd_to_pw_t (hc_device_param_t *device_param, const u64 gidd, pw_t *pw)
{
hc_clEnqueueReadBuffer (device_param->command_queue, device_param->d_pws_buf, CL_TRUE, gidd * sizeof (pw_t), sizeof (pw_t), pw, 0, NULL, NULL);
}
int debug_rule_len = 0; // -1 error
uint debug_plain_len = 0;
- unsigned char debug_plain_ptr[BLOCK_SIZE];
+ u8 debug_plain_ptr[BLOCK_SIZE];
// hash
- char out_buf[4096]; memset (out_buf, 0, sizeof (out_buf));
+ char out_buf[4096] = { 0 };
ascii_digest (out_buf, salt_pos, digest_pos);
uint gidvid = plain.gidvid;
uint il_pos = plain.il_pos;
- uint64_t crackpos = device_param->words_off;
+ u64 crackpos = device_param->words_off;
uint plain_buf[16];
- unsigned char *plain_ptr = (unsigned char *) plain_buf;
+ u8 *plain_ptr = (u8 *) plain_buf;
unsigned int plain_len = 0;
if (data.attack_mode == ATTACK_MODE_STRAIGHT)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
{
memset (debug_rule_buf, 0, sizeof (debug_rule_buf));
- debug_rule_len = gpu_rule_to_cpu_rule (debug_rule_buf, &data.gpu_rules_buf[off]);
+ debug_rule_len = kernel_rule_to_cpu_rule (debug_rule_buf, &data.kernel_rules_buf[off]);
}
// save plain
}
}
- plain_len = apply_rules (data.gpu_rules_buf[off].cmds, &plain_buf[0], &plain_buf[4], plain_len);
+ plain_len = apply_rules (data.kernel_rules_buf[off].cmds, &plain_buf[0], &plain_buf[4], plain_len);
crackpos += gidvid;
- crackpos *= data.gpu_rules_cnt;
+ crackpos *= data.kernel_rules_cnt;
crackpos += device_param->innerloop_pos + il_pos;
if (plain_len > data.pw_max) plain_len = data.pw_max;
}
else if (data.attack_mode == ATTACK_MODE_COMBI)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
}
else if (data.attack_mode == ATTACK_MODE_BF)
{
- uint64_t l_off = device_param->kernel_params_mp_l_buf64[3] + gidvid;
- uint64_t r_off = device_param->kernel_params_mp_r_buf64[3] + il_pos;
+ u64 l_off = device_param->kernel_params_mp_l_buf64[3] + gidvid;
+ u64 r_off = device_param->kernel_params_mp_r_buf64[3] + il_pos;
uint l_start = device_param->kernel_params_mp_l_buf32[5];
uint r_start = device_param->kernel_params_mp_r_buf32[5];
}
else if (data.attack_mode == ATTACK_MODE_HYBRID1)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
plain_len = pw.pw_len;
- uint64_t off = device_param->kernel_params_mp_buf64[3] + il_pos;
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
uint start = 0;
uint stop = device_param->kernel_params_mp_buf32[4];
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
plain_len = pw.pw_len;
- uint64_t off = device_param->kernel_params_mp_buf64[3] + il_pos;
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
uint start = 0;
uint stop = device_param->kernel_params_mp_buf32[4];
}
}
- if (data.opti_type & OPTI_TYPE_BRUTE_FORCE) // lots of optimizations can happen here
+ if (data.attack_mode == ATTACK_MODE_BF)
{
- if (data.opti_type & OPTI_TYPE_SINGLE_HASH)
+ if (data.opti_type & OPTI_TYPE_BRUTE_FORCE) // lots of optimizations can happen here
{
- if (data.opti_type & OPTI_TYPE_APPENDED_SALT)
+ if (data.opti_type & OPTI_TYPE_SINGLE_HASH)
{
- plain_len = plain_len - data.salts_buf[0].salt_len;
+ if (data.opti_type & OPTI_TYPE_APPENDED_SALT)
+ {
+ plain_len = plain_len - data.salts_buf[0].salt_len;
+ }
}
- }
- if (data.opts_type & OPTS_TYPE_PT_UNICODE)
- {
- for (uint i = 0, j = 0; i < plain_len; i += 2, j += 1)
+ if (data.opts_type & OPTS_TYPE_PT_UNICODE)
{
- plain_ptr[j] = plain_ptr[i];
- }
+ for (uint i = 0, j = 0; i < plain_len; i += 2, j += 1)
+ {
+ plain_ptr[j] = plain_ptr[i];
+ }
- plain_len = plain_len / 2;
+ plain_len = plain_len / 2;
+ }
}
}
hc_clEnqueueReadBuffer (device_param->command_queue, device_param->d_result, CL_TRUE, 0, device_param->size_results, device_param->result, 0, NULL, NULL);
- for (uint i = 0; i < GPU_THREADS; i++) if (device_param->result[i] == 1) found = 1;
+ for (uint i = 0; i < KERNEL_THREADS; i++) if (device_param->result[i] == 1) found = 1;
if (found == 1)
{
if (data.opts_type & OPTS_TYPE_PT_NEVERCRACK)
{
- // we need to reset cracked state on the gpu
+ // we need to reset cracked state on the device
// otherwise host thinks again and again the hash was cracked
// and returns invalid password each time
{
char *hashfile = data.hashfile;
- char new_hashfile[256];
- char old_hashfile[256];
-
- memset (new_hashfile, 0, sizeof (new_hashfile));
- memset (old_hashfile, 0, sizeof (old_hashfile));
+ char new_hashfile[256] = { 0 };
+ char old_hashfile[256] = { 0 };
snprintf (new_hashfile, 255, "%s.new", hashfile);
snprintf (old_hashfile, 255, "%s.old", hashfile);
if (data.hash_mode != 2500)
{
- char out_buf[4096];
-
- memset (out_buf, 0, sizeof (out_buf));
+ char out_buf[4096] = { 0 };
if (data.username == 1)
{
unlink (old_hashfile);
}
-static float find_gpu_blocks_div (const uint64_t total_left, const uint gpu_blocks_all)
+static float find_kernel_blocks_div (const u64 total_left, const uint kernel_blocks_all)
{
- // function called only in case gpu_blocks_all > words_left)
+ // function called only in case (kernel_blocks_all > words_left)
- float gpu_blocks_div = (float) (total_left) / gpu_blocks_all;
+ float kernel_blocks_div = (float) (total_left) / kernel_blocks_all;
- gpu_blocks_div += gpu_blocks_div / 100;
+ kernel_blocks_div += kernel_blocks_div / 100;
- uint32_t gpu_blocks_new = (uint32_t) (gpu_blocks_all * gpu_blocks_div);
+ u32 kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
- while (gpu_blocks_new < total_left)
+ while (kernel_blocks_new < total_left)
{
- gpu_blocks_div += gpu_blocks_div / 100;
+ kernel_blocks_div += kernel_blocks_div / 100;
- gpu_blocks_new = (uint32_t) (gpu_blocks_all * gpu_blocks_div);
+ kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
}
if (data.quiet == 0)
fflush (stdout);
}
- if ((gpu_blocks_all * gpu_blocks_div) < 8) return 1;
+ if ((kernel_blocks_all * kernel_blocks_div) < 8) return 1;
- return gpu_blocks_div;
+ return kernel_blocks_div;
}
static void run_kernel (const uint kern_run, hc_device_param_t *device_param, const uint num)
device_param->kernel_params_buf32[30] = data.combs_mode;
device_param->kernel_params_buf32[31] = num;
- uint gpu_threads = device_param->gpu_threads;
+ uint kernel_threads = device_param->kernel_threads;
- while (num_elements % gpu_threads) num_elements++;
+ while (num_elements % kernel_threads) num_elements++;
cl_kernel kernel = NULL;
if ((data.opts_type & OPTS_TYPE_PT_BITSLICE) && (data.attack_mode == ATTACK_MODE_BF))
{
- const size_t global_work_size[3] = { num_elements, 32, 1 };
- const size_t local_work_size[3] = { gpu_threads / 32, 32, 1 };
+ const size_t global_work_size[3] = { num_elements, 32, 1 };
+ const size_t local_work_size[3] = { kernel_threads / 32, 32, 1 };
hc_clEnqueueNDRangeKernel (device_param->command_queue, kernel, 2, NULL, global_work_size, local_work_size, 0, NULL, NULL);
}
else
{
- const size_t global_work_size[3] = { num_elements, 1, 1 };
- const size_t local_work_size[3] = { gpu_threads, 1, 1 };
+ const size_t global_work_size[3] = { num_elements, 1, 1 };
+ const size_t local_work_size[3] = { kernel_threads, 1, 1 };
hc_clEnqueueNDRangeKernel (device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
}
}
// causes problems with special threads like in bcrypt
- // const uint gpu_threads = device_param->gpu_threads;
+ // const uint kernel_threads = device_param->kernel_threads;
- const uint gpu_threads = GPU_THREADS;
+ const uint kernel_threads = KERNEL_THREADS;
- while (num_elements % gpu_threads) num_elements++;
+ while (num_elements % kernel_threads) num_elements++;
cl_kernel kernel = NULL;
}
const size_t global_work_size[3] = { num_elements, 1, 1 };
- const size_t local_work_size[3] = { gpu_threads, 1, 1 };
+ const size_t local_work_size[3] = { kernel_threads, 1, 1 };
hc_clEnqueueNDRangeKernel (device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
{
uint num_elements = num;
- uint gpu_threads = device_param->gpu_threads;
+ uint kernel_threads = device_param->kernel_threads;
- while (num_elements % gpu_threads) num_elements++;
+ while (num_elements % kernel_threads) num_elements++;
cl_kernel kernel = device_param->kernel_tb;
const size_t global_work_size[3] = { num_elements, 1, 1 };
- const size_t local_work_size[3] = { gpu_threads, 1, 1 };
+ const size_t local_work_size[3] = { kernel_threads, 1, 1 };
hc_clEnqueueNDRangeKernel (device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
{
const uint num_elements = 1024; // fixed
- const uint gpu_threads = 32;
+ const uint kernel_threads = 32;
cl_kernel kernel = device_param->kernel_tm;
const size_t global_work_size[3] = { num_elements, 1, 1 };
- const size_t local_work_size[3] = { gpu_threads, 1, 1 };
+ const size_t local_work_size[3] = { kernel_threads, 1, 1 };
hc_clEnqueueNDRangeKernel (device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
device_param->kernel_params_amp_buf32[6] = num_elements;
// causes problems with special threads like in bcrypt
- // const uint gpu_threads = device_param->gpu_threads;
+ // const uint kernel_threads = device_param->kernel_threads;
- const uint gpu_threads = GPU_THREADS;
+ const uint kernel_threads = KERNEL_THREADS;
- while (num_elements % gpu_threads) num_elements++;
+ while (num_elements % kernel_threads) num_elements++;
cl_kernel kernel = device_param->kernel_amp;
hc_clSetKernelArg (kernel, 6, sizeof (cl_uint), device_param->kernel_params_amp[6]);
const size_t global_work_size[3] = { num_elements, 1, 1 };
- const size_t local_work_size[3] = { gpu_threads, 1, 1 };
+ const size_t local_work_size[3] = { kernel_threads, 1, 1 };
hc_clEnqueueNDRangeKernel (device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
static void run_kernel_bzero (hc_device_param_t *device_param, cl_mem buf, const uint size)
{
- if (data.vendor_id == VENDOR_ID_AMD)
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
 + // So far tested, AMD is the only vendor supporting this OpenCL 1.2 function without segfaulting
+
const cl_uchar zero = 0;
hc_clEnqueueFillBuffer (device_param->command_queue, buf, &zero, sizeof (cl_uchar), 0, size, 0, NULL, NULL);
}
-
- if (data.vendor_id == VENDOR_ID_NV)
+ else
{
// NOTE: clEnqueueFillBuffer () always fails with -59
- // IOW, it's not supported by Nvidia ForceWare <= 352.21,
+ // IOW, it's not supported by Nvidia ForceWare <= 352.21, also pocl segfaults
// How's that possible, OpenCL 1.2 support is advertised??
// We need to workaround...
}
else if (data.attack_kern == ATTACK_KERN_BF)
{
- const uint64_t off = device_param->words_off;
+ const u64 off = device_param->words_off;
device_param->kernel_params_mp_l_buf64[3] = off;
static void run_cracker (hc_device_param_t *device_param, const uint pw_cnt, const uint pws_cnt)
{
- const uint gpu_loops = data.gpu_loops;
+ const uint kernel_loops = data.kernel_loops;
// init speed timer
uint innerloop_step = 0;
uint innerloop_cnt = 0;
- if (data.attack_exec == ATTACK_EXEC_ON_GPU) innerloop_step = gpu_loops;
- else innerloop_step = 1;
+ if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL) innerloop_step = kernel_loops;
+ else innerloop_step = 1;
- if (data.attack_kern == ATTACK_KERN_STRAIGHT) innerloop_cnt = data.gpu_rules_cnt;
+ if (data.attack_kern == ATTACK_KERN_STRAIGHT) innerloop_cnt = data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) innerloop_cnt = data.combs_cnt;
else if (data.attack_kern == ATTACK_KERN_BF) innerloop_cnt = data.bfs_cnt;
if (run_rule_engine (data.rule_len_r, data.rule_buf_r))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = _old_apply_rule (data.rule_buf_r, data.rule_len_r, line_buf, line_len, rule_buf_out);
line_len = MIN (line_len, PW_DICTMAX);
- char *ptr = (char *) device_param->combs_buf[i].i;
+ u8 *ptr = (u8 *) device_param->combs_buf[i].i;
memcpy (ptr, line_buf_new, line_len);
}
else if (data.attack_mode == ATTACK_MODE_BF)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_r_buf64[3] = off;
}
else if (data.attack_mode == ATTACK_MODE_HYBRID1)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_buf64[3] = off;
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_buf64[3] = off;
if (data.attack_mode == ATTACK_MODE_STRAIGHT)
{
- hc_clEnqueueCopyBuffer (device_param->command_queue, device_param->d_rules, device_param->d_rules_c, innerloop_pos * sizeof (gpu_rule_t), 0, innerloop_left * sizeof (gpu_rule_t), 0, NULL, NULL);
+ hc_clEnqueueCopyBuffer (device_param->command_queue, device_param->d_rules, device_param->d_rules_c, innerloop_pos * sizeof (kernel_rule_t), 0, innerloop_left * sizeof (kernel_rule_t), 0, NULL, NULL);
}
else if (data.attack_mode == ATTACK_MODE_COMBI)
{
hc_clEnqueueCopyBuffer (device_param->command_queue, device_param->d_combs, device_param->d_combs_c, 0, 0, innerloop_left * sizeof (comb_t), 0, NULL, NULL);
}
- if (data.attack_exec == ATTACK_EXEC_ON_GPU)
+ if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (data.attack_mode == ATTACK_MODE_BF)
{
uint iter = salt_buf->salt_iter;
- for (uint loop_pos = 0; loop_pos < iter; loop_pos += gpu_loops)
+ for (uint loop_pos = 0; loop_pos < iter; loop_pos += kernel_loops)
{
uint loop_left = iter - loop_pos;
- loop_left = MIN (loop_left, gpu_loops);
+ loop_left = MIN (loop_left, kernel_loops);
device_param->kernel_params_buf32[25] = loop_pos;
device_param->kernel_params_buf32[26] = loop_left;
* progress
*/
- uint64_t perf_sum_all = (uint64_t) pw_cnt * (uint64_t) innerloop_left;
+ u64 perf_sum_all = (u64) pw_cnt * (u64) innerloop_left;
hc_thread_mutex_lock (mux_counter);
return;
}
-static void get_next_word_lm (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_lm (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr >= 'a' && *ptr <= 'z') *ptr -= 0x20;
*len = sz;
}
-static void get_next_word_uc (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_uc (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr >= 'a' && *ptr <= 'z') *ptr -= 0x20;
*len = sz;
}
-static void get_next_word_std (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_std (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr != '\n') continue;
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
}
#ifdef _POSIX
-static uint64_t count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, size_t *dictstat_nmemb)
+static u64 count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, size_t *dictstat_nmemb)
#endif
#ifdef _WIN
-static uint64_t count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, uint *dictstat_nmemb)
+static u64 count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, uint *dictstat_nmemb)
#endif
{
hc_signal (NULL);
{
if (d_cache)
{
- uint64_t cnt = d_cache->cnt;
+ u64 cnt = d_cache->cnt;
- uint64_t keyspace = cnt;
+ u64 keyspace = cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
- keyspace *= data.gpu_rules_cnt;
+ keyspace *= data.kernel_rules_cnt;
}
else if (data.attack_kern == ATTACK_KERN_COMBI)
{
time_t now = 0;
time_t prev = 0;
- uint64_t comp = 0;
- uint64_t cnt = 0;
- uint64_t cnt2 = 0;
+ u64 comp = 0;
+ u64 cnt = 0;
+ u64 cnt2 = 0;
while (!feof (fd))
{
comp += wl_data->cnt;
- uint32_t i = 0;
+ u32 i = 0;
while (i < wl_data->cnt)
{
- uint32_t len;
- uint32_t off;
+ u32 len;
+ u32 off;
get_next_word_func (wl_data->buf + i, wl_data->cnt - i, &len, &off);
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
{
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
- cnt += data.gpu_rules_cnt;
+ cnt += data.kernel_rules_cnt;
}
else if (data.attack_kern == ATTACK_KERN_COMBI)
{
memcpy (p2->hi1, p1->hi1, 64 * sizeof (uint));
}
-static uint pw_add_to_hc1 (hc_device_param_t *device_param, const uint8_t *pw_buf, const uint pw_len)
+static uint pw_add_to_hc1 (hc_device_param_t *device_param, const u8 *pw_buf, const uint pw_len)
{
if (data.devices_status == STATUS_BYPASS) return 0;
uint cache_cnt = pw_cache->cnt;
- uint8_t *pw_hc1 = pw_cache->pw_buf.hc1[cache_cnt];
+ u8 *pw_hc1 = pw_cache->pw_buf.hc1[cache_cnt];
memcpy (pw_hc1, pw_buf, pw_len);
uint runtime_check = 0;
uint remove_check = 0;
uint status_check = 0;
- uint hwmon_check = 0;
uint restore_check = 0;
uint restore_left = data.restore_timer;
uint remove_left = data.remove_timer;
uint status_left = data.status_timer;
+ #ifdef HAVE_HWMON
+ uint hwmon_check = 0;
+
// these variables are mainly used for fan control (AMD only)
int *fan_speed_chgd = (int *) mycalloc (data.devices_cnt, sizeof (int));
int *temp_diff_old = (int *) mycalloc (data.devices_cnt, sizeof (int));
int *temp_diff_sum = (int *) mycalloc (data.devices_cnt, sizeof (int));
+ #ifdef HAVE_ADL
int temp_threshold = 1; // degrees celcius
int fan_speed_min = 15; // in percentage
int fan_speed_max = 100;
+ #endif // HAVE_ADL
time_t last_temp_check_time;
+ #endif // HAVE_HWMON
uint sleep_time = 1;
status_check = 1;
}
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
time (&last_temp_check_time);
hwmon_check = 1;
}
+ #endif
- if ((runtime_check == 0) && (remove_check == 0) && (status_check == 0) && (hwmon_check == 0) && (restore_check == 0))
+ if ((runtime_check == 0) && (remove_check == 0) && (status_check == 0) && (restore_check == 0))
{
+ #ifdef HAVE_HWMON
+ if (hwmon_check == 0)
+ #endif
return (p);
}
if (data.devices_status != STATUS_RUNNING) continue;
+ #ifdef HAVE_HWMON
if (hwmon_check == 1)
{
hc_thread_mutex_lock (mux_adl);
if (Ta == 0) Ta = 1;
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- const int temperature = hm_get_temperature_with_device_id (i);
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+
+ const int temperature = hm_get_temperature_with_device_id (device_id);
if (temperature > (int) data.gpu_temp_abort)
{
- log_error ("ERROR: Temperature limit on GPU %d reached, aborting...", i + 1);
+ log_error ("ERROR: Temperature limit on GPU %d reached, aborting...", device_id + 1);
if (data.devices_status != STATUS_QUIT) myabort ();
break;
}
+ #ifdef HAVE_ADL
const int gpu_temp_retain = data.gpu_temp_retain;
if (gpu_temp_retain) // VENDOR_ID_AMD implied
{
- if (data.hm_device[i].fan_supported == 1)
+ if (data.hm_device[device_id].fan_supported == 1)
{
int temp_cur = temperature;
int temp_diff_new = gpu_temp_retain - temp_cur;
- temp_diff_sum[i] = temp_diff_sum[i] + temp_diff_new;
+ temp_diff_sum[device_id] = temp_diff_sum[device_id] + temp_diff_new;
// calculate Ta value (time difference in seconds between the last check and this check)
// PID controller (3-term controller: proportional - Kp, integral - Ki, derivative - Kd)
- int fan_diff_required = (int) (Kp * (float)temp_diff_new + Ki * Ta * (float)temp_diff_sum[i] + Kd * ((float)(temp_diff_new - temp_diff_old[i])) / Ta);
+ int fan_diff_required = (int) (Kp * (float)temp_diff_new + Ki * Ta * (float)temp_diff_sum[device_id] + Kd * ((float)(temp_diff_new - temp_diff_old[device_id])) / Ta);
if (abs (fan_diff_required) >= temp_threshold)
{
- const int fan_speed_cur = hm_get_fanspeed_with_device_id (i);
+ const int fan_speed_cur = hm_get_fanspeed_with_device_id (device_id);
int fan_speed_level = fan_speed_cur;
- if (fan_speed_chgd[i] == 0) fan_speed_level = temp_cur;
+ if (fan_speed_chgd[device_id] == 0) fan_speed_level = temp_cur;
int fan_speed_new = fan_speed_level - fan_diff_required;
if (fan_speed_new != fan_speed_cur)
{
- int freely_change_fan_speed = (fan_speed_chgd[i] == 1);
+ int freely_change_fan_speed = (fan_speed_chgd[device_id] == 1);
int fan_speed_must_change = (fan_speed_new > fan_speed_cur);
if ((freely_change_fan_speed == 1) || (fan_speed_must_change == 1))
{
- hm_set_fanspeed_with_device_id_amd (i, fan_speed_new);
+ hm_set_fanspeed_with_device_id_amd (device_id, fan_speed_new);
- fan_speed_chgd[i] = 1;
+ fan_speed_chgd[device_id] = 1;
}
- temp_diff_old[i] = temp_diff_new;
+ temp_diff_old[device_id] = temp_diff_new;
}
}
}
}
+ #endif // HAVE_ADL
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
if (restore_check == 1)
{
}
}
+ #ifdef HAVE_HWMON
myfree (fan_speed_chgd);
myfree (temp_diff_old);
myfree (temp_diff_sum);
+ #endif
p = NULL;
int (*parse_func) (char *, uint, hash_t *) = data.parse_func;
// buffers
- hash_t hash_buf;
-
- memset (&hash_buf, 0, sizeof (hash_buf));
+ hash_t hash_buf = { 0, 0, 0, 0, 0 };
hash_buf.digest = mymalloc (dgst_size);
pke[i] = byte_swap_32 (wpa->pke[i]);
}
- unsigned char mac1[6];
- unsigned char mac2[6];
+ u8 mac1[6];
+ u8 mac2[6];
memcpy (mac1, pke_ptr + 23, 6);
memcpy (mac2, pke_ptr + 29, 6);
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != (unsigned char) hex_to_char (&mac1_pos[j]))
+ if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
cracked = 0;
break;
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != (unsigned char) hex_to_char (&mac2_pos[j]))
+ if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
cracked = 0;
break;
return (p);
}
-static uint get_work (hc_device_param_t *device_param, const uint64_t max)
+static uint get_work (hc_device_param_t *device_param, const u64 max)
{
hc_thread_mutex_lock (mux_dispatcher);
- const uint64_t words_cur = data.words_cur;
- const uint64_t words_base = (data.limit == 0) ? data.words_base : data.limit;
+ const u64 words_cur = data.words_cur;
+ const u64 words_base = (data.limit == 0) ? data.words_base : data.limit;
device_param->words_off = words_cur;
- const uint64_t words_left = words_base - words_cur;
+ const u64 words_left = words_base - words_cur;
- if (data.gpu_blocks_all > words_left)
+ if (data.kernel_blocks_all > words_left)
{
- if (data.gpu_blocks_div == 0)
+ if (data.kernel_blocks_div == 0)
{
- data.gpu_blocks_div = find_gpu_blocks_div (words_left, data.gpu_blocks_all);
+ data.kernel_blocks_div = find_kernel_blocks_div (words_left, data.kernel_blocks_all);
}
}
- if (data.gpu_blocks_div)
+ if (data.kernel_blocks_div)
{
- if (device_param->gpu_blocks == device_param->gpu_blocks_user)
+ if (device_param->kernel_blocks == device_param->kernel_blocks_user)
{
- const uint32_t gpu_blocks_new = (float) device_param->gpu_blocks * data.gpu_blocks_div;
- const uint32_t gpu_power_new = gpu_blocks_new;
+ const u32 kernel_blocks_new = (float) device_param->kernel_blocks * data.kernel_blocks_div;
+ const u32 kernel_power_new = kernel_blocks_new;
- if (gpu_blocks_new < device_param->gpu_blocks)
+ if (kernel_blocks_new < device_param->kernel_blocks)
{
- device_param->gpu_blocks = gpu_blocks_new;
- device_param->gpu_power = gpu_power_new;
+ device_param->kernel_blocks = kernel_blocks_new;
+ device_param->kernel_power = kernel_power_new;
}
}
}
- const uint gpu_blocks = device_param->gpu_blocks;
+ const uint kernel_blocks = device_param->kernel_blocks;
- uint work = MIN (words_left, gpu_blocks);
+ uint work = MIN (words_left, kernel_blocks);
work = MIN (work, max);
{
hc_device_param_t *device_param = (hc_device_param_t *) p;
+ if (device_param->skipped) return NULL;
+
const uint attack_kern = data.attack_kern;
- const uint gpu_blocks = device_param->gpu_blocks;
+ const uint kernel_blocks = device_param->kernel_blocks;
while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
uint words_cur = 0;
- while (words_cur < gpu_blocks)
+ while (words_cur < kernel_blocks)
{
char buf[BUFSIZ];
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
- data.words_progress_rejected[salt_pos] += data.gpu_rules_cnt;
+ data.words_progress_rejected[salt_pos] += data.kernel_rules_cnt;
}
hc_thread_mutex_unlock (mux_counter);
}
}
- device_param->pw_add (device_param, (uint8_t *) line_buf, line_len);
+ device_param->pw_add (device_param, (u8 *) line_buf, line_len);
words_cur++;
device_param->pws_cnt = pws_cnt;
- if (pws_cnt == device_param->gpu_power_user) break;
+ if (pws_cnt == device_param->kernel_power_user) break;
}
const uint pw_cnt = device_param->pw_cnt;
{
hc_device_param_t *device_param = (hc_device_param_t *) p;
+ if (device_param->skipped) return NULL;
+
const uint attack_mode = data.attack_mode;
const uint attack_kern = data.attack_kern;
if (work == 0) break;
- const uint64_t words_off = device_param->words_off;
- const uint64_t words_fin = words_off + work;
+ const u64 words_off = device_param->words_off;
+ const u64 words_fin = words_off + work;
const uint pw_cnt = work;
const uint pws_cnt = work;
wl_data->cnt = 0;
wl_data->pos = 0;
- uint64_t words_cur = 0;
+ u64 words_cur = 0;
while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
- uint64_t words_off = 0;
- uint64_t words_fin = 0;
+ u64 words_off = 0;
+ u64 words_fin = 0;
- uint64_t max = -1;
+ u64 max = -1;
while (max)
{
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
- data.words_progress_rejected[salt_pos] += data.gpu_rules_cnt;
+ data.words_progress_rejected[salt_pos] += data.kernel_rules_cnt;
}
hc_thread_mutex_unlock (mux_counter);
}
}
- device_param->pw_add (device_param, (uint8_t *) line_buf, line_len);
+ device_param->pw_add (device_param, (u8 *) line_buf, line_len);
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
device_param->pws_cnt = pws_cnt;
- if (pws_cnt == device_param->gpu_power_user) break;
+ if (pws_cnt == device_param->kernel_power_user) break;
}
const uint pw_cnt = device_param->pw_cnt;
return NULL;
}
-static void weak_hash_check (hc_device_param_t *device_param, const uint salt_pos, const uint gpu_loops)
+static void weak_hash_check (hc_device_param_t *device_param, const uint salt_pos, const uint kernel_loops)
{
salt_t *salt_buf = &data.salts_buf[salt_pos];
device_param->kernel_params_buf32[30] = 0;
device_param->kernel_params_buf32[31] = 1;
- char *dictfile_old = data.dictfile;
- char *dictfile2_old = data.dictfile2;
- char *mask_old = data.mask;
+ char *dictfile_old = data.dictfile;
const char *weak_hash_check = "weak-hash-check";
- data.dictfile = (char *) weak_hash_check;
- data.dictfile2 = (char *) weak_hash_check;
- data.mask = (char *) weak_hash_check;
+ data.dictfile = (char *) weak_hash_check;
+
+ uint cmd0_rule_old = data.kernel_rules_buf[0].cmds[0];
+
+ data.kernel_rules_buf[0].cmds[0] = 0;
/**
* run the kernel
*/
- if (data.attack_exec == ATTACK_EXEC_ON_GPU)
+ if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
run_kernel (KERN_RUN_1, device_param, 1);
}
const uint iter = salt_buf->salt_iter;
- for (uint loop_pos = 0; loop_pos < iter; loop_pos += gpu_loops)
+ for (uint loop_pos = 0; loop_pos < iter; loop_pos += kernel_loops)
{
uint loop_left = iter - loop_pos;
- loop_left = MIN (loop_left, gpu_loops);
+ loop_left = MIN (loop_left, kernel_loops);
device_param->kernel_params_buf32[25] = loop_pos;
device_param->kernel_params_buf32[26] = loop_left;
device_param->kernel_params_buf32[30] = 0;
device_param->kernel_params_buf32[31] = 0;
- data.dictfile = dictfile_old;
- data.dictfile2 = dictfile2_old;
- data.mask = mask_old;
+ data.dictfile = dictfile_old;
+
+ data.kernel_rules_buf[0].cmds[0] = cmd0_rule_old;
}
// hlfmt hashcat
// wrapper around mymalloc for ADL
+#if defined(HAVE_HWMON) && defined(HAVE_ADL)
void *__stdcall ADL_Main_Memory_Alloc (const int iSize)
{
return mymalloc (iSize);
}
+#endif
-static uint generate_bitmaps (const uint digests_cnt, const uint dgst_size, const uint dgst_shifts, char *digests_buf_ptr, const uint bitmap_mask, const uint bitmap_size, uint *bitmap_a, uint *bitmap_b, uint *bitmap_c, uint *bitmap_d, const uint64_t collisions_max)
+static uint generate_bitmaps (const uint digests_cnt, const uint dgst_size, const uint dgst_shifts, char *digests_buf_ptr, const uint bitmap_mask, const uint bitmap_size, uint *bitmap_a, uint *bitmap_b, uint *bitmap_c, uint *bitmap_d, const u64 collisions_max)
{
- uint64_t collisions = 0;
+ u64 collisions = 0;
const uint dgst_pos0 = data.dgst_pos0;
const uint dgst_pos1 = data.dgst_pos1;
digests_buf_ptr += dgst_size;
- const uint val0 = 1 << (digest_ptr[dgst_pos0] & 0x1f);
- const uint val1 = 1 << (digest_ptr[dgst_pos1] & 0x1f);
- const uint val2 = 1 << (digest_ptr[dgst_pos2] & 0x1f);
- const uint val3 = 1 << (digest_ptr[dgst_pos3] & 0x1f);
+ const uint val0 = 1u << (digest_ptr[dgst_pos0] & 0x1f);
+ const uint val1 = 1u << (digest_ptr[dgst_pos1] & 0x1f);
+ const uint val2 = 1u << (digest_ptr[dgst_pos2] & 0x1f);
+ const uint val3 = 1u << (digest_ptr[dgst_pos3] & 0x1f);
const uint idx0 = (digest_ptr[dgst_pos0] >> dgst_shifts) & bitmap_mask;
const uint idx1 = (digest_ptr[dgst_pos1] >> dgst_shifts) & bitmap_mask;
putenv ((char *) "DISPLAY=:0");
}
- /*
if (getenv ("GPU_MAX_ALLOC_PERCENT") == NULL)
putenv ((char *) "GPU_MAX_ALLOC_PERCENT=100");
+ if (getenv ("CPU_MAX_ALLOC_PERCENT") == NULL)
+ putenv ((char *) "CPU_MAX_ALLOC_PERCENT=100");
+
if (getenv ("GPU_USE_SYNC_OBJECTS") == NULL)
putenv ((char *) "GPU_USE_SYNC_OBJECTS=1");
- */
/**
* Real init
uint username = USERNAME;
uint remove = REMOVE;
uint remove_timer = REMOVE_TIMER;
- uint64_t skip = SKIP;
- uint64_t limit = LIMIT;
+ u64 skip = SKIP;
+ u64 limit = LIMIT;
uint keyspace = KEYSPACE;
uint potfile_disable = POTFILE_DISABLE;
uint debug_mode = DEBUG_MODE;
uint increment = INCREMENT;
uint increment_min = INCREMENT_MIN;
uint increment_max = INCREMENT_MAX;
+ #ifndef OSX
char *cpu_affinity = NULL;
- char *gpu_devices = NULL;
+ #endif
+ char *opencl_devices = NULL;
+ char *opencl_platforms = NULL;
+ char *opencl_device_types = NULL;
+ uint opencl_vector_width = OPENCL_VECTOR_WIDTH;
char *truecrypt_keyfiles = NULL;
uint workload_profile = WORKLOAD_PROFILE;
- uint gpu_accel = GPU_ACCEL;
- uint gpu_loops = GPU_LOOPS;
+ uint kernel_accel = KERNEL_ACCEL;
+ uint kernel_loops = KERNEL_LOOPS;
+ #ifdef HAVE_HWMON
uint gpu_temp_disable = GPU_TEMP_DISABLE;
uint gpu_temp_abort = GPU_TEMP_ABORT;
uint gpu_temp_retain = GPU_TEMP_RETAIN;
+ #ifdef HAVE_ADL
uint powertune_enable = POWERTUNE_ENABLE;
+ #endif
+ #endif
uint logfile_disable = LOGFILE_DISABLE;
uint segment_size = SEGMENT_SIZE;
uint scrypt_tmto = SCRYPT_TMTO;
#define IDX_MARKOV_THRESHOLD 't'
#define IDX_MARKOV_HCSTAT 0xff24
#define IDX_CPU_AFFINITY 0xff25
- #define IDX_GPU_DEVICES 'd'
+ #define IDX_OPENCL_DEVICES 'd'
+ #define IDX_OPENCL_PLATFORMS 0xff72
+ #define IDX_OPENCL_DEVICE_TYPES 0xff73
+ #define IDX_OPENCL_VECTOR_WIDTH 0xff74
#define IDX_WORKLOAD_PROFILE 'w'
- #define IDX_GPU_ACCEL 'n'
- #define IDX_GPU_LOOPS 'u'
+ #define IDX_KERNEL_ACCEL 'n'
+ #define IDX_KERNEL_LOOPS 'u'
#define IDX_GPU_TEMP_DISABLE 0xff29
#define IDX_GPU_TEMP_ABORT 0xff30
#define IDX_GPU_TEMP_RETAIN 0xff31
{"markov-classic", no_argument, 0, IDX_MARKOV_CLASSIC},
{"markov-threshold", required_argument, 0, IDX_MARKOV_THRESHOLD},
{"markov-hcstat", required_argument, 0, IDX_MARKOV_HCSTAT},
+ #ifndef OSX
{"cpu-affinity", required_argument, 0, IDX_CPU_AFFINITY},
- {"gpu-devices", required_argument, 0, IDX_GPU_DEVICES},
+ #endif
+ {"opencl-devices", required_argument, 0, IDX_OPENCL_DEVICES},
+ {"opencl-platforms", required_argument, 0, IDX_OPENCL_PLATFORMS},
+ {"opencl-device-types", required_argument, 0, IDX_OPENCL_DEVICE_TYPES},
+ {"opencl-vector-width", required_argument, 0, IDX_OPENCL_VECTOR_WIDTH},
{"workload-profile", required_argument, 0, IDX_WORKLOAD_PROFILE},
- {"gpu-accel", required_argument, 0, IDX_GPU_ACCEL},
- {"gpu-loops", required_argument, 0, IDX_GPU_LOOPS},
+ {"kernel-accel", required_argument, 0, IDX_KERNEL_ACCEL},
+ {"kernel-loops", required_argument, 0, IDX_KERNEL_LOOPS},
+ #ifdef HAVE_HWMON
{"gpu-temp-disable", no_argument, 0, IDX_GPU_TEMP_DISABLE},
{"gpu-temp-abort", required_argument, 0, IDX_GPU_TEMP_ABORT},
{"gpu-temp-retain", required_argument, 0, IDX_GPU_TEMP_RETAIN},
+ #ifdef HAVE_ADL
{"powertune-enable", no_argument, 0, IDX_POWERTUNE_ENABLE},
+ #endif
+ #endif // HAVE_HWMON
{"logfile-disable", no_argument, 0, IDX_LOGFILE_DISABLE},
{"truecrypt-keyfiles", required_argument, 0, IDX_TRUECRYPT_KEYFILES},
{"segment-size", required_argument, 0, IDX_SEGMENT_SIZE},
char **rp_files = (char **) mycalloc (argc, sizeof (char *));
- int option_index;
- int c;
+ int option_index = 0;
+ int c = -1;
optind = 1;
optopt = 0;
- option_index = 0;
while (((c = getopt_long (argc, argv, short_options, long_options, &option_index)) != -1) && optopt == 0)
{
}
/**
- * session
+ * session needs to be set, always!
*/
if (session == NULL) session = (char *) PROGNAME;
- size_t session_size = strlen (session) + 32;
+ /**
+ * folders, as discussed on https://github.com/hashcat/oclHashcat/issues/20
+ */
+
+ char *exec_path = get_exec_path ();
+
+ #ifdef LINUX
+
+ char *resolved_install_folder = realpath (INSTALL_FOLDER, NULL);
+ char *resolved_exec_path = realpath (exec_path, NULL);
+
+ char *install_dir = get_install_dir (resolved_exec_path);
+ char *profile_dir = NULL;
+ char *session_dir = NULL;
+ char *shared_dir = NULL;
+
+ if (strcmp (install_dir, resolved_install_folder) == 0)
+ {
+ struct passwd *pw = getpwuid (getuid ());
+
+ const char *homedir = pw->pw_dir;
+
+ profile_dir = get_profile_dir (homedir);
+ session_dir = get_session_dir (profile_dir);
+ shared_dir = strdup (SHARED_FOLDER);
+
+ mkdir (profile_dir, 0700);
+ mkdir (session_dir, 0700);
+ }
+ else
+ {
+ profile_dir = install_dir;
+ session_dir = install_dir;
+ shared_dir = install_dir;
+ }
+
+ myfree (resolved_install_folder);
+ myfree (resolved_exec_path);
+
+ #else
+
+ char *install_dir = get_install_dir (exec_path);
+ char *profile_dir = install_dir;
+ char *session_dir = install_dir;
+ char *shared_dir = install_dir;
+
+ #endif
+
+ data.install_dir = install_dir;
+ data.profile_dir = profile_dir;
+ data.session_dir = session_dir;
+ data.shared_dir = shared_dir;
+
+ myfree (exec_path);
+
+ /**
 + * kernel cache, we need to make sure the folder exists
+ */
+
+ int kernels_folder_size = strlen (profile_dir) + 1 + 7 + 1 + 1;
+
+ char *kernels_folder = (char *) mymalloc (kernels_folder_size);
+
+ snprintf (kernels_folder, kernels_folder_size - 1, "%s/kernels", profile_dir);
+
+ mkdir (kernels_folder, 0700);
+
+ myfree (kernels_folder);
+
+ /**
+ * session
+ */
+
+ size_t session_size = strlen (session_dir) + 1 + strlen (session) + 32;
data.session = session;
char *eff_restore_file = (char *) mymalloc (session_size);
char *new_restore_file = (char *) mymalloc (session_size);
- snprintf (eff_restore_file, session_size - 1, "%s.restore", session);
- snprintf (new_restore_file, session_size - 1, "%s.restore.new", session);
+ snprintf (eff_restore_file, session_size - 1, "%s/%s.restore", data.session_dir, session);
+ snprintf (new_restore_file, session_size - 1, "%s/%s.restore.new", data.session_dir, session);
data.eff_restore_file = eff_restore_file;
data.new_restore_file = new_restore_file;
uint hash_mode_chgd = 0;
uint runtime_chgd = 0;
- uint gpu_loops_chgd = 0;
- uint gpu_accel_chgd = 0;
+ uint kernel_loops_chgd = 0;
+ uint kernel_accel_chgd = 0;
uint attack_mode_chgd = 0;
uint outfile_format_chgd = 0;
uint rp_gen_seed_chgd = 0;
uint remove_timer_chgd = 0;
uint increment_min_chgd = 0;
uint increment_max_chgd = 0;
- uint gpu_temp_abort_chgd = 0;
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
uint gpu_temp_retain_chgd = 0;
+ uint gpu_temp_abort_chgd = 0;
+ #endif
optind = 1;
optopt = 0;
case IDX_HEX_CHARSET: hex_charset = 1; break;
case IDX_HEX_SALT: hex_salt = 1; break;
case IDX_HEX_WORDLIST: hex_wordlist = 1; break;
+ #ifndef OSX
case IDX_CPU_AFFINITY: cpu_affinity = optarg; break;
- case IDX_GPU_DEVICES: gpu_devices = optarg; break;
+ #endif
+ case IDX_OPENCL_DEVICES: opencl_devices = optarg; break;
+ case IDX_OPENCL_PLATFORMS: opencl_platforms = optarg; break;
+ case IDX_OPENCL_DEVICE_TYPES:
+ opencl_device_types = optarg; break;
+ case IDX_OPENCL_VECTOR_WIDTH:
+ opencl_vector_width = atoi (optarg); break;
case IDX_WORKLOAD_PROFILE: workload_profile = atoi (optarg); break;
- case IDX_GPU_ACCEL: gpu_accel = atoi (optarg);
- gpu_accel_chgd = 1; break;
- case IDX_GPU_LOOPS: gpu_loops = atoi (optarg);
- gpu_loops_chgd = 1; break;
+ case IDX_KERNEL_ACCEL: kernel_accel = atoi (optarg);
+ kernel_accel_chgd = 1; break;
+ case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
+ kernel_loops_chgd = 1; break;
+ #ifdef HAVE_HWMON
case IDX_GPU_TEMP_DISABLE: gpu_temp_disable = 1; break;
- case IDX_GPU_TEMP_ABORT: gpu_temp_abort_chgd = 1;
- gpu_temp_abort = atoi (optarg); break;
- case IDX_GPU_TEMP_RETAIN: gpu_temp_retain_chgd = 1;
- gpu_temp_retain = atoi (optarg); break;
+ case IDX_GPU_TEMP_ABORT: gpu_temp_abort = atoi (optarg);
+ #ifdef HAVE_ADL
+ gpu_temp_abort_chgd = 1;
+ #endif
+ break;
+ case IDX_GPU_TEMP_RETAIN: gpu_temp_retain = atoi (optarg);
+ #ifdef HAVE_ADL
+ gpu_temp_retain_chgd = 1;
+ #endif
+ break;
+ #ifdef HAVE_ADL
case IDX_POWERTUNE_ENABLE: powertune_enable = 1; break;
+ #endif
+ #endif // HAVE_HWMON
case IDX_LOGFILE_DISABLE: logfile_disable = 1; break;
case IDX_TRUECRYPT_KEYFILES: truecrypt_keyfiles = optarg; break;
case IDX_SEGMENT_SIZE: segment_size = atoi (optarg); break;
return (-1);
}
- if (hash_mode_chgd && hash_mode > 12800) // just added to remove compiler warnings for hash_mode_chgd
+ if (hash_mode_chgd && hash_mode > 13000) // just added to remove compiler warnings for hash_mode_chgd
{
log_error ("ERROR: Invalid hash-type specified");
return (-1);
}
- if (gpu_accel_chgd == 1)
+ if (kernel_accel_chgd == 1)
{
if (workload_profile != WORKLOAD_PROFILE)
{
- log_error ("ERROR: gpu-accel parameter can only be set when workload-profile %i is used", WORKLOAD_PROFILE);
+ log_error ("ERROR: kernel-accel parameter can only be set when workload-profile %i is used", WORKLOAD_PROFILE);
return (-1);
}
- if (gpu_accel < 1)
+ if (kernel_accel < 1)
{
- log_error ("ERROR: Invalid gpu-accel specified");
+ log_error ("ERROR: Invalid kernel-accel specified");
return (-1);
}
- if (gpu_accel > 800)
+ if (kernel_accel > 800)
{
- log_error ("ERROR: Invalid gpu-accel specified");
+ log_error ("ERROR: Invalid kernel-accel specified");
return (-1);
}
}
- if (gpu_loops_chgd == 1)
+ if (kernel_loops_chgd == 1)
{
if (workload_profile != WORKLOAD_PROFILE)
{
- log_error ("ERROR: gpu-loops parameter can only be set when workload-profile %i is used", WORKLOAD_PROFILE);
+ log_error ("ERROR: kernel-loops parameter can only be set when workload-profile %i is used", WORKLOAD_PROFILE);
return (-1);
}
- if (gpu_loops < 1)
+ if (kernel_loops < 1)
{
- log_error ("ERROR: Invalid gpu-loops specified");
+ log_error ("ERROR: Invalid kernel-loops specified");
return (-1);
}
- if (gpu_loops > 1024)
+ if (kernel_loops > 1024)
{
- log_error ("ERROR: Invalid gpu-loops specified");
+ log_error ("ERROR: Invalid kernel-loops specified");
return (-1);
}
return (-1);
}
+ if ((opencl_vector_width != 0) && (opencl_vector_width != 1) && (opencl_vector_width != 2) && (opencl_vector_width != 4) && (opencl_vector_width != 8))
+ {
+ log_error ("ERROR: opencl-vector-width %i not allowed", opencl_vector_width);
+
+ return (-1);
+ }
+
if (show == 1 || left == 1)
{
attack_mode = ATTACK_MODE_NONE;
}
else if (benchmark_mode == 1)
{
- if (gpu_accel_chgd == 1 || gpu_loops_chgd == 1)
+ if (kernel_accel_chgd == 1 || kernel_loops_chgd == 1)
{
- log_error ("ERROR: Benchmark-mode 1 does not allow gpu-accel or gpu-loops changed");
+ log_error ("ERROR: Benchmark-mode 1 does not allow kernel-accel or kernel-loops changed");
return (-1);
}
}
}
+ if (attack_mode != ATTACK_MODE_STRAIGHT)
+ {
+ if ((weak_hash_threshold != WEAK_HASH_THRESHOLD) && (weak_hash_threshold != 0))
+ {
+ log_error ("ERROR: setting --weak-hash-threshold allowed only in straight-attack mode");
+
+ return (-1);
+ }
+
+ weak_hash_threshold = 0;
+ }
+
/**
* induction directory
*/
{
induction_directory = (char *) mymalloc (session_size);
- snprintf (induction_directory, session_size - 1, "%s.%s", session, INDUCT_DIR);
+ snprintf (induction_directory, session_size - 1, "%s/%s.%s", session_dir, session, INDUCT_DIR);
// create induction folder if it does not already exist
{
char *induction_directory_mv = (char *) mymalloc (session_size);
- snprintf (induction_directory_mv, session_size - 1, "%s.induct.%d", session, (int) proc_start);
+ snprintf (induction_directory_mv, session_size - 1, "%s/%s.induct.%d", session_dir, session, (int) proc_start);
if (rename (induction_directory, induction_directory_mv) != 0)
{
}
}
- #ifdef _WIN
- #define mkdir(name,mode) mkdir (name)
- #endif
-
if (mkdir (induction_directory, 0700) == -1)
{
log_error ("ERROR: %s: %s", induction_directory, strerror (errno));
* loopback
*/
- size_t loopback_size = session_size + strlen (LOOPBACK_FILE) + 12;
+ size_t loopback_size = strlen (session_dir) + 1 + session_size + strlen (LOOPBACK_FILE) + 12;
char *loopback_file = (char *) mymalloc (loopback_size);
{
outfile_check_directory = (char *) mymalloc (session_size);
- snprintf (outfile_check_directory, session_size - 1, "%s.%s", session, OUTFILES_DIR);
+ snprintf (outfile_check_directory, session_size - 1, "%s/%s.%s", session_dir, session, OUTFILES_DIR);
}
else
{
}
else if (outfile_check_dir == NULL)
{
- #ifdef _WIN
- #define mkdir(name,mode) mkdir (name)
- #endif
-
if (mkdir (outfile_check_directory, 0700) == -1)
{
log_error ("ERROR: %s: %s", outfile_check_directory, strerror (errno));
data.benchmark = benchmark;
data.skip = skip;
data.limit = limit;
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
data.powertune_enable = powertune_enable;
+ #endif
data.logfile_disable = logfile_disable;
data.truecrypt_keyfiles = truecrypt_keyfiles;
data.scrypt_tmto = scrypt_tmto;
- /**
- * folders, as discussed on https://github.com/hashcat/oclHashcat/issues/20
- */
-
- #ifdef LINUX
-
- char *resolved_path = realpath (myargv[0], NULL);
-
- char *install_dir = get_install_dir (resolved_path);
- char *profile_dir = NULL;
- char *session_dir = NULL;
-
- if (strcmp (install_dir, INSTALL_FOLDER) == 0)
- {
- struct passwd *pw = getpwuid (getuid ());
-
- const char *homedir = pw->pw_dir;
-
- profile_dir = get_profile_dir (homedir);
- session_dir = get_session_dir (profile_dir, session);
- }
- else
- {
- profile_dir = install_dir;
- session_dir = install_dir;
- }
-
- myfree (resolved_path);
-
- #else
-
- char *install_dir = get_install_dir (myargv[0]);
- char *profile_dir = install_dir;
- char *session_dir = install_dir;
-
- #endif
-
- data.install_dir = install_dir;
- data.profile_dir = profile_dir;
- data.session_dir = session_dir;
-
/**
* cpu affinity
*/
+ #ifndef OSX
if (cpu_affinity)
{
set_cpu_affinity (cpu_affinity);
}
+ #endif
if (rp_gen_seed_chgd == 0)
{
if (logfile_disable == 0)
{
- size_t logfile_size = strlen (session) + 32;
+ size_t logfile_size = strlen (session_dir) + 1 + strlen (session) + 32;
char *logfile = (char *) mymalloc (logfile_size);
- snprintf (logfile, logfile_size - 1, "%s.log", session);
+ snprintf (logfile, logfile_size - 1, "%s/%s.log", session_dir, session);
data.logfile = logfile;
logfile_top_uint (bitmap_max);
logfile_top_uint (debug_mode);
logfile_top_uint (force);
- logfile_top_uint (gpu_accel);
- logfile_top_uint (gpu_loops);
+ logfile_top_uint (kernel_accel);
+ logfile_top_uint (kernel_loops);
+ #ifdef HAVE_HWMON
logfile_top_uint (gpu_temp_abort);
logfile_top_uint (gpu_temp_disable);
logfile_top_uint (gpu_temp_retain);
+ #endif
logfile_top_uint (hash_mode);
logfile_top_uint (hex_charset);
logfile_top_uint (hex_salt);
logfile_top_uint (outfile_check_timer);
logfile_top_uint (outfile_format);
logfile_top_uint (potfile_disable);
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
logfile_top_uint (powertune_enable);
+ #endif
logfile_top_uint (scrypt_tmto);
logfile_top_uint (quiet);
logfile_top_uint (remove);
logfile_top_uint64 (limit);
logfile_top_uint64 (skip);
logfile_top_char (separator);
+ #ifndef OSX
logfile_top_string (cpu_affinity);
+ #endif
logfile_top_string (custom_charset_1);
logfile_top_string (custom_charset_2);
logfile_top_string (custom_charset_3);
logfile_top_string (custom_charset_4);
logfile_top_string (debug_file);
- logfile_top_string (gpu_devices);
+ logfile_top_string (opencl_devices);
+ logfile_top_string (opencl_platforms);
+ logfile_top_string (opencl_device_types);
+ logfile_top_uint (opencl_vector_width);
logfile_top_string (induction_dir);
logfile_top_string (markov_hcstat);
logfile_top_string (outfile);
logfile_top_string (truecrypt_keyfiles);
/**
- * devices
+ * OpenCL platform selection
+ */
+
+ u32 opencl_platforms_filter = setup_opencl_platforms_filter (opencl_platforms);
+
+ /**
+ * OpenCL device selection
+ */
+
+ u32 devices_filter = setup_devices_filter (opencl_devices);
+
+ /**
+ * OpenCL device type selection
*/
- uint gpu_devicemask = devices_to_devicemask (gpu_devices);
+ cl_device_type device_types_filter = setup_device_types_filter (opencl_device_types);
/**
* benchmark
{
case 0: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 10: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS14;
case 11: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS14;
case 12: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS14;
case 20: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 21: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 22: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 23: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 30: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 40: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 50: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS14;
case 60: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 100: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 101: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 110: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15;
case 111: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15;
case 112: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15
case 120: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 121: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 122: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 124: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 130: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 131: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_PT_UPPER
case 132: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 133: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 140: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 141: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 150: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15;
case 160: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 190: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 200: hash_type = HASH_TYPE_MYSQL;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = 0;
kern_type = KERN_TYPE_MYSQL;
dgst_size = DGST_SIZE_4_4; // originally DGST_SIZE_4_2
case 300: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 400: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_PHPASS;
dgst_size = DGST_SIZE_4_4;
case 500: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_MD5CRYPT;
dgst_size = DGST_SIZE_4_4;
case 501: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_HASH_COPY;
kern_type = KERN_TYPE_MD5CRYPT;
case 900: hash_type = HASH_TYPE_MD4;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 1000: hash_type = HASH_TYPE_MD4;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 1100: hash_type = HASH_TYPE_MD4;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 1400: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 1410: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15;
case 1420: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 1421: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 1430: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 1440: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 1441: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 1450: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80;
kern_type = KERN_TYPE_HMACSHA256_PW;
case 1460: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 1500: hash_type = HASH_TYPE_DESCRYPT;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_BITSLICE;
kern_type = KERN_TYPE_DESCRYPT;
case 1600: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_APR1CRYPT;
dgst_size = DGST_SIZE_4_4;
case 1700: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 1710: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15;
case 1711: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15;
case 1720: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 1722: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 1730: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 1731: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 1740: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15
case 1750: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80;
kern_type = KERN_TYPE_HMACSHA512_PW;
case 1760: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 1800: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_SHA512CRYPT;
dgst_size = DGST_SIZE_8_8;
case 2100: hash_type = HASH_TYPE_DCC2;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE // should be OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_LOWER
| OPTS_TYPE_ST_UNICODE;
case 2400: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_MD5PIX;
dgst_size = DGST_SIZE_4_4;
case 2410: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_MD5ASA;
dgst_size = DGST_SIZE_4_4;
case 2500: hash_type = HASH_TYPE_WPA;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_WPA;
dgst_size = DGST_SIZE_4_4;
case 2600: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_VIRTUAL;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 2611: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 2612: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 2711: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 2811: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 3000: hash_type = HASH_TYPE_LM;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_UPPER
| OPTS_TYPE_PT_BITSLICE;
case 3100: hash_type = HASH_TYPE_ORACLEH;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_UPPER
| OPTS_TYPE_ST_UPPER;
case 3200: hash_type = HASH_TYPE_BCRYPT;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_GENERATE_LE;
kern_type = KERN_TYPE_BCRYPT;
case 3710: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 3711: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 3800: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADDBITS14;
kern_type = KERN_TYPE_MD5_SLT_PW_SLT;
case 4300: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_VIRTUAL;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 4400: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 4500: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 4700: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 4800: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADDBITS14;
kern_type = KERN_TYPE_MD5_CHAP;
case 4900: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_SHA1_SLT_PW_SLT;
dgst_size = DGST_SIZE_4_5;
case 5000: hash_type = HASH_TYPE_KECCAK;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD01;
kern_type = KERN_TYPE_KECCAK;
case 5100: hash_type = HASH_TYPE_MD5H;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14;
case 5200: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_PSAFE3;
dgst_size = DGST_SIZE_4_8;
case 5300: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADD80;
kern_type = KERN_TYPE_IKEPSK_MD5;
case 5400: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80;
kern_type = KERN_TYPE_IKEPSK_SHA1;
case 5500: hash_type = HASH_TYPE_NETNTLM;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 5600: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS14
case 5700: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 5800: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE // should be OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80;
kern_type = KERN_TYPE_ANDROIDPIN;
case 6000: hash_type = HASH_TYPE_RIPEMD160;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80;
kern_type = KERN_TYPE_RIPEMD160;
case 6100: hash_type = HASH_TYPE_WHIRLPOOL;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80;
kern_type = KERN_TYPE_WHIRLPOOL;
case 6211: hash_type = HASH_TYPE_RIPEMD160;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCRIPEMD160_XTS512;
dgst_size = DGST_SIZE_4_5;
case 6212: hash_type = HASH_TYPE_RIPEMD160;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCRIPEMD160_XTS1024;
dgst_size = DGST_SIZE_4_5;
case 6213: hash_type = HASH_TYPE_RIPEMD160;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCRIPEMD160_XTS1536;
dgst_size = DGST_SIZE_4_5;
case 6221: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_TCSHA512_XTS512;
dgst_size = DGST_SIZE_8_8;
case 6222: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_TCSHA512_XTS1024;
dgst_size = DGST_SIZE_8_8;
case 6223: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_TCSHA512_XTS1536;
dgst_size = DGST_SIZE_8_8;
case 6231: hash_type = HASH_TYPE_WHIRLPOOL;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCWHIRLPOOL_XTS512;
dgst_size = DGST_SIZE_4_8;
case 6232: hash_type = HASH_TYPE_WHIRLPOOL;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCWHIRLPOOL_XTS1024;
dgst_size = DGST_SIZE_4_8;
case 6233: hash_type = HASH_TYPE_WHIRLPOOL;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCWHIRLPOOL_XTS1536;
dgst_size = DGST_SIZE_4_8;
case 6241: hash_type = HASH_TYPE_RIPEMD160;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCRIPEMD160_XTS512;
dgst_size = DGST_SIZE_4_5;
case 6242: hash_type = HASH_TYPE_RIPEMD160;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCRIPEMD160_XTS1024;
dgst_size = DGST_SIZE_4_5;
case 6243: hash_type = HASH_TYPE_RIPEMD160;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_TCRIPEMD160_XTS1536;
dgst_size = DGST_SIZE_4_5;
case 6300: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_MD5AIX;
dgst_size = DGST_SIZE_4_4;
case 6400: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_SHA256AIX;
dgst_size = DGST_SIZE_4_8;
case 6500: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_SHA512AIX;
dgst_size = DGST_SIZE_8_8;
case 6600: hash_type = HASH_TYPE_AES;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_AGILEKEY;
dgst_size = DGST_SIZE_4_5; // because kernel uses _SHA1_
case 6700: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_SHA1AIX;
dgst_size = DGST_SIZE_4_5;
case 6800: hash_type = HASH_TYPE_AES;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_LASTPASS;
dgst_size = DGST_SIZE_4_8; // because kernel uses _SHA256_
case 6900: hash_type = HASH_TYPE_GOST;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_GOST;
dgst_size = DGST_SIZE_4_8;
case 7100: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_PBKDF2_SHA512;
dgst_size = DGST_SIZE_8_16;
case 7200: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_PBKDF2_SHA512;
dgst_size = DGST_SIZE_8_16;
case 7300: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15;
case 7400: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_SHA256CRYPT;
dgst_size = DGST_SIZE_4_8;
case 7500: hash_type = HASH_TYPE_KRB5PA;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_KRB5PA;
dgst_size = DGST_SIZE_4_4;
sort_by_digest = sort_by_digest_4_4;
opti_type = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;
- dgst_pos0 = 3;
- dgst_pos1 = 7;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
dgst_pos2 = 2;
- dgst_pos3 = 6;
+ dgst_pos3 = 3;
break;
case 7600: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 7700: hash_type = HASH_TYPE_SAPB;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_UPPER
| OPTS_TYPE_ST_UPPER;
case 7800: hash_type = HASH_TYPE_SAPG;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_UPPER;
case 7900: hash_type = HASH_TYPE_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_DRUPAL7;
dgst_size = DGST_SIZE_8_8;
case 8000: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_UNICODE
| OPTS_TYPE_ST_ADD80
case 8100: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE;
kern_type = KERN_TYPE_NETSCALER;
dgst_size = DGST_SIZE_4_5;
case 8200: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_CLOUDKEY;
dgst_size = DGST_SIZE_4_8;
case 8300: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_HEX
| OPTS_TYPE_ST_ADD80;
case 8400: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 8500: hash_type = HASH_TYPE_DESRACF;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_UPPER;
kern_type = KERN_TYPE_RACF;
case 8600: hash_type = HASH_TYPE_LOTUS5;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_LOTUS5;
dgst_size = DGST_SIZE_4_4;
case 8700: hash_type = HASH_TYPE_LOTUS6;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_LOTUS6;
dgst_size = DGST_SIZE_4_4;
case 8800: hash_type = HASH_TYPE_ANDROIDFDE;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_ANDROIDFDE;
dgst_size = DGST_SIZE_4_4;
case 8900: hash_type = HASH_TYPE_SCRYPT;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_SCRYPT;
dgst_size = DGST_SIZE_4_8;
case 9000: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_GENERATE_LE;
kern_type = KERN_TYPE_PSAFE2;
case 9100: hash_type = HASH_TYPE_LOTUS8;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_LOTUS8;
dgst_size = DGST_SIZE_4_4; // originally DGST_SIZE_4_2
case 9200: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_PBKDF2_SHA256;
dgst_size = DGST_SIZE_4_32;
case 9300: hash_type = HASH_TYPE_SCRYPT;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_SCRYPT;
dgst_size = DGST_SIZE_4_8;
case 9400: hash_type = HASH_TYPE_OFFICE2007;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_OFFICE2007;
dgst_size = DGST_SIZE_4_4;
case 9500: hash_type = HASH_TYPE_OFFICE2010;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_OFFICE2010;
dgst_size = DGST_SIZE_4_4;
case 9600: hash_type = HASH_TYPE_OFFICE2013;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_OFFICE2013;
dgst_size = DGST_SIZE_4_4;
case 9700: hash_type = HASH_TYPE_OLDOFFICE01;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_UNICODE;
case 9710: hash_type = HASH_TYPE_OLDOFFICE01;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80;
kern_type = KERN_TYPE_OLDOFFICE01CM1;
case 9720: hash_type = HASH_TYPE_OLDOFFICE01;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_UNICODE
case 9800: hash_type = HASH_TYPE_OLDOFFICE34;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_UNICODE;
case 9810: hash_type = HASH_TYPE_OLDOFFICE34;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_OLDOFFICE34CM1;
dgst_size = DGST_SIZE_4_4;
case 9820: hash_type = HASH_TYPE_OLDOFFICE34;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_UNICODE
case 9900: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_RADMIN2;
dgst_size = DGST_SIZE_4_4;
case 10000: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_PBKDF2_SHA256;
dgst_size = DGST_SIZE_4_32;
case 10100: hash_type = HASH_TYPE_SIPHASH;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_SIPHASH;
dgst_size = DGST_SIZE_4_4; // originally DGST_SIZE_4_2
case 10200: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS14;
case 10300: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE; // should be OPTS_TYPE_PT_GENERATE_BE
kern_type = KERN_TYPE_SAPH_SHA1;
dgst_size = DGST_SIZE_4_5;
case 10400: hash_type = HASH_TYPE_PDFU16;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_PDF11;
dgst_size = DGST_SIZE_4_4;
case 10410: hash_type = HASH_TYPE_PDFU16;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_PDF11CM1;
dgst_size = DGST_SIZE_4_4;
case 10420: hash_type = HASH_TYPE_PDFU16;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_PDF11CM2;
dgst_size = DGST_SIZE_4_4;
case 10500: hash_type = HASH_TYPE_PDFU16;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_PDF14;
dgst_size = DGST_SIZE_4_4;
case 10600: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_ADDBITS15
case 10700: hash_type = HASH_TYPE_PDFU32;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_HASH_COPY;
kern_type = KERN_TYPE_PDF17L8;
case 10800: hash_type = HASH_TYPE_SHA384;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_PT_ADDBITS15;
case 10900: hash_type = HASH_TYPE_PBKDF2_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_BASE64
| OPTS_TYPE_HASH_COPY;
case 11000: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80;
kern_type = KERN_TYPE_PRESTASHOP;
case 11100: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_ADD80;
kern_type = KERN_TYPE_POSTGRESQL_AUTH;
case 11200: hash_type = HASH_TYPE_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_ST_HEX;
case 11300: hash_type = HASH_TYPE_BITCOIN_WALLET;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_HEX
| OPTS_TYPE_ST_ADD80;
case 11400: hash_type = HASH_TYPE_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD80
| OPTS_TYPE_HASH_COPY;
case 11500: hash_type = HASH_TYPE_CRC32;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_GENERATE_LE
| OPTS_TYPE_ST_HEX;
case 11600: hash_type = HASH_TYPE_AES;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_NEVERCRACK;
kern_type = KERN_TYPE_SEVEN_ZIP;
case 11700: hash_type = HASH_TYPE_GOST_2012SBOG_256;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD01;
kern_type = KERN_TYPE_GOST_2012SBOG_256;
case 11800: hash_type = HASH_TYPE_GOST_2012SBOG_512;
salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_ADD01;
kern_type = KERN_TYPE_GOST_2012SBOG_512;
case 11900: hash_type = HASH_TYPE_PBKDF2_MD5;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_BASE64
| OPTS_TYPE_HASH_COPY;
case 12000: hash_type = HASH_TYPE_PBKDF2_SHA1;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_BASE64
| OPTS_TYPE_HASH_COPY;
case 12100: hash_type = HASH_TYPE_PBKDF2_SHA512;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_ST_BASE64
| OPTS_TYPE_HASH_COPY;
case 12200: hash_type = HASH_TYPE_ECRYPTFS;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_ECRYPTFS;
dgst_size = DGST_SIZE_8_8;
case 12300: hash_type = HASH_TYPE_ORACLET;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_ORACLET;
dgst_size = DGST_SIZE_8_16;
case 12400: hash_type = HASH_TYPE_BSDICRYPT;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_BSDICRYPT;
dgst_size = DGST_SIZE_4_4;
case 12500: hash_type = HASH_TYPE_RAR3HP;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_RAR3;
dgst_size = DGST_SIZE_4_4;
case 12600: hash_type = HASH_TYPE_SHA256;
salt_type = SALT_TYPE_INTERN;
- attack_exec = ATTACK_EXEC_ON_GPU;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_PT_ADD80;
kern_type = KERN_TYPE_CF10;
case 12700: hash_type = HASH_TYPE_AES;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_HASH_COPY;
kern_type = KERN_TYPE_MYWALLET;
case 12800: hash_type = HASH_TYPE_PBKDF2_SHA256;
salt_type = SALT_TYPE_EMBEDDED;
- attack_exec = ATTACK_EXEC_ON_CPU;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
kern_type = KERN_TYPE_MS_DRSR;
dgst_size = DGST_SIZE_4_8;
dgst_pos3 = 3;
break;
+ case 12900: hash_type = HASH_TYPE_PBKDF2_SHA256;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_LE;
+ kern_type = KERN_TYPE_ANDROIDFDE_SAMSUNG;
+ dgst_size = DGST_SIZE_4_8;
+ parse_func = androidfde_samsung_parse_hash;
+ sort_by_digest = sort_by_digest_4_8;
+ opti_type = OPTI_TYPE_ZERO_BYTE;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
+ dgst_pos2 = 2;
+ dgst_pos3 = 3;
+ break;
+
+ case 13000: hash_type = HASH_TYPE_PBKDF2_SHA256;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_LE;
+ kern_type = KERN_TYPE_RAR5;
+ dgst_size = DGST_SIZE_4_4;
+ parse_func = rar5_parse_hash;
+ sort_by_digest = sort_by_digest_4_4;
+ opti_type = OPTI_TYPE_ZERO_BYTE;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
+ dgst_pos2 = 2;
+ dgst_pos3 = 3;
+ break;
+
default: usage_mini_print (PROGNAME); return (-1);
}
case 11900: esalt_size = sizeof (pbkdf2_md5_t); break;
case 12000: esalt_size = sizeof (pbkdf2_sha1_t); break;
case 12100: esalt_size = sizeof (pbkdf2_sha512_t); break;
+ case 13000: esalt_size = sizeof (rar5_t); break;
}
data.esalt_size = esalt_size;
{
memset (dictstat, 0, sizeof (dictstat));
- snprintf (dictstat, sizeof (dictstat) - 1, "%s/%s.dictstat", install_dir, PROGNAME);
+ snprintf (dictstat, sizeof (dictstat) - 1, "%s/hashcat.dictstat", profile_dir);
dictstat_fp = fopen (dictstat, "rb");
* potfile
*/
- char potfile[256];
+ char potfile[256] = { 0 };
- memset (potfile, 0, sizeof (potfile));
-
- snprintf (potfile, sizeof (potfile) - 1, "%s.pot", session);
+ snprintf (potfile, sizeof (potfile) - 1, "%s/%s.pot", session_dir, session);
data.pot_fp = NULL;
continue;
}
+ if (plain_len >= 255) continue;
+
memcpy (pot_ptr->plain_buf, plain_buf, plain_len);
pot_ptr->plain_len = plain_len;
}
/**
- * gpu accel and loops auto adjustment
+ * kernel accel and loops auto adjustment
*/
- if (gpu_accel_chgd == 0) gpu_accel = set_gpu_accel (hash_mode);
- if (gpu_loops_chgd == 0) gpu_loops = set_gpu_loops (hash_mode);
+ if (kernel_accel_chgd == 0) kernel_accel = set_kernel_accel (hash_mode);
+ if (kernel_loops_chgd == 0) kernel_loops = set_kernel_loops (hash_mode);
if (workload_profile == 1)
{
- gpu_loops /= 8;
- gpu_accel /= 4;
+ kernel_loops /= 8;
+ kernel_accel /= 4;
- if (gpu_loops == 0) gpu_loops = 8;
- if (gpu_accel == 0) gpu_accel = 2;
+ if (kernel_loops == 0) kernel_loops = 8;
+ if (kernel_accel == 0) kernel_accel = 2;
}
else if (workload_profile == 3)
{
- gpu_loops *= 8;
- gpu_accel *= 4;
+ kernel_loops *= 8;
+ kernel_accel *= 4;
- if (gpu_loops > 1024) gpu_loops = 1024;
- if (gpu_accel > 256) gpu_accel = 256; // causes memory problems otherwise
+ if (kernel_loops > 1024) kernel_loops = 1024;
+ if (kernel_accel > 256) kernel_accel = 256; // causes memory problems otherwise
}
- // those hashes *must* run at a specific gpu_loops count because of some optimization inside the kernel
+ // those hashes *must* run at a specific kernel_loops count because of some optimization inside the kernel
if ((opts_type & OPTS_TYPE_PT_BITSLICE) && (attack_mode == ATTACK_MODE_BF))
{
- gpu_loops = 1024;
+ kernel_loops = 1024;
}
if (hash_mode == 12500)
{
- gpu_loops = ROUNDS_RAR3 / 16;
+ kernel_loops = ROUNDS_RAR3 / 16;
}
- data.gpu_accel = gpu_accel;
- data.gpu_loops = gpu_loops;
+ data.kernel_accel = kernel_accel;
+ data.kernel_loops = kernel_loops;
/**
* word len
break;
}
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
switch (attack_kern)
{
* charsets : keep them together for easier maintenance
*/
- cs_t mp_sys[6];
- cs_t mp_usr[4];
-
- memset (mp_sys, 0, sizeof (mp_sys));
- memset (mp_usr, 0, sizeof (mp_usr));
+ cs_t mp_sys[6] = { { { 0 }, 0 } };
+ cs_t mp_usr[4] = { { { 0 }, 0 } };
mp_setup_sys (mp_sys);
if ((username && (remove || show)) || (opts_type & OPTS_TYPE_HASH_COPY))
{
- uint32_t hash_pos;
+ u32 hash_pos;
for (hash_pos = 0; hash_pos < hashes_avail; hash_pos++)
{
wpa_t *wpa = (wpa_t *) hashes_buf[hashes_cnt].esalt;
- unsigned char *pke_ptr = (unsigned char *) wpa->pke;
+ u8 *pke_ptr = (u8 *) wpa->pke;
// do the appending task
break;
case 12800: hashes_buf[0].salt->salt_iter = ROUNDS_MS_DRSR - 1;
break;
+ case 12900: hashes_buf[0].salt->salt_iter = ROUNDS_ANDROIDFDE_SAMSUNG - 1;
+ break;
+ case 13000: hashes_buf[0].salt->salt_iter = ROUNDS_RAR5 - 1;
+ break;
}
// set special tuning for benchmark-mode 1
if (benchmark_mode == 1)
{
- gpu_loops *= 8;
- gpu_accel *= 4;
+ kernel_loops *= 8;
+ kernel_accel *= 4;
switch (hash_mode)
{
- case 400: gpu_loops = ROUNDS_PHPASS;
- gpu_accel = 32;
+ case 400: kernel_loops = ROUNDS_PHPASS;
+ kernel_accel = 32;
+ break;
+ case 500: kernel_loops = ROUNDS_MD5CRYPT;
+ kernel_accel = 32;
break;
- case 500: gpu_loops = ROUNDS_MD5CRYPT;
- gpu_accel = 32;
+ case 501: kernel_loops = ROUNDS_MD5CRYPT;
+ kernel_accel = 32;
break;
- case 501: gpu_loops = ROUNDS_MD5CRYPT;
- gpu_accel = 32;
+ case 1600: kernel_loops = ROUNDS_MD5CRYPT;
+ kernel_accel = 32;
break;
- case 1600: gpu_loops = ROUNDS_MD5CRYPT;
- gpu_accel = 32;
+ case 1800: kernel_loops = ROUNDS_SHA512CRYPT;
+ kernel_accel = 16;
break;
- case 1800: gpu_loops = ROUNDS_SHA512CRYPT;
- gpu_accel = 16;
+ case 2100: kernel_loops = ROUNDS_DCC2;
+ kernel_accel = 16;
break;
- case 2100: gpu_loops = ROUNDS_DCC2;
- gpu_accel = 16;
+ case 2500: kernel_loops = ROUNDS_WPA2;
+ kernel_accel = 32;
break;
- case 2500: gpu_loops = ROUNDS_WPA2;
- gpu_accel = 32;
+ case 3200: kernel_loops = ROUNDS_BCRYPT;
+ kernel_accel = 8;
break;
- case 3200: gpu_loops = ROUNDS_BCRYPT;
- gpu_accel = 8;
+ case 5200: kernel_loops = ROUNDS_PSAFE3;
+ kernel_accel = 16;
break;
- case 5200: gpu_loops = ROUNDS_PSAFE3;
- gpu_accel = 16;
+ case 5800: kernel_loops = ROUNDS_ANDROIDPIN;
+ kernel_accel = 16;
break;
- case 5800: gpu_loops = ROUNDS_ANDROIDPIN;
- gpu_accel = 16;
+ case 6211: kernel_loops = ROUNDS_TRUECRYPT_2K;
+ kernel_accel = 64;
break;
- case 6211: gpu_loops = ROUNDS_TRUECRYPT_2K;
- gpu_accel = 64;
+ case 6212: kernel_loops = ROUNDS_TRUECRYPT_2K;
+ kernel_accel = 32;
break;
- case 6212: gpu_loops = ROUNDS_TRUECRYPT_2K;
- gpu_accel = 32;
+ case 6213: kernel_loops = ROUNDS_TRUECRYPT_2K;
+ kernel_accel = 32;
break;
- case 6213: gpu_loops = ROUNDS_TRUECRYPT_2K;
- gpu_accel = 32;
+ case 6221: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 8;
break;
- case 6221: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 8;
+ case 6222: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 8;
break;
- case 6222: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 8;
+ case 6223: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 8;
break;
- case 6223: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 8;
+ case 6231: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 8;
break;
- case 6231: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 8;
+ case 6232: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 8;
break;
- case 6232: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 8;
+ case 6233: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 8;
break;
- case 6233: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 8;
+ case 6241: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 128;
break;
- case 6241: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 128;
+ case 6242: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 64;
break;
- case 6242: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 64;
+ case 6243: kernel_loops = ROUNDS_TRUECRYPT_1K;
+ kernel_accel = 64;
break;
- case 6243: gpu_loops = ROUNDS_TRUECRYPT_1K;
- gpu_accel = 64;
+ case 6300: kernel_loops = ROUNDS_MD5CRYPT;
+ kernel_accel = 32;
break;
- case 6300: gpu_loops = ROUNDS_MD5CRYPT;
- gpu_accel = 32;
+ case 6700: kernel_loops = ROUNDS_SHA1AIX;
+ kernel_accel = 128;
break;
- case 6700: gpu_loops = ROUNDS_SHA1AIX;
- gpu_accel = 128;
+ case 6400: kernel_loops = ROUNDS_SHA256AIX;
+ kernel_accel = 128;
break;
- case 6400: gpu_loops = ROUNDS_SHA256AIX;
- gpu_accel = 128;
+ case 6500: kernel_loops = ROUNDS_SHA512AIX;
+ kernel_accel = 32;
break;
- case 6500: gpu_loops = ROUNDS_SHA512AIX;
- gpu_accel = 32;
+ case 6600: kernel_loops = ROUNDS_AGILEKEY;
+ kernel_accel = 64;
break;
- case 6600: gpu_loops = ROUNDS_AGILEKEY;
- gpu_accel = 64;
+ case 6800: kernel_loops = ROUNDS_LASTPASS;
+ kernel_accel = 64;
break;
- case 6800: gpu_loops = ROUNDS_LASTPASS;
- gpu_accel = 64;
+ case 7100: kernel_loops = ROUNDS_SHA512OSX;
+ kernel_accel = 8;
break;
- case 7100: gpu_loops = ROUNDS_SHA512OSX;
- gpu_accel = 8;
+ case 7200: kernel_loops = ROUNDS_GRUB;
+ kernel_accel = 16;
break;
- case 7200: gpu_loops = ROUNDS_GRUB;
- gpu_accel = 16;
+ case 7400: kernel_loops = ROUNDS_SHA256CRYPT;
+ kernel_accel = 8;
break;
- case 7400: gpu_loops = ROUNDS_SHA256CRYPT;
- gpu_accel = 8;
+ case 7900: kernel_loops = ROUNDS_DRUPAL7;
+ kernel_accel = 8;
break;
- case 7900: gpu_loops = ROUNDS_DRUPAL7;
- gpu_accel = 8;
+ case 8200: kernel_loops = ROUNDS_CLOUDKEY;
+ kernel_accel = 8;
break;
- case 8200: gpu_loops = ROUNDS_CLOUDKEY;
- gpu_accel = 8;
+ case 8800: kernel_loops = ROUNDS_ANDROIDFDE;
+ kernel_accel = 32;
break;
- case 8800: gpu_loops = ROUNDS_ANDROIDFDE;
- gpu_accel = 32;
+ case 8900: kernel_loops = 1;
+ kernel_accel = 64;
break;
- case 8900: gpu_loops = 1;
- gpu_accel = 64;
+ case 9000: kernel_loops = ROUNDS_PSAFE2;
+ kernel_accel = 16;
break;
- case 9000: gpu_loops = ROUNDS_PSAFE2;
- gpu_accel = 16;
+ case 9100: kernel_loops = ROUNDS_LOTUS8;
+ kernel_accel = 64;
break;
- case 9100: gpu_loops = ROUNDS_LOTUS8;
- gpu_accel = 64;
+ case 9200: kernel_loops = ROUNDS_CISCO8;
+ kernel_accel = 8;
break;
- case 9200: gpu_loops = ROUNDS_CISCO8;
- gpu_accel = 8;
+ case 9300: kernel_loops = 1;
+ kernel_accel = 4;
break;
- case 9300: gpu_loops = 1;
- gpu_accel = 8;
+ case 9400: kernel_loops = ROUNDS_OFFICE2007;
+ kernel_accel = 32;
break;
- case 9400: gpu_loops = ROUNDS_OFFICE2007;
- gpu_accel = 32;
+ case 9500: kernel_loops = ROUNDS_OFFICE2010;
+ kernel_accel = 32;
break;
- case 9500: gpu_loops = ROUNDS_OFFICE2010;
- gpu_accel = 32;
+ case 9600: kernel_loops = ROUNDS_OFFICE2013;
+ kernel_accel = 8;
break;
- case 9600: gpu_loops = ROUNDS_OFFICE2013;
- gpu_accel = 8;
+ case 10000: kernel_loops = ROUNDS_DJANGOPBKDF2;
+ kernel_accel = 8;
break;
- case 10000: gpu_loops = ROUNDS_DJANGOPBKDF2;
- gpu_accel = 8;
+ case 10300: kernel_loops = ROUNDS_SAPH_SHA1;
+ kernel_accel = 16;
break;
- case 10300: gpu_loops = ROUNDS_SAPH_SHA1;
- gpu_accel = 16;
+ case 10500: kernel_loops = ROUNDS_PDF14;
+ kernel_accel = 256;
break;
- case 10500: gpu_loops = ROUNDS_PDF14;
- gpu_accel = 256;
+ case 10700: kernel_loops = ROUNDS_PDF17L8;
+ kernel_accel = 8;
break;
- case 10700: gpu_loops = ROUNDS_PDF17L8;
- gpu_accel = 8;
+ case 10900: kernel_loops = ROUNDS_PBKDF2_SHA256;
+ kernel_accel = 8;
break;
- case 10900: gpu_loops = ROUNDS_PBKDF2_SHA256;
- gpu_accel = 8;
+ case 11300: kernel_loops = ROUNDS_BITCOIN_WALLET;
+ kernel_accel = 8;
break;
- case 11300: gpu_loops = ROUNDS_BITCOIN_WALLET;
- gpu_accel = 8;
+ case 11600: kernel_loops = ROUNDS_SEVEN_ZIP;
+ kernel_accel = 8;
break;
- case 11600: gpu_loops = ROUNDS_SEVEN_ZIP;
- gpu_accel = 8;
+ case 11900: kernel_loops = ROUNDS_PBKDF2_MD5;
+ kernel_accel = 8;
break;
- case 11900: gpu_loops = ROUNDS_PBKDF2_MD5;
- gpu_accel = 8;
+ case 12000: kernel_loops = ROUNDS_PBKDF2_SHA1;
+ kernel_accel = 8;
break;
- case 12000: gpu_loops = ROUNDS_PBKDF2_SHA1;
- gpu_accel = 8;
+ case 12100: kernel_loops = ROUNDS_PBKDF2_SHA512;
+ kernel_accel = 8;
break;
- case 12100: gpu_loops = ROUNDS_PBKDF2_SHA512;
- gpu_accel = 8;
+ case 12200: kernel_loops = ROUNDS_ECRYPTFS;
+ kernel_accel = 8;
break;
- case 12200: gpu_loops = ROUNDS_ECRYPTFS;
- gpu_accel = 8;
+ case 12300: kernel_loops = ROUNDS_ORACLET;
+ kernel_accel = 8;
break;
- case 12300: gpu_loops = ROUNDS_ORACLET;
- gpu_accel = 8;
+ case 12500: kernel_loops = ROUNDS_RAR3;
+ kernel_accel = 32;
break;
- case 12500: gpu_loops = ROUNDS_RAR3;
- gpu_accel = 32;
+ case 12700: kernel_loops = ROUNDS_MYWALLET;
+ kernel_accel = 512;
break;
- case 12700: gpu_loops = ROUNDS_MYWALLET;
- gpu_accel = 512;
+ case 12800: kernel_loops = ROUNDS_MS_DRSR;
+ kernel_accel = 512;
break;
- case 12800: gpu_loops = ROUNDS_MS_DRSR;
- gpu_accel = 512;
+ case 12900: kernel_loops = ROUNDS_ANDROIDFDE_SAMSUNG;
+ kernel_accel = 8;
+ break;
+ case 13000: kernel_loops = ROUNDS_RAR5;
+ kernel_accel = 8;
break;
}
break;
}
- if (gpu_loops > 1024) gpu_loops = 1024;
- if (gpu_accel > 256) gpu_accel = 256; // causes memory problems otherwise
+ if (kernel_loops > 1024) kernel_loops = 1024;
+ if (kernel_accel > 256) kernel_accel = 256; // causes memory problems otherwise
}
if ((opts_type & OPTS_TYPE_PT_BITSLICE) && (attack_mode == ATTACK_MODE_BF))
{
- gpu_loops = 1024;
+ kernel_loops = 1024;
}
if (hash_mode == 12500)
{
- gpu_loops = ROUNDS_RAR3 / 16;
+ kernel_loops = ROUNDS_RAR3 / 16;
}
- data.gpu_accel = gpu_accel;
- data.gpu_loops = gpu_loops;
+ data.kernel_accel = kernel_accel;
+ data.kernel_loops = kernel_loops;
hashes_cnt = 1;
}
pke[i] = byte_swap_32 (wpa->pke[i]);
}
- unsigned char mac1[6];
- unsigned char mac2[6];
+ u8 mac1[6];
+ u8 mac2[6];
memcpy (mac1, pke_ptr + 23, 6);
memcpy (mac2, pke_ptr + 29, 6);
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != (unsigned char) hex_to_char (&mac1_pos[j]))
+ if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
found = NULL;
break;
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != (unsigned char) hex_to_char (&mac2_pos[j]))
+ if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
found = NULL;
break;
do
{
- truecrypt_crc32 (keyfile, (unsigned char *) keyfile_buf);
+ truecrypt_crc32 (keyfile, (u8 *) keyfile_buf);
} while ((keyfile = strtok (NULL, ",")) != NULL);
if (digests_cnt == 1)
opti_type |= OPTI_TYPE_SINGLE_HASH;
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
opti_type |= OPTI_TYPE_NOT_ITERATED;
if (attack_mode == ATTACK_MODE_BF)
* Some algorithm, like descrypt, can benefit from JIT compilation
*/
- uint force_jit_compilation = 0;
+ int force_jit_compilation = -1;
if (hash_mode == 8900)
{
* load rules
*/
- uint *all_gpu_rules_cnt = NULL;
+ uint *all_kernel_rules_cnt = NULL;
- gpu_rule_t **all_gpu_rules_buf = NULL;
+ kernel_rule_t **all_kernel_rules_buf = NULL;
if (rp_files_cnt)
{
- all_gpu_rules_cnt = (uint *) mycalloc (rp_files_cnt, sizeof (uint));
+ all_kernel_rules_cnt = (uint *) mycalloc (rp_files_cnt, sizeof (uint));
- all_gpu_rules_buf = (gpu_rule_t **) mycalloc (rp_files_cnt, sizeof (gpu_rule_t *));
+ all_kernel_rules_buf = (kernel_rule_t **) mycalloc (rp_files_cnt, sizeof (kernel_rule_t *));
}
char rule_buf[BUFSIZ];
for (uint i = 0; i < rp_files_cnt; i++)
{
- uint gpu_rules_avail = 0;
+ uint kernel_rules_avail = 0;
- uint gpu_rules_cnt = 0;
+ uint kernel_rules_cnt = 0;
- gpu_rule_t *gpu_rules_buf = NULL;
+ kernel_rule_t *kernel_rules_buf = NULL;
char *rp_file = rp_files[i];
if (rule_buf[0] == '#') continue;
- if (gpu_rules_avail == gpu_rules_cnt)
+ if (kernel_rules_avail == kernel_rules_cnt)
{
- gpu_rules_buf = (gpu_rule_t *) myrealloc (gpu_rules_buf, gpu_rules_avail * sizeof (gpu_rule_t), INCR_RULES * sizeof (gpu_rule_t));
+ kernel_rules_buf = (kernel_rule_t *) myrealloc (kernel_rules_buf, kernel_rules_avail * sizeof (kernel_rule_t), INCR_RULES * sizeof (kernel_rule_t));
- gpu_rules_avail += INCR_RULES;
+ kernel_rules_avail += INCR_RULES;
}
memset (in, 0, BLOCK_SIZE);
continue;
}
- if (cpu_rule_to_gpu_rule (rule_buf, rule_len, &gpu_rules_buf[gpu_rules_cnt]) == -1)
+ if (cpu_rule_to_kernel_rule (rule_buf, rule_len, &kernel_rules_buf[kernel_rules_cnt]) == -1)
{
- log_info ("WARNING: Cannot convert rule for use on GPU in file %s in line %u: %s", rp_file, rule_line, rule_buf);
+ log_info ("WARNING: Cannot convert rule for use on device in file %s in line %u: %s", rp_file, rule_line, rule_buf);
- memset (&gpu_rules_buf[gpu_rules_cnt], 0, sizeof (gpu_rule_t)); // needs to be cleared otherwise we could have some remaining data
+ memset (&kernel_rules_buf[kernel_rules_cnt], 0, sizeof (kernel_rule_t)); // needs to be cleared otherwise we could have some remaining data
continue;
}
/* its so slow
- if (rulefind (&gpu_rules_buf[gpu_rules_cnt], gpu_rules_buf, gpu_rules_cnt, sizeof (gpu_rule_t), sort_by_gpu_rule))
+ if (rulefind (&kernel_rules_buf[kernel_rules_cnt], kernel_rules_buf, kernel_rules_cnt, sizeof (kernel_rule_t), sort_by_kernel_rule))
{
- log_info ("Duplicate rule for use on GPU in file %s in line %u: %s", rp_file, rule_line, rule_buf);
+ log_info ("Duplicate rule for use on device in file %s in line %u: %s", rp_file, rule_line, rule_buf);
continue;
}
*/
- gpu_rules_cnt++;
+ kernel_rules_cnt++;
}
fclose (fp);
- all_gpu_rules_cnt[i] = gpu_rules_cnt;
+ all_kernel_rules_cnt[i] = kernel_rules_cnt;
- all_gpu_rules_buf[i] = gpu_rules_buf;
+ all_kernel_rules_buf[i] = kernel_rules_buf;
}
/**
* merge rules or automatic rule generator
*/
- uint gpu_rules_cnt = 0;
+ uint kernel_rules_cnt = 0;
- gpu_rule_t *gpu_rules_buf = NULL;
+ kernel_rule_t *kernel_rules_buf = NULL;
if (attack_mode == ATTACK_MODE_STRAIGHT)
{
if (rp_files_cnt)
{
- gpu_rules_cnt = 1;
+ kernel_rules_cnt = 1;
uint *repeats = (uint *) mycalloc (rp_files_cnt + 1, sizeof (uint));
- repeats[0] = gpu_rules_cnt;
+ repeats[0] = kernel_rules_cnt;
for (uint i = 0; i < rp_files_cnt; i++)
{
- gpu_rules_cnt *= all_gpu_rules_cnt[i];
+ kernel_rules_cnt *= all_kernel_rules_cnt[i];
- repeats[i + 1] = gpu_rules_cnt;
+ repeats[i + 1] = kernel_rules_cnt;
}
- gpu_rules_buf = (gpu_rule_t *) mycalloc (gpu_rules_cnt, sizeof (gpu_rule_t));
+ kernel_rules_buf = (kernel_rule_t *) mycalloc (kernel_rules_cnt, sizeof (kernel_rule_t));
- memset (gpu_rules_buf, 0, gpu_rules_cnt * sizeof (gpu_rule_t));
+ memset (kernel_rules_buf, 0, kernel_rules_cnt * sizeof (kernel_rule_t));
- for (uint i = 0; i < gpu_rules_cnt; i++)
+ for (uint i = 0; i < kernel_rules_cnt; i++)
{
uint out_pos = 0;
- gpu_rule_t *out = &gpu_rules_buf[i];
+ kernel_rule_t *out = &kernel_rules_buf[i];
for (uint j = 0; j < rp_files_cnt; j++)
{
- uint in_off = (i / repeats[j]) % all_gpu_rules_cnt[j];
+ uint in_off = (i / repeats[j]) % all_kernel_rules_cnt[j];
uint in_pos;
- gpu_rule_t *in = &all_gpu_rules_buf[j][in_off];
+ kernel_rule_t *in = &all_kernel_rules_buf[j][in_off];
for (in_pos = 0; in->cmds[in_pos]; in_pos++, out_pos++)
{
}
else if (rp_gen)
{
- uint gpu_rules_avail = 0;
+ uint kernel_rules_avail = 0;
- while (gpu_rules_cnt < rp_gen)
+ while (kernel_rules_cnt < rp_gen)
{
- if (gpu_rules_avail == gpu_rules_cnt)
+ if (kernel_rules_avail == kernel_rules_cnt)
{
- gpu_rules_buf = (gpu_rule_t *) myrealloc (gpu_rules_buf, gpu_rules_avail * sizeof (gpu_rule_t), INCR_RULES * sizeof (gpu_rule_t));
+ kernel_rules_buf = (kernel_rule_t *) myrealloc (kernel_rules_buf, kernel_rules_avail * sizeof (kernel_rule_t), INCR_RULES * sizeof (kernel_rule_t));
- gpu_rules_avail += INCR_RULES;
+ kernel_rules_avail += INCR_RULES;
}
memset (rule_buf, 0, BLOCK_SIZE);
rule_len = (int) generate_random_rule (rule_buf, rp_gen_func_min, rp_gen_func_max);
- if (cpu_rule_to_gpu_rule (rule_buf, rule_len, &gpu_rules_buf[gpu_rules_cnt]) == -1) continue;
+ if (cpu_rule_to_kernel_rule (rule_buf, rule_len, &kernel_rules_buf[kernel_rules_cnt]) == -1) continue;
- gpu_rules_cnt++;
+ kernel_rules_cnt++;
}
}
}
* generate NOP rules
*/
- if (gpu_rules_cnt == 0)
+ if (kernel_rules_cnt == 0)
{
- gpu_rules_buf = (gpu_rule_t *) mymalloc (sizeof (gpu_rule_t));
+ kernel_rules_buf = (kernel_rule_t *) mymalloc (sizeof (kernel_rule_t));
- gpu_rules_buf[gpu_rules_cnt].cmds[0] = RULE_OP_MANGLE_NOOP;
+ kernel_rules_buf[kernel_rules_cnt].cmds[0] = RULE_OP_MANGLE_NOOP;
- gpu_rules_cnt++;
+ kernel_rules_cnt++;
}
- data.gpu_rules_cnt = gpu_rules_cnt;
- data.gpu_rules_buf = gpu_rules_buf;
+ data.kernel_rules_cnt = kernel_rules_cnt;
+ data.kernel_rules_buf = kernel_rules_buf;
/**
- * platform
+ * OpenCL platforms: detect
*/
- cl_platform_id CL_platforms[CL_PLATFORMS_MAX];
+ cl_platform_id platforms[CL_PLATFORMS_MAX];
+
+ cl_uint platforms_cnt = 0;
- uint CL_platforms_cnt = 0;
+ cl_device_id platform_devices[DEVICES_MAX];
- hc_clGetPlatformIDs (CL_PLATFORMS_MAX, CL_platforms, &CL_platforms_cnt);
+ cl_uint platform_devices_cnt;
- if (CL_platforms_cnt == 0)
+ hc_clGetPlatformIDs (CL_PLATFORMS_MAX, platforms, &platforms_cnt);
+
+ if (platforms_cnt == 0)
{
log_error ("ERROR: No OpenCL compatible platform found");
return (-1);
}
- if (CL_platforms_cnt > 1)
+ /**
+ * OpenCL platforms: For each platform check if we need to unset features that we can not use, eg: temp_retain
+ */
+
+ for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
{
- log_error ("ERROR: Too many OpenCL compatible platforms found");
+ cl_platform_id platform = platforms[platform_id];
- return (-1);
+ char platform_vendor[INFOSZ] = { 0 };
+
+ hc_clGetPlatformInfo (platform, CL_PLATFORM_VENDOR, sizeof (platform_vendor), platform_vendor, NULL);
+
+ #ifdef HAVE_HWMON
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ if (strcmp (platform_vendor, CL_VENDOR_NV) == 0)
+ {
+ // make sure that we do not directly control the fan for NVidia
+
+ gpu_temp_retain = 0;
+
+ data.gpu_temp_retain = gpu_temp_retain;
+ }
+ #endif // HAVE_NVML || HAVE_NVAPI
+ #endif
}
- cl_platform_id CL_platform = CL_platforms[0];
+ /**
+ * OpenCL devices: simply push all devices from all platforms into the same device array
+ */
- char CL_platform_vendor[INFOSZ];
+ hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t));
- memset (CL_platform_vendor, 0, sizeof (CL_platform_vendor));
+ data.devices_param = devices_param;
- hc_clGetPlatformInfo (CL_platform, CL_PLATFORM_VENDOR, sizeof (CL_platform_vendor), CL_platform_vendor, NULL);
+ uint devices_cnt = 0;
- uint vendor_id;
+ uint devices_active = 0;
- if (strcmp (CL_platform_vendor, CL_VENDOR_AMD) == 0)
- {
- vendor_id = VENDOR_ID_AMD;
- }
- else if (strcmp (CL_platform_vendor, CL_VENDOR_NV) == 0)
+ for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
{
- vendor_id = VENDOR_ID_NV;
+ if ((opencl_platforms_filter & (1 << platform_id)) == 0) continue;
- // make sure that we do not directly control the fan for NVidia
+ cl_platform_id platform = platforms[platform_id];
- gpu_temp_retain = 0;
- data.gpu_temp_retain = gpu_temp_retain;
- }
- else
- {
- vendor_id = VENDOR_ID_UNKNOWN;
- }
+ hc_clGetDeviceIDs (platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
- if (vendor_id == VENDOR_ID_UNKNOWN)
- {
- log_error ("Warning: unknown OpenCL vendor '%s' detected", CL_platform_vendor);
+ for (uint platform_devices_id = 0; platform_devices_id < platform_devices_cnt; platform_devices_id++)
+ {
+ const uint device_id = devices_cnt;
- gpu_temp_disable = 1;
- }
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- data.vendor_id = vendor_id;
+ device_param->device = platform_devices[platform_devices_id];
- /**
- * devices
- */
+ device_param->device_id = device_id;
- cl_device_id devices_all[DEVICES_MAX];
- cl_device_id devices[DEVICES_MAX];
+ device_param->platform_devices_id = platform_devices_id;
- uint devices_all_cnt = 0;
+ // vendor_id
- hc_clGetDeviceIDs (CL_platform, CL_DEVICE_TYPE_GPU, DEVICES_MAX, devices_all, (uint *) &devices_all_cnt);
+ cl_uint vendor_id = 0;
- int hm_adapters_all = devices_all_cnt;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_VENDOR_ID, sizeof (vendor_id), &vendor_id, NULL);
- hm_attrs_t hm_adapter_all[DEVICES_MAX];
+ device_param->vendor_id = vendor_id;
- memset (hm_adapter_all, 0, sizeof (hm_adapter_all));
+ // device_type
- if (gpu_temp_disable == 0)
- {
- if (vendor_id == VENDOR_ID_NV)
- {
- #ifdef LINUX
- HM_LIB hm_dll = hm_init ();
+ cl_device_type device_type;
- data.hm_dll = hm_dll;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
- if (hc_NVML_nvmlInit (hm_dll) == NVML_SUCCESS)
- {
- HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
+ device_type &= ~CL_DEVICE_TYPE_DEFAULT;
- int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
+ device_param->device_type = device_type;
- int tmp_out = 0;
+ // device_name
- for (int i = 0; i < tmp_in; i++)
- {
- hm_adapter_all[tmp_out++].adapter_index.nv = nvGPUHandle[i];
- }
+ char *device_name = (char *) mymalloc (INFOSZ);
- hm_adapters_all = tmp_out;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_NAME, INFOSZ, device_name, NULL);
- for (int i = 0; i < tmp_out; i++)
- {
- unsigned int speed;
+ device_param->device_name = device_name;
- if (hc_NVML_nvmlDeviceGetFanSpeed (hm_dll, 1, hm_adapter_all[i].adapter_index.nv, &speed) != NVML_ERROR_NOT_SUPPORTED) hm_adapter_all[i].fan_supported = 1;
- }
- }
- #endif
+ // device_version
- #ifdef WIN
- if (NvAPI_Initialize () == NVAPI_OK)
+ char *device_version = (char *) mymalloc (INFOSZ);
+
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_VERSION, INFOSZ, device_version, NULL);
+
+ device_param->device_version = device_version;
+
+ if (strstr (device_version, "pocl"))
{
- HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
+ // pocl returns the real vendor_id in CL_DEVICE_VENDOR_ID which causes many problems because of hms and missing amd_bfe () etc
+ // we need to overwrite vendor_id to avoid this. maybe open pocl issue?
- int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
+ cl_uint vendor_id = 0xffff;
- int tmp_out = 0;
+ device_param->vendor_id = vendor_id;
+ }
- for (int i = 0; i < tmp_in; i++)
- {
- hm_adapter_all[tmp_out++].adapter_index.nv = nvGPUHandle[i];
- }
+ // vector_width
- hm_adapters_all = tmp_out;
+ cl_uint vector_width;
- for (int i = 0; i < tmp_out; i++)
+ if (attack_mode == ATTACK_MODE_BF)
+ {
+ if (opencl_vector_width == OPENCL_VECTOR_WIDTH)
{
- NvU32 speed;
-
- if (NvAPI_GPU_GetTachReading (hm_adapter_all[i].adapter_index.nv, &speed) != NVAPI_NOT_SUPPORTED) hm_adapter_all[i].fan_supported = 1;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, sizeof (vector_width), &vector_width, NULL);
+ }
+ else
+ {
+ vector_width = opencl_vector_width;
}
}
+ else
+ {
+ vector_width = 1;
+ }
+
+ if (vector_width > 8) vector_width = 8;
+
+ device_param->vector_width = vector_width;
+
+ // max_compute_units
+
+ cl_uint device_processors;
+
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (device_processors), &device_processors, NULL);
+
+ device_param->device_processors = device_processors;
+
+ // max_mem_alloc_size
+
+ cl_ulong device_maxmem_alloc;
+
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (device_maxmem_alloc), &device_maxmem_alloc, NULL);
+
+ device_param->device_maxmem_alloc = device_maxmem_alloc;
+
+ // global_mem_size
+
+ cl_ulong device_global_mem;
+
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (device_global_mem), &device_global_mem, NULL);
+
+ device_param->device_global_mem = device_global_mem;
+
+ // max_clock_frequency
+
+ cl_uint device_maxclock_frequency;
+
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (device_maxclock_frequency), &device_maxclock_frequency, NULL);
+
+ device_param->device_maxclock_frequency = device_maxclock_frequency;
+
+ // skipped
+
+ const u32 skipped1 = ((devices_filter & (1 << device_id)) == 0);
+ const u32 skipped2 = ((device_types_filter & (device_type)) == 0);
+
+ device_param->skipped = (skipped1 || skipped2);
+
+ // driver_version
+
+ char *driver_version = (char *) mymalloc (INFOSZ);
+
+ hc_clGetDeviceInfo (device_param->device, CL_DRIVER_VERSION, INFOSZ, driver_version, NULL);
+
+ device_param->driver_version = driver_version;
+
+ // device_name_chksum
+
+ char *device_name_chksum = (char *) mymalloc (INFOSZ);
+
+ #if __x86_64__
+ snprintf (device_name_chksum, INFOSZ - 1, "%u-%u-%u-%s-%s-%s-%u", 64, device_param->vendor_id, device_param->vector_width, device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
+ #else
+ snprintf (device_name_chksum, INFOSZ - 1, "%u-%u-%u-%s-%s-%s-%u", 32, device_param->vendor_id, device_param->vector_width, device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
#endif
- }
- if (vendor_id == VENDOR_ID_AMD)
- {
- HM_LIB hm_dll = hm_init ();
+ uint device_name_digest[4];
- data.hm_dll = hm_dll;
+ device_name_digest[0] = 0;
+ device_name_digest[1] = 0;
+ device_name_digest[2] = 0;
+ device_name_digest[3] = 0;
- if (hc_ADL_Main_Control_Create (hm_dll, ADL_Main_Memory_Alloc, 0) == ADL_OK)
- {
- // total number of adapters
+ md5_64 ((uint *) device_name_chksum, device_name_digest);
- int hm_adapters_num;
+ sprintf (device_name_chksum, "%08x", device_name_digest[0]);
- if (get_adapters_num_amd (hm_dll, &hm_adapters_num) != 0) return (-1);
+ device_param->device_name_chksum = device_name_chksum;
- // adapter info
+ // device_processor_cores
- LPAdapterInfo lpAdapterInfo = hm_get_adapter_info_amd (hm_dll, hm_adapters_num);
+ if (device_type & CL_DEVICE_TYPE_CPU)
+ {
+ cl_uint device_processor_cores = 1;
- if (lpAdapterInfo == NULL) return (-1);
+ device_param->device_processor_cores = device_processor_cores;
+ }
- // get a list (of ids of) valid/usable adapters
+ if (device_type & CL_DEVICE_TYPE_GPU)
+ {
+ if (vendor_id == VENDOR_ID_AMD)
+ {
+ cl_uint device_processor_cores = 0;
- int num_adl_adapters = 0;
+ #define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043
- uint32_t *valid_adl_device_list = hm_get_list_valid_adl_adapters (hm_adapters_num, &num_adl_adapters, lpAdapterInfo);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof (device_processor_cores), &device_processor_cores, NULL);
- if (num_adl_adapters > 0)
+ device_param->device_processor_cores = device_processor_cores;
+ }
+ else if (vendor_id == VENDOR_ID_NV)
{
- hc_thread_mutex_lock (mux_adl);
+ cl_uint kernel_exec_timeout = 0;
- // hm_get_opencl_busid_devid (hm_adapter_all, devices_all_cnt, devices_all);
+ #define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
- hm_get_adapter_index_amd (hm_adapter_all, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, sizeof (kernel_exec_timeout), &kernel_exec_timeout, NULL);
- hm_get_overdrive_version (hm_dll, hm_adapter_all, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
- hm_check_fanspeed_control (hm_dll, hm_adapter_all, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
+ device_param->kernel_exec_timeout = kernel_exec_timeout;
- hc_thread_mutex_unlock (mux_adl);
- }
+ cl_uint device_processor_cores = 0;
- hm_adapters_all = num_adl_adapters;
+ #define CL_DEVICE_WARP_SIZE_NV 0x4003
- myfree (valid_adl_device_list);
- myfree (lpAdapterInfo);
- }
- }
- }
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_WARP_SIZE_NV, sizeof (device_processor_cores), &device_processor_cores, NULL);
- if (hm_adapters_all == 0)
- {
- gpu_temp_disable = 1;
- }
+ device_param->device_processor_cores = device_processor_cores;
- if (gpu_temp_disable == 1)
- {
- gpu_temp_abort = 0;
- gpu_temp_retain = 0;
- }
+ cl_uint sm_minor = 0;
+ cl_uint sm_major = 0;
- /**
- * enable custom signal handler(s)
- */
+ #define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
+ #define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
- if (benchmark == 0)
- {
- hc_signal (sigHandler_default);
- }
- else
- {
- hc_signal (sigHandler_benchmark);
- }
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
- /**
- * devices mask and properties
- */
+ device_param->sm_minor = sm_minor;
+ device_param->sm_major = sm_major;
+ }
+ else
+ {
+ cl_uint device_processor_cores = 1;
- uint devices_cnt = 0;
+ device_param->device_processor_cores = device_processor_cores;
+ }
+ }
- for (uint device_all_id = 0; device_all_id < devices_all_cnt; device_all_id++)
- {
- if (gpu_devicemask)
- {
- uint device_all_id_mask = 1 << device_all_id;
+ // display results
- if ((device_all_id_mask & gpu_devicemask) != device_all_id_mask)
+ if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
{
- if (quiet == 0 && algorithm_pos == 0) log_info ("Device #%d: skipped by user", device_all_id_mask + 1);
-
- continue;
+ if (device_param->skipped == 0)
+ {
+ log_info ("Device #%u: %s, %lu/%lu MB allocatable, %dMhz, %uMCU",
+ device_id + 1,
+ device_name,
+ (unsigned int) (device_maxmem_alloc / 1024 / 1024),
+ (unsigned int) (device_global_mem / 1024 / 1024),
+ (unsigned int) (device_maxclock_frequency),
+ (unsigned int) device_processors);
+ }
+ else
+ {
+ log_info ("Device #%u: %s, skipped",
+ device_id + 1,
+ device_name);
+ }
}
- }
- const uint device_id = devices_cnt;
+ // common driver check
- devices[device_id] = devices_all[device_all_id];
+ if (device_param->skipped == 0)
+ {
+ if (strstr (device_version, "pocl"))
+ {
+ if (force == 0)
+ {
+ log_info ("");
+ log_info ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
+ log_info ("You are STRONGLY encouraged not to use it");
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
- memcpy (&data.hm_device[device_id], &hm_adapter_all[device_all_id], sizeof (hm_attrs_t));
+ return (-1);
+ }
+ }
- char device_name[INFOSZ];
+ if (device_type & CL_DEVICE_TYPE_GPU)
+ {
+ if (vendor_id == VENDOR_ID_NV)
+ {
+ if (device_param->kernel_exec_timeout != 0)
+ {
+ if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
+ if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
+ }
+ }
+ else if (vendor_id == VENDOR_ID_AMD)
+ {
+ int catalyst_check = (force == 1) ? 0 : 1;
- memset (device_name, 0, sizeof (device_name));
+ int catalyst_warn = 0;
- cl_ulong global_mem_size;
- cl_uint max_clock_frequency;
- cl_uint max_compute_units;
+ int catalyst_broken = 0;
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_NAME, sizeof (device_name), &device_name, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (global_mem_size), &global_mem_size, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (max_clock_frequency), &max_clock_frequency, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (max_compute_units), &max_compute_units, NULL);
+ if (catalyst_check == 1)
+ {
+ catalyst_warn = 1;
- if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
- {
- log_info ("Device #%u: %s, %luMB, %dMhz, %uMCU",
- device_all_id + 1,
- device_name,
- (unsigned int) (global_mem_size / 1024 / 1024),
- (unsigned int) (max_clock_frequency),
- (unsigned int) max_compute_units);
- }
+ // v14.9 and higher
+ if (atoi (device_param->driver_version) >= 1573)
+ {
+ catalyst_warn = 0;
+ }
+
+ catalyst_check = 0;
+ }
+
+ if (catalyst_broken == 1)
+ {
+ log_info ("");
+ log_info ("ATTENTION! The installed catalyst driver in your system is known to be broken!");
+ log_info ("It will pass over cracked hashes and does not report them as cracked");
+ log_info ("You are STRONGLY encouraged not to use it");
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
- devices_cnt++;
+ return (-1);
+ }
+
+ if (catalyst_warn == 1)
+ {
+ log_info ("");
+ log_info ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
+ log_info ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
+ log_info ("See oclHashcat's homepage for official supported catalyst drivers");
+ #ifdef _WIN
+ log_info ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
+ #endif
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
+
+ return (-1);
+ }
+ }
+ }
+
+ devices_active++;
+ }
+
+ // next please
+
+ devices_cnt++;
+ }
}
- if (devices_cnt == 0)
+ if (devices_active == 0)
{
- log_error ("ERROR: No devices left that matches your specification.");
+ log_error ("ERROR: No devices found/left");
return (-1);
}
data.devices_cnt = devices_cnt;
+ data.devices_active = devices_active;
+
if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
{
log_info ("");
}
/**
- * inform the user
+ * OpenCL devices: allocate buffer for device specific information
+ */
+
+ #ifdef HAVE_HWMON
+ int *temp_retain_fanspeed_value = (int *) mycalloc (devices_cnt, sizeof (int));
+
+ #ifdef HAVE_ADL
+ ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (devices_cnt, sizeof (ADLOD6MemClockState));
+
+ int *od_power_control_status = (int *) mycalloc (devices_cnt, sizeof (int));
+ #endif // ADL
+ #endif
+
+ /**
+ * enable custom signal handler(s)
*/
- // gpu temp sanity check
+ if (benchmark == 0)
+ {
+ hc_signal (sigHandler_default);
+ }
+ else
+ {
+ hc_signal (sigHandler_benchmark);
+ }
+
+ /**
+ * User-defined GPU temp handling
+ */
+
+ #ifdef HAVE_HWMON
+ if (gpu_temp_disable == 1)
+ {
+ gpu_temp_abort = 0;
+ gpu_temp_retain = 0;
+ }
if ((gpu_temp_abort != 0) && (gpu_temp_retain != 0))
{
data.gpu_temp_disable = gpu_temp_disable;
data.gpu_temp_abort = gpu_temp_abort;
data.gpu_temp_retain = gpu_temp_retain;
+ #endif
+
+ /**
+ * inform the user
+ */
if (data.quiet == 0)
{
if (attack_mode == ATTACK_MODE_STRAIGHT)
{
- log_info ("Rules: %u", gpu_rules_cnt);
+ log_info ("Rules: %u", kernel_rules_cnt);
}
if (opti_type)
for (uint i = 0; i < 32; i++)
{
- const uint opti_bit = 1 << i;
+ const uint opti_bit = 1u << i;
if (opti_type & opti_bit) log_info ("* %s", stroptitype (opti_bit));
}
* Watchdog and Temperature balance
*/
+ #ifdef HAVE_HWMON
if (gpu_temp_abort == 0)
{
log_info ("Watchdog: Temperature abort trigger disabled");
{
log_info ("Watchdog: Temperature retain trigger set to %uc", gpu_temp_retain);
}
+ #endif
}
+ if (data.quiet == 0) log_info ("");
+
/**
- * devices init
+ * HM devices: init
*/
- int *temp_retain_fanspeed_value = (int *) mycalloc (devices_cnt, sizeof (int));
-
- ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (devices_cnt, sizeof (ADLOD6MemClockState));
-
- int *od_power_control_status = (int *) mycalloc (devices_cnt, sizeof (int));
-
- hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (devices_cnt, sizeof (hc_device_param_t));
+ #ifdef HAVE_HWMON
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ hm_attrs_t hm_adapters_nv[DEVICES_MAX] = { { { 0 }, 0, 0 } };
+ #endif
- data.devices_param = devices_param;
+ #ifdef HAVE_ADL
+ hm_attrs_t hm_adapters_amd[DEVICES_MAX] = { { { 0 }, 0, 0 } };
+ #endif
- for (uint device_id = 0; device_id < devices_cnt; device_id++)
+ if (gpu_temp_disable == 0)
{
- hc_device_param_t *device_param = &data.devices_param[device_id];
-
- cl_device_id device = devices[device_id];
+ #if defined(WIN) && defined(HAVE_NVAPI)
+ if (NvAPI_Initialize () == NVAPI_OK)
+ {
+ HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
- device_param->device = device;
+ int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
- cl_uint max_compute_units = 0;
+ int tmp_out = 0;
- hc_clGetDeviceInfo (device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (max_compute_units), &max_compute_units, NULL);
+ for (int i = 0; i < tmp_in; i++)
+ {
+ hm_adapters_nv[tmp_out++].adapter_index.nv = nvGPUHandle[i];
+ }
- device_param->gpu_processors = max_compute_units;
+ for (int i = 0; i < tmp_out; i++)
+ {
+ NvU32 speed;
- cl_ulong max_mem_alloc_size = 0;
+ if (NvAPI_GPU_GetTachReading (hm_adapters_nv[i].adapter_index.nv, &speed) != NVAPI_NOT_SUPPORTED) hm_adapters_nv[i].fan_supported = 1;
+ }
+ }
+ #endif // WIN && HAVE_NVAPI
- hc_clGetDeviceInfo (device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (max_mem_alloc_size), &max_mem_alloc_size, NULL);
+ #if defined(LINUX) && defined(HAVE_NVML)
+ HM_LIB hm_dll_nv = hm_init (VENDOR_ID_NV);
- device_param->gpu_maxmem_alloc = max_mem_alloc_size;
+ data.hm_dll_nv = hm_dll_nv;
- char tmp[INFOSZ], t1[64];
+ if (hm_dll_nv)
+ {
+ if (hc_NVML_nvmlInit (hm_dll_nv) == NVML_SUCCESS)
+ {
+ HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
- memset (tmp, 0, sizeof (tmp));
+ int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
- hc_clGetDeviceInfo (device, CL_DEVICE_NAME, sizeof (tmp), &tmp, NULL);
+ int tmp_out = 0;
- device_param->device_name = mystrdup (tmp);
+ for (int i = 0; i < tmp_in; i++)
+ {
+ hm_adapters_nv[tmp_out++].adapter_index.nv = nvGPUHandle[i];
+ }
- memset (tmp, 0, sizeof (tmp));
+ for (int i = 0; i < tmp_out; i++)
+ {
+ unsigned int speed;
- hc_clGetDeviceInfo (device, CL_DEVICE_VERSION, sizeof (tmp), &tmp, NULL);
+ if (hc_NVML_nvmlDeviceGetFanSpeed (hm_dll_nv, 1, hm_adapters_nv[i].adapter_index.nv, &speed) != NVML_ERROR_NOT_SUPPORTED) hm_adapters_nv[i].fan_supported = 1;
+ }
+ }
+ }
+ #endif // LINUX && HAVE_NVML
- memset (t1, 0, sizeof (t1));
+ #ifdef HAVE_ADL
+ HM_LIB hm_dll_amd = hm_init (VENDOR_ID_AMD);
- sscanf (tmp, "%*16s %*16s %*16s (%[^)]16s)", t1);
+ data.hm_dll_amd = hm_dll_amd;
- device_param->device_version = mystrdup (t1);
+ if (hm_dll_amd)
+ {
+ if (hc_ADL_Main_Control_Create (hm_dll_amd, ADL_Main_Memory_Alloc, 0) == ADL_OK)
+ {
+ // total number of adapters
- memset (tmp, 0, sizeof (tmp));
+ int hm_adapters_num;
- hc_clGetDeviceInfo (device, CL_DRIVER_VERSION, sizeof (tmp), &tmp, NULL);
+ if (get_adapters_num_amd (hm_dll_amd, &hm_adapters_num) != 0) return (-1);
- device_param->driver_version = mystrdup (tmp);
+ // adapter info
- if (vendor_id == VENDOR_ID_NV)
- {
- cl_uint sm_minor = 0;
- cl_uint sm_major = 0;
+ LPAdapterInfo lpAdapterInfo = hm_get_adapter_info_amd (hm_dll_amd, hm_adapters_num);
- #define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
- #define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
+ if (lpAdapterInfo == NULL) return (-1);
- hc_clGetDeviceInfo (device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
- hc_clGetDeviceInfo (device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
+ // get a list (of ids of) valid/usable adapters
- device_param->sm_minor = sm_minor;
- device_param->sm_major = sm_major;
- }
+ int num_adl_adapters = 0;
- /**
- * catalyst driver check
- */
+ u32 *valid_adl_device_list = hm_get_list_valid_adl_adapters (hm_adapters_num, &num_adl_adapters, lpAdapterInfo);
- if (vendor_id == VENDOR_ID_AMD)
- {
- int catalyst_check = (force == 1) ? 0 : 1;
+ if (num_adl_adapters > 0)
+ {
+ hc_thread_mutex_lock (mux_adl);
- int catalyst_warn = 0;
+ // hm_get_opencl_busid_devid (hm_adapters_amd, devices_all_cnt, devices_all);
- int catalyst_broken = 0;
+ hm_get_adapter_index_amd (hm_adapters_amd, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
- if (catalyst_check == 1)
- {
- catalyst_warn = 1;
+ hm_get_overdrive_version (hm_dll_amd, hm_adapters_amd, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
+ hm_check_fanspeed_control (hm_dll_amd, hm_adapters_amd, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
- // v14.9 and higher
- if ((atoi (device_param->device_version) >= 1573)
- && (atoi (device_param->driver_version) >= 1573))
- {
- catalyst_warn = 0;
+ hc_thread_mutex_unlock (mux_adl);
}
- /*
- // v14.9
- if ((strstr (device_param->device_version, "1573.") != NULL)
- && (strstr (device_param->driver_version, "1573.") != NULL))
- {
- catalyst_warn = 0;
- }
+ myfree (valid_adl_device_list);
+ myfree (lpAdapterInfo);
+ }
+ }
+ #endif // HAVE_ADL
+ }
- // v14.12 -- version overlaps with v15.4 beta
- if ((strstr (device_param->device_version, "1642.") != NULL)
- && (strstr (device_param->driver_version, "1642.") != NULL))
- {
- catalyst_broken = 1;
- }
+ /**
+ * HM devices: copy
+ */
- // v15.4 (Beta, Windows only release)
- if ((strstr (device_param->device_version, "1642.") != NULL)
- && (strstr (device_param->driver_version, "1642.") != NULL))
- {
- catalyst_warn = 0;
- }
+ if (gpu_temp_disable == 0)
+ {
+ for (uint device_id = 0; device_id < devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- // v15.5 (Release, Linux)
- if ((strstr (device_param->device_version, "1702.") != NULL)
- && (strstr (device_param->driver_version, "1702.") != NULL))
- {
- catalyst_warn = 0;
- }
+ if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
- // v15.3 (Beta, Ubuntu repository release)
- if ((strstr (device_param->device_version, "1729.") != NULL)
- && (strstr (device_param->driver_version, "1729.") != NULL))
- {
- catalyst_warn = 0;
- }
- */
+ if (device_param->skipped) continue;
- catalyst_check = 0;
- }
+ const uint platform_devices_id = device_param->platform_devices_id;
- if (catalyst_broken == 1)
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ if (device_param->vendor_id == VENDOR_ID_NV)
{
- log_error ("");
- log_error ("ATTENTION! The installed GPU driver in your system is known to be broken!");
- log_error ("It will pass over cracked hashes and does not report them as cracked");
- log_error ("You are STRONGLY encouraged not to use it");
- log_error ("You can use --force to override this but do not post error reports if you do so");
-
- return (-1);
+ memcpy (&data.hm_device[device_id], &hm_adapters_nv[platform_devices_id], sizeof (hm_attrs_t));
}
+ #endif
- if (catalyst_warn == 1)
+ #ifdef HAVE_ADL
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
- log_error ("");
- log_error ("ATTENTION! Unsupported or incorrect installed GPU driver detected!");
- log_error ("You are STRONGLY encouraged to use the official supported GPU driver for good reasons");
- log_error ("See oclHashcat's homepage for official supported GPU drivers");
- #ifdef _WIN
- log_error ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
- #endif
- log_error ("You can use --force to override this but do not post error reports if you do so");
-
- return (-1);
+ memcpy (&data.hm_device[device_id], &hm_adapters_amd[platform_devices_id], sizeof (hm_attrs_t));
}
+ #endif
}
}
* Driver / ADL bug?
*/
- if (vendor_id == VENDOR_ID_AMD)
+ #ifdef HAVE_ADL
+ if (powertune_enable == 1)
{
- if (powertune_enable == 1)
+ hc_thread_mutex_lock (mux_adl);
+
+ for (uint device_id = 0; device_id < devices_cnt; device_id++)
{
- hc_thread_mutex_lock (mux_adl);
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- for (uint i = 0; i < devices_cnt; i++)
+ if (data.hm_device[device_id].od_version == 6)
{
- if (data.hm_device[i].od_version == 6)
+ // set powertune value only
+
+ int powertune_supported = 0;
+
+ int ADL_rc = 0;
+
+ if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
{
- // set powertune value only
+ log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
- int powertune_supported = 0;
+ return (-1);
+ }
- int ADL_rc = 0;
+ if (powertune_supported != 0)
+ {
+ // powertune set
+ ADLOD6PowerControlInfo powertune = {0, 0, 0, 0, 0};
- if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll, data.hm_device[i].adapter_index.amd, &powertune_supported)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune)) != ADL_OK)
{
- log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
+ log_error ("ERROR: Failed to get current ADL PowerControl settings");
return (-1);
}
- if (powertune_supported != 0)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
{
- // powertune set
- ADLOD6PowerControlInfo powertune = {0, 0, 0, 0, 0};
-
- if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll, data.hm_device[i].adapter_index.amd, &powertune)) != ADL_OK)
- {
- log_error ("ERROR: Failed to get current ADL PowerControl settings");
-
- return (-1);
- }
-
- if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll, data.hm_device[i].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
- {
- log_error ("ERROR: Failed to set new ADL PowerControl values");
+ log_error ("ERROR: Failed to set new ADL PowerControl values");
- return (-1);
- }
+ return (-1);
}
}
}
-
- hc_thread_mutex_unlock (mux_adl);
}
+
+ hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
+ #endif // HAVE_HWMON
- uint gpu_blocks_all = 0;
+ uint kernel_blocks_all = 0;
for (uint device_id = 0; device_id < devices_cnt; device_id++)
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
/**
* device properties
*/
- char *device_name = device_param->device_name;
- char *device_version = device_param->device_version;
- char *driver_version = device_param->driver_version;
+ char *device_name_chksum = device_param->device_name_chksum;
- uint gpu_processors = device_param->gpu_processors;
+ uint device_processors = device_param->device_processors;
+
+ uint device_processor_cores = device_param->device_processor_cores;
+
+ cl_device_type device_type = device_param->device_type;
/**
* create context for each device
* create input buffers on device
*/
- uint gpu_threads = GPU_THREADS;
+ uint kernel_threads = KERNEL_THREADS;
+
+ // bcrypt
+ if (hash_mode == 3200) kernel_threads = 8;
+ if (hash_mode == 9000) kernel_threads = 8;
- if (hash_mode == 3200) gpu_threads = 8;
- if (hash_mode == 9000) gpu_threads = 8;
+ if (device_type & CL_DEVICE_TYPE_CPU)
+ {
+ // CPUs still need lots of workitems, don't know why...
+ // for testing phase, lets start with this
+
+// kernel_accel = 1;
+ }
- uint gpu_power = gpu_processors * gpu_threads * gpu_accel;
- uint gpu_blocks = gpu_power;
+ uint kernel_power = device_processors * kernel_threads * kernel_accel;
+ uint kernel_blocks = kernel_power;
- device_param->gpu_threads = gpu_threads;
- device_param->gpu_power_user = gpu_power;
- device_param->gpu_blocks_user = gpu_blocks;
+ device_param->kernel_threads = kernel_threads;
+ device_param->kernel_power_user = kernel_power;
+ device_param->kernel_blocks_user = kernel_blocks;
- gpu_blocks_all += gpu_blocks;
+ kernel_blocks_all += kernel_blocks;
- uint size_pws = gpu_power * sizeof (pw_t);
+ uint size_pws = kernel_power * sizeof (pw_t);
uint size_tmps = 4;
switch (hash_mode)
{
- case 400: size_tmps = gpu_blocks * sizeof (phpass_tmp_t); break;
- case 500: size_tmps = gpu_blocks * sizeof (md5crypt_tmp_t); break;
- case 501: size_tmps = gpu_blocks * sizeof (md5crypt_tmp_t); break;
- case 1600: size_tmps = gpu_blocks * sizeof (md5crypt_tmp_t); break;
- case 1800: size_tmps = gpu_blocks * sizeof (sha512crypt_tmp_t); break;
- case 2100: size_tmps = gpu_blocks * sizeof (dcc2_tmp_t); break;
- case 2500: size_tmps = gpu_blocks * sizeof (wpa_tmp_t); break;
- case 3200: size_tmps = gpu_blocks * sizeof (bcrypt_tmp_t); break;
- case 5200: size_tmps = gpu_blocks * sizeof (pwsafe3_tmp_t); break;
- case 5800: size_tmps = gpu_blocks * sizeof (androidpin_tmp_t); break;
+ case 400: size_tmps = kernel_blocks * sizeof (phpass_tmp_t); break;
+ case 500: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
+ case 501: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
+ case 1600: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
+ case 1800: size_tmps = kernel_blocks * sizeof (sha512crypt_tmp_t); break;
+ case 2100: size_tmps = kernel_blocks * sizeof (dcc2_tmp_t); break;
+ case 2500: size_tmps = kernel_blocks * sizeof (wpa_tmp_t); break;
+ case 3200: size_tmps = kernel_blocks * sizeof (bcrypt_tmp_t); break;
+ case 5200: size_tmps = kernel_blocks * sizeof (pwsafe3_tmp_t); break;
+ case 5800: size_tmps = kernel_blocks * sizeof (androidpin_tmp_t); break;
case 6211:
case 6212:
- case 6213: size_tmps = gpu_blocks * sizeof (tc_tmp_t); break;
+ case 6213: size_tmps = kernel_blocks * sizeof (tc_tmp_t); break;
case 6221:
case 6222:
- case 6223: size_tmps = gpu_blocks * sizeof (tc64_tmp_t); break;
+ case 6223: size_tmps = kernel_blocks * sizeof (tc64_tmp_t); break;
case 6231:
case 6232:
- case 6233: size_tmps = gpu_blocks * sizeof (tc_tmp_t); break;
+ case 6233: size_tmps = kernel_blocks * sizeof (tc_tmp_t); break;
case 6241:
case 6242:
- case 6243: size_tmps = gpu_blocks * sizeof (tc_tmp_t); break;
- case 6300: size_tmps = gpu_blocks * sizeof (md5crypt_tmp_t); break;
- case 6400: size_tmps = gpu_blocks * sizeof (sha256aix_tmp_t); break;
- case 6500: size_tmps = gpu_blocks * sizeof (sha512aix_tmp_t); break;
- case 6600: size_tmps = gpu_blocks * sizeof (agilekey_tmp_t); break;
- case 6700: size_tmps = gpu_blocks * sizeof (sha1aix_tmp_t); break;
- case 6800: size_tmps = gpu_blocks * sizeof (lastpass_tmp_t); break;
- case 7100: size_tmps = gpu_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 7200: size_tmps = gpu_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 7400: size_tmps = gpu_blocks * sizeof (sha256crypt_tmp_t); break;
- case 7900: size_tmps = gpu_blocks * sizeof (drupal7_tmp_t); break;
- case 8200: size_tmps = gpu_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 8800: size_tmps = gpu_blocks * sizeof (androidfde_tmp_t); break;
- case 8900: size_tmps = gpu_blocks * sizeof (scrypt_tmp_t); break;
- case 9000: size_tmps = gpu_blocks * sizeof (pwsafe2_tmp_t); break;
- case 9100: size_tmps = gpu_blocks * sizeof (lotus8_tmp_t); break;
- case 9200: size_tmps = gpu_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 9300: size_tmps = gpu_blocks * sizeof (scrypt_tmp_t); break;
- case 9400: size_tmps = gpu_blocks * sizeof (office2007_tmp_t); break;
- case 9500: size_tmps = gpu_blocks * sizeof (office2010_tmp_t); break;
- case 9600: size_tmps = gpu_blocks * sizeof (office2013_tmp_t); break;
- case 10000: size_tmps = gpu_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 10200: size_tmps = gpu_blocks * sizeof (cram_md5_t); break;
- case 10300: size_tmps = gpu_blocks * sizeof (saph_sha1_tmp_t); break;
- case 10500: size_tmps = gpu_blocks * sizeof (pdf14_tmp_t); break;
- case 10700: size_tmps = gpu_blocks * sizeof (pdf17l8_tmp_t); break;
- case 10900: size_tmps = gpu_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
- case 11300: size_tmps = gpu_blocks * sizeof (bitcoin_wallet_tmp_t); break;
- case 11600: size_tmps = gpu_blocks * sizeof (seven_zip_tmp_t); break;
- case 11900: size_tmps = gpu_blocks * sizeof (pbkdf2_md5_tmp_t); break;
- case 12000: size_tmps = gpu_blocks * sizeof (pbkdf2_sha1_tmp_t); break;
- case 12100: size_tmps = gpu_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
- case 12200: size_tmps = gpu_blocks * sizeof (ecryptfs_tmp_t); break;
- case 12300: size_tmps = gpu_blocks * sizeof (oraclet_tmp_t); break;
- case 12400: size_tmps = gpu_blocks * sizeof (bsdicrypt_tmp_t); break;
- case 12500: size_tmps = gpu_blocks * sizeof (rar3_tmp_t); break;
- case 12700: size_tmps = gpu_blocks * sizeof (mywallet_tmp_t); break;
- case 12800: size_tmps = gpu_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 6243: size_tmps = kernel_blocks * sizeof (tc_tmp_t); break;
+ case 6300: size_tmps = kernel_blocks * sizeof (md5crypt_tmp_t); break;
+ case 6400: size_tmps = kernel_blocks * sizeof (sha256aix_tmp_t); break;
+ case 6500: size_tmps = kernel_blocks * sizeof (sha512aix_tmp_t); break;
+ case 6600: size_tmps = kernel_blocks * sizeof (agilekey_tmp_t); break;
+ case 6700: size_tmps = kernel_blocks * sizeof (sha1aix_tmp_t); break;
+ case 6800: size_tmps = kernel_blocks * sizeof (lastpass_tmp_t); break;
+ case 7100: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 7200: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 7400: size_tmps = kernel_blocks * sizeof (sha256crypt_tmp_t); break;
+ case 7900: size_tmps = kernel_blocks * sizeof (drupal7_tmp_t); break;
+ case 8200: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 8800: size_tmps = kernel_blocks * sizeof (androidfde_tmp_t); break;
+ case 8900: size_tmps = kernel_blocks * sizeof (scrypt_tmp_t); break;
+ case 9000: size_tmps = kernel_blocks * sizeof (pwsafe2_tmp_t); break;
+ case 9100: size_tmps = kernel_blocks * sizeof (lotus8_tmp_t); break;
+ case 9200: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 9300: size_tmps = kernel_blocks * sizeof (scrypt_tmp_t); break;
+ case 9400: size_tmps = kernel_blocks * sizeof (office2007_tmp_t); break;
+ case 9500: size_tmps = kernel_blocks * sizeof (office2010_tmp_t); break;
+ case 9600: size_tmps = kernel_blocks * sizeof (office2013_tmp_t); break;
+ case 10000: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 10200: size_tmps = kernel_blocks * sizeof (cram_md5_t); break;
+ case 10300: size_tmps = kernel_blocks * sizeof (saph_sha1_tmp_t); break;
+ case 10500: size_tmps = kernel_blocks * sizeof (pdf14_tmp_t); break;
+ case 10700: size_tmps = kernel_blocks * sizeof (pdf17l8_tmp_t); break;
+ case 10900: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 11300: size_tmps = kernel_blocks * sizeof (bitcoin_wallet_tmp_t); break;
+ case 11600: size_tmps = kernel_blocks * sizeof (seven_zip_tmp_t); break;
+ case 11900: size_tmps = kernel_blocks * sizeof (pbkdf2_md5_tmp_t); break;
+ case 12000: size_tmps = kernel_blocks * sizeof (pbkdf2_sha1_tmp_t); break;
+ case 12100: size_tmps = kernel_blocks * sizeof (pbkdf2_sha512_tmp_t); break;
+ case 12200: size_tmps = kernel_blocks * sizeof (ecryptfs_tmp_t); break;
+ case 12300: size_tmps = kernel_blocks * sizeof (oraclet_tmp_t); break;
+ case 12400: size_tmps = kernel_blocks * sizeof (bsdicrypt_tmp_t); break;
+ case 12500: size_tmps = kernel_blocks * sizeof (rar3_tmp_t); break;
+ case 12700: size_tmps = kernel_blocks * sizeof (mywallet_tmp_t); break;
+ case 12800: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 12900: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
+ case 13000: size_tmps = kernel_blocks * sizeof (pbkdf2_sha256_tmp_t); break;
};
uint size_hooks = 4;
device_param->size_root_css = size_root_css;
device_param->size_markov_css = size_markov_css;
- uint size_results = GPU_THREADS * sizeof (uint);
+ uint size_results = KERNEL_THREADS * sizeof (uint);
device_param->size_results = size_results;
- uint size_rules = gpu_rules_cnt * sizeof (gpu_rule_t);
- uint size_rules_c = GPU_RULES * sizeof (gpu_rule_t);
+ uint size_rules = kernel_rules_cnt * sizeof (kernel_rule_t);
+ uint size_rules_c = KERNEL_RULES * sizeof (kernel_rule_t);
+
uint size_plains = digests_cnt * sizeof (plain_t);
uint size_salts = salts_cnt * sizeof (salt_t);
uint size_esalts = salts_cnt * esalt_size;
device_param->size_shown = size_shown;
device_param->size_salts = size_salts;
- uint size_combs = GPU_COMBS * sizeof (comb_t);
- uint size_bfs = GPU_BFS * sizeof (bf_t);
+ uint size_combs = KERNEL_COMBS * sizeof (comb_t);
+ uint size_bfs = KERNEL_BFS * sizeof (bf_t);
uint size_tm = 32 * sizeof (bs_word_t);
- uint64_t size_scryptV = 1;
+ u64 size_scryptV = 1;
if ((hash_mode == 8900) || (hash_mode == 9300))
{
- #define SHADER_PER_MP 8
- #define WAVEFRONTS 64
-
- uint tmto_start = 2;
- uint tmto_stop = 1024;
+ uint tmto_start = 0;
+ uint tmto_stop = 10;
if (scrypt_tmto)
{
- tmto_start = 1 << scrypt_tmto;
- tmto_stop = tmto_start + 1;
+ tmto_start = scrypt_tmto;
+ }
+ else
+ {
+ // in case the user did not specify the tmto manually
+ // use some values known to run best (tested on 290x for AMD and 980ti for NV)
+ // but set the lower end only in case the user has a device with too little memory
+
+ if (hash_mode == 8900)
+ {
+ if (device_param->vendor_id == VENDOR_ID_AMD)
+ {
+ tmto_start = 1;
+ }
+ else if (device_param->vendor_id == VENDOR_ID_NV)
+ {
+ tmto_start = 3;
+ }
+ }
+ else if (hash_mode == 9300)
+ {
+ if (device_param->vendor_id == VENDOR_ID_AMD)
+ {
+ tmto_start = 3;
+ }
+ else if (device_param->vendor_id == VENDOR_ID_NV)
+ {
+ tmto_start = 5;
+ }
+ }
+ }
+
+ if (quiet == 0) log_info ("");
+
+ uint shader_per_mp = 1;
+
+ if (device_param->vendor_id == VENDOR_ID_AMD)
+ {
+ shader_per_mp = 8;
+ }
+ else if (device_param->vendor_id == VENDOR_ID_NV)
+ {
+ shader_per_mp = 32;
}
- for (uint tmto = tmto_start; tmto < tmto_stop; tmto <<= 1)
+ for (uint tmto = tmto_start; tmto < tmto_stop; tmto++)
{
- // todo -- make sure all salts get the new tmto value
+ // TODO: in theory the following calculation needs to be done per salt, not global
+ // we assume all hashes have the same scrypt settings
size_scryptV = (128 * data.salts_buf[0].scrypt_r) * data.salts_buf[0].scrypt_N;
- size_scryptV /= tmto;
+ size_scryptV /= 1 << tmto;
- size_scryptV *= gpu_processors * WAVEFRONTS * SHADER_PER_MP;
+ size_scryptV *= device_processors * device_processor_cores * shader_per_mp;
- if (size_scryptV > (device_param->gpu_maxmem_alloc / 2)) continue;
+ if (size_scryptV > device_param->device_maxmem_alloc)
+ {
+ if (quiet == 0) log_info ("WARNING: not enough device memory allocatable to use --scrypt-tmto %d, increasing...", tmto);
+
+ continue;
+ }
for (uint salts_pos = 0; salts_pos < data.salts_cnt; salts_pos++)
{
data.salts_buf[salts_pos].scrypt_tmto = tmto;
- data.salts_buf[salts_pos].scrypt_phy = gpu_processors * WAVEFRONTS * SHADER_PER_MP;
+ data.salts_buf[salts_pos].scrypt_phy = device_processors * device_processor_cores * shader_per_mp;
}
break;
}
- if (data.salts_buf[0].scrypt_tmto == 0)
+ if (data.salts_buf[0].scrypt_phy == 0)
{
- log_error ("ERROR: can't allocate enough GPU memory");
+ log_error ("ERROR: can't allocate enough device memory");
return -1;
}
if (quiet == 0) log_info ("");
- if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u\n", data.salts_buf[0].scrypt_tmto);
+ if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u, mem: %u\n", data.salts_buf[0].scrypt_tmto, size_scryptV);
}
/**
char build_opts[1024];
- // we don't have sm_* on AMD but it doesn't matter
+ // we don't have sm_* on vendors not NV but it doesn't matter
- sprintf (build_opts, "-I. -IOpenCL/ -DVENDOR_ID=%d -DCUDA_ARCH=%d", vendor_id, (device_param->sm_major * 100) + device_param->sm_minor);
+ sprintf (build_opts, "-I%s/ -DVENDOR_ID=%d -DCUDA_ARCH=%d -DVECT_SIZE=%u", shared_dir, device_param->vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width);
/**
* main kernel
* kernel source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
- generate_source_kernel_filename (attack_exec, attack_kern, kern_type, install_dir, source_file);
+ generate_source_kernel_filename (attack_exec, attack_kern, kern_type, shared_dir, source_file);
struct stat sst;
* kernel cached filename
*/
- char cached_file[256];
-
- memset (cached_file, 0, sizeof (cached_file));
+ char cached_file[256] = { 0 };
- generate_cached_kernel_filename (attack_exec, attack_kern, kern_type, install_dir, device_name, device_version, driver_version, vendor_id, cached_file);
+ generate_cached_kernel_filename (attack_exec, attack_kern, kern_type, profile_dir, device_name_chksum, cached_file);
int cached = 1;
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
- if (force_jit_compilation == 0)
+ if (force_jit_compilation == -1)
{
if (cached == 0)
{
clGetProgramInfo (device_param->program, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program, 1, &device_param->device, build_opts, NULL, NULL);
}
}
else if (force_jit_compilation == 8900)
{
- sprintf (build_opts, "%s -DSCRYPT_N=%d -DSCRYPT_R=%d -DSCRYPT_P=%d -DSCRYPT_TMTO=%d", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, data.salts_buf[0].scrypt_tmto);
+ sprintf (build_opts, "%s -DSCRYPT_N=%d -DSCRYPT_R=%d -DSCRYPT_P=%d -DSCRYPT_TMTO=%d", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, 1 << data.salts_buf[0].scrypt_tmto);
}
hc_clBuildProgram (device_param->program, 1, &device_param->device, build_opts, NULL, NULL);
* kernel mp source filename
*/
- char source_file[256];
+ char source_file[256] = { 0 };
- memset (source_file, 0, sizeof (source_file));
-
- generate_source_kernel_mp_filename (opti_type, opts_type, install_dir, source_file);
+ generate_source_kernel_mp_filename (opti_type, opts_type, shared_dir, source_file);
struct stat sst;
* kernel mp cached filename
*/
- char cached_file[256];
-
- memset (cached_file, 0, sizeof (cached_file));
+ char cached_file[256] = { 0 };
- generate_cached_kernel_mp_filename (opti_type, opts_type, install_dir, device_name, device_version, driver_version, vendor_id, cached_file);
+ generate_cached_kernel_mp_filename (opti_type, opts_type, profile_dir, device_name_chksum, cached_file);
int cached = 1;
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
if (cached == 0)
{
clGetProgramInfo (device_param->program_mp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program_mp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_mp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program_mp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program_mp, 1, &device_param->device, build_opts, NULL, NULL);
}
* amplifier kernel
*/
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
}
* kernel amp source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
- generate_source_kernel_amp_filename (attack_kern, install_dir, source_file);
+ generate_source_kernel_amp_filename (attack_kern, shared_dir, source_file);
struct stat sst;
* kernel amp cached filename
*/
- char cached_file[256];
+ char cached_file[256] = { 0 };
- memset (cached_file, 0, sizeof (cached_file));
-
- generate_cached_kernel_amp_filename (attack_kern, install_dir, device_name, device_version, driver_version, vendor_id, cached_file);
+ generate_cached_kernel_amp_filename (attack_kern, profile_dir, device_name_chksum, cached_file);
int cached = 1;
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
if (cached == 0)
{
clGetProgramInfo (device_param->program_amp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program_amp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_amp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program_amp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program_amp, 1, &device_param->device, build_opts, NULL, NULL);
}
device_param->d_rules = hc_clCreateBuffer (device_param->context, CL_MEM_READ_ONLY, size_rules, NULL);
device_param->d_rules_c = hc_clCreateBuffer (device_param->context, CL_MEM_READ_ONLY, size_rules_c, NULL);
- hc_clEnqueueWriteBuffer (device_param->command_queue, device_param->d_rules, CL_TRUE, 0, size_rules, gpu_rules_buf, 0, NULL, NULL);
+ hc_clEnqueueWriteBuffer (device_param->command_queue, device_param->d_rules, CL_TRUE, 0, size_rules, kernel_rules_buf, 0, NULL, NULL);
- run_kernel_bzero (device_param, device_param->d_rules_c, size_rules_c);
+ run_kernel_bzero (device_param, device_param->d_rules_c, size_rules_c);
}
else if (attack_kern == ATTACK_KERN_COMBI)
{
device_param->pw_caches = pw_caches;
- comb_t *combs_buf = (comb_t *) mycalloc (GPU_COMBS, sizeof (comb_t));
+ comb_t *combs_buf = (comb_t *) mycalloc (KERNEL_COMBS, sizeof (comb_t));
device_param->combs_buf = combs_buf;
device_param->kernel_params_buf32[24] = 0; // salt_pos
device_param->kernel_params_buf32[25] = 0; // loop_pos
device_param->kernel_params_buf32[26] = 0; // loop_cnt
- device_param->kernel_params_buf32[27] = 0; // gpu_rules_cnt
+ device_param->kernel_params_buf32[27] = 0; // kernel_rules_cnt
device_param->kernel_params_buf32[28] = 0; // digests_cnt
device_param->kernel_params_buf32[29] = 0; // digests_offset
device_param->kernel_params_buf32[30] = 0; // combs_mode
device_param->kernel_params_buf32[31] = 0; // gid_max
- device_param->kernel_params[ 0] = (attack_exec == ATTACK_EXEC_ON_GPU)
+ device_param->kernel_params[ 0] = (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
? &device_param->d_pws_buf
: &device_param->d_pws_amp_buf;
device_param->kernel_params[ 1] = &device_param->d_rules_c;
* kernel name
*/
- char kernel_name[64];
-
- memset (kernel_name, 0, sizeof (kernel_name));
+ char kernel_name[64] = { 0 };
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (opti_type & OPTI_TYPE_SINGLE_HASH)
{
}
else
{
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 4);
+ snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 4);
device_param->kernel1 = hc_clCreateKernel (device_param->program, kernel_name);
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 8);
+ snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 8);
device_param->kernel2 = hc_clCreateKernel (device_param->program, kernel_name);
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 16);
+ snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 16);
device_param->kernel3 = hc_clCreateKernel (device_param->program, kernel_name);
}
device_param->kernel_mp = hc_clCreateKernel (device_param->program_mp, "C_markov");
}
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
// nothing to do
}
device_param->kernel_amp = hc_clCreateKernel (device_param->program_amp, "amp");
}
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
// nothing to do
}
* Store initial fanspeed if gpu_temp_retain is enabled
*/
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
int gpu_temp_retain_set = 0;
if (gpu_temp_disable == 0)
uint cur_temp = 0;
uint default_temp = 0;
- int ADL_rc = hc_ADL_Overdrive6_TargetTemperatureData_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, (int *) &cur_temp, (int *) &default_temp);
+ int ADL_rc = hc_ADL_Overdrive6_TargetTemperatureData_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, (int *) &cur_temp, (int *) &default_temp);
if (ADL_rc == ADL_OK)
{
int powertune_supported = 0;
- if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
ADLOD6PowerControlInfo powertune = {0, 0, 0, 0, 0};
- if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &powertune)) == ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune)) == ADL_OK)
{
- ADL_rc = hc_ADL_Overdrive_PowerControl_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &od_power_control_status[device_id]);
+ ADL_rc = hc_ADL_Overdrive_PowerControl_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &od_power_control_status[device_id]);
}
if (ADL_rc != ADL_OK)
return (-1);
}
- if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll, data.hm_device[device_id].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
{
log_error ("ERROR: Failed to set new ADL PowerControl values");
od_clock_mem_status[device_id].state.iNumberOfPerformanceLevels = 2;
- if ((ADL_rc = hc_ADL_Overdrive_StateInfo_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, ADL_OD6_GETSTATEINFO_CUSTOM_PERFORMANCE, &od_clock_mem_status[device_id])) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_StateInfo_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, ADL_OD6_GETSTATEINFO_CUSTOM_PERFORMANCE, &od_clock_mem_status[device_id])) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL memory and engine clock frequency");
ADLOD6Capabilities caps = {0, 0, 0, {0, 0, 0}, {0, 0, 0}, 0, 0};
- if ((ADL_rc = hc_ADL_Overdrive_Capabilities_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &caps)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_Capabilities_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &caps)) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL device capabilities");
int engine_clock_profile_max = od_clock_mem_status[device_id].state.aLevels[1].iEngineClock;
int memory_clock_profile_max = od_clock_mem_status[device_id].state.aLevels[1].iMemoryClock;
- // warning if profile has to low max values
+ // warning if profile has too low max values
if ((engine_clock_max - engine_clock_profile_max) > warning_trigger_engine)
{
performance_state->aLevels[0].iMemoryClock = memory_clock_profile_max;
performance_state->aLevels[1].iMemoryClock = memory_clock_profile_max;
- if ((ADL_rc = hc_ADL_Overdrive_State_Set (data.hm_dll, data.hm_device[device_id].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_State_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
{
log_info ("ERROR: Failed to set ADL performance state");
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON && HAVE_ADL
}
- data.gpu_blocks_all = gpu_blocks_all;
+ data.kernel_blocks_all = kernel_blocks_all;
if (data.quiet == 0) log_info ("");
char *hash_type = strhashtype (data.hash_mode); // not a bug
log_info ("Hashtype: %s", hash_type);
- log_info ("Workload: %u loops, %u accel", gpu_loops, gpu_accel);
+ log_info ("Workload: %u loops, %u accel", kernel_loops, kernel_accel);
log_info ("");
}
* keep track of the progress
*/
- data.words_progress_done = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
- data.words_progress_rejected = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
- data.words_progress_restored = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
+ data.words_progress_done = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
+ data.words_progress_rejected = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
+ data.words_progress_restored = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
/**
* open filehandles
data.quiet = 1;
- const uint64_t words1_cnt = count_words (wl_data, fp1, dictfile1, dictstat_base, &dictstat_nmemb);
+ const u64 words1_cnt = count_words (wl_data, fp1, dictfile1, dictstat_base, &dictstat_nmemb);
data.quiet = quiet;
data.quiet = 1;
- const uint64_t words2_cnt = count_words (wl_data, fp2, dictfile2, dictstat_base, &dictstat_nmemb);
+ const u64 words2_cnt = count_words (wl_data, fp2, dictfile2, dictstat_base, &dictstat_nmemb);
data.quiet = quiet;
if (weak_hash_threshold >= salts_cnt)
{
+ uint first_device_id = 0;
+
+ for (uint device_id = 0; device_id < devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ first_device_id = device_id;
+
+ break;
+ }
+
if (data.quiet == 0) log_info_nn ("Checking for weak hashes...");
for (uint salt_pos = 0; salt_pos < salts_cnt; salt_pos++)
{
- weak_hash_check (&data.devices_param[0], salt_pos, gpu_loops);
+ weak_hash_check (&data.devices_param[first_device_id], salt_pos, kernel_loops);
}
}
data.css_cnt = css_cnt;
data.css_buf = css_buf;
- uint uniq_tbls[SP_PW_MAX][CHARSIZ];
-
- memset (uniq_tbls, 0, sizeof (uniq_tbls));
+ uint uniq_tbls[SP_PW_MAX][CHARSIZ] = { { 0 } };
mp_css_to_uniq_tbl (css_cnt, css_buf, uniq_tbls);
if (root_table_buf == NULL) root_table_buf = (hcstat_table_t *) mycalloc (SP_ROOT_CNT, sizeof (hcstat_table_t));
if (markov_table_buf == NULL) markov_table_buf = (hcstat_table_t *) mycalloc (SP_MARKOV_CNT, sizeof (hcstat_table_t));
- sp_setup_tbl (install_dir, markov_hcstat, markov_disable, markov_classic, root_table_buf, markov_table_buf);
+ sp_setup_tbl (shared_dir, markov_hcstat, markov_disable, markov_classic, root_table_buf, markov_table_buf);
markov_threshold = (markov_threshold != 0) ? markov_threshold : CHARSIZ;
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->kernel_params_mp[0] = &device_param->d_combs;
device_param->kernel_params_mp[1] = &device_param->d_root_css_buf;
device_param->kernel_params_mp[2] = &device_param->d_markov_css_buf;
data.devices_status = STATUS_INIT;
- memset (data.words_progress_done, 0, data.salts_cnt * sizeof (uint64_t));
- memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (uint64_t));
- memset (data.words_progress_restored, 0, data.salts_cnt * sizeof (uint64_t));
+ memset (data.words_progress_done, 0, data.salts_cnt * sizeof (u64));
+ memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (u64));
+ memset (data.words_progress_restored, 0, data.salts_cnt * sizeof (u64));
memset (data.cpt_buf, 0, CPT_BUF * sizeof (cpt_t));
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->speed_pos = 0;
- memset (device_param->speed_cnt, 0, SPEED_CACHE * sizeof (uint64_t));
+ memset (device_param->speed_cnt, 0, SPEED_CACHE * sizeof (u64));
memset (device_param->speed_ms, 0, SPEED_CACHE * sizeof (float));
memset (device_param->speed_rec, 0, SPEED_CACHE * sizeof (hc_timer_t));
- device_param->gpu_power = device_param->gpu_power_user;
- device_param->gpu_blocks = device_param->gpu_blocks_user;
+ device_param->kernel_power = device_param->kernel_power_user;
+ device_param->kernel_blocks = device_param->kernel_blocks_user;
device_param->outerloop_pos = 0;
device_param->outerloop_left = 0;
device_param->words_done = 0;
}
- data.gpu_blocks_div = 0;
+ data.kernel_blocks_div = 0;
// figure out some workload
if (maskpos > 0 && dictpos == 0) free (masks[maskpos - 1]);
- uint uniq_tbls[SP_PW_MAX][CHARSIZ];
-
- memset (uniq_tbls, 0, sizeof (uniq_tbls));
+ uint uniq_tbls[SP_PW_MAX][CHARSIZ] = { { 0 } };
mp_css_to_uniq_tbl (css_cnt, css_buf, uniq_tbls);
if (root_table_buf == NULL) root_table_buf = (hcstat_table_t *) mycalloc (SP_ROOT_CNT, sizeof (hcstat_table_t));
if (markov_table_buf == NULL) markov_table_buf = (hcstat_table_t *) mycalloc (SP_MARKOV_CNT, sizeof (hcstat_table_t));
- sp_setup_tbl (install_dir, markov_hcstat, markov_disable, markov_classic, root_table_buf, markov_table_buf);
+ sp_setup_tbl (shared_dir, markov_hcstat, markov_disable, markov_classic, root_table_buf, markov_table_buf);
markov_threshold = (markov_threshold != 0) ? markov_threshold : CHARSIZ;
uint css_cnt_l = css_cnt;
uint css_cnt_r;
- if (attack_exec == ATTACK_EXEC_ON_GPU)
+ if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (save_css_cnt < 6)
{
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->kernel_params_mp_l[0] = &device_param->d_pws_buf;
device_param->kernel_params_mp_l[1] = &device_param->d_root_css_buf;
device_param->kernel_params_mp_l[2] = &device_param->d_markov_css_buf;
}
}
- uint64_t words_base = data.words_cnt;
+ u64 words_base = data.words_cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
- if (data.gpu_rules_cnt)
+ if (data.kernel_rules_cnt)
{
- words_base /= data.gpu_rules_cnt;
+ words_base /= data.kernel_rules_cnt;
}
}
else if (data.attack_kern == ATTACK_KERN_COMBI)
{
for (uint i = 0; i < data.salts_cnt; i++)
{
- data.words_progress_restored[i] = data.words_cur * data.gpu_rules_cnt;
+ data.words_progress_restored[i] = data.words_cur * data.kernel_rules_cnt;
}
}
else if (data.attack_kern == ATTACK_KERN_COMBI)
if ((wordlist_mode == WL_MODE_FILE) || (wordlist_mode == WL_MODE_MASK))
{
- if (data.words_base < gpu_blocks_all)
+ if (data.words_base < kernel_blocks_all)
{
if (quiet == 0)
{
log_info ("");
log_info ("ATTENTION!");
log_info (" The wordlist or mask you are using is too small.");
- log_info (" Therefore, oclHashcat is unable to utilize the full parallelization power of your GPU(s).");
+ log_info (" Therefore, oclHashcat is unable to utilize the full parallelization power of your device(s).");
log_info (" The cracking speed will drop.");
log_info (" Workaround: https://hashcat.net/wiki/doku.php?id=frequently_asked_questions#how_to_create_more_work_for_full_speed");
log_info ("");
{
hc_device_param_t *device_param = &devices_param[device_id];
- device_param->device_id = device_id;
-
if (wordlist_mode == WL_MODE_STDIN)
{
hc_thread_create (c_threads[device_id], thread_calc_stdin, device_param);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
local_free (device_param->result);
local_free (device_param->pw_caches);
local_free (device_param->device_name);
+ local_free (device_param->device_name_chksum);
+
local_free (device_param->device_version);
local_free (device_param->driver_version);
if (device_param->program) hc_clReleaseProgram (device_param->program);
if (device_param->program_mp) hc_clReleaseProgram (device_param->program_mp);
if (device_param->program_amp) hc_clReleaseProgram (device_param->program_amp);
+
if (device_param->command_queue) hc_clReleaseCommandQueue (device_param->command_queue);
if (device_param->context) hc_clReleaseContext (device_param->context);
}
// reset default fan speed
+ #ifdef HAVE_HWMON
if (gpu_temp_disable == 0)
{
+ #ifdef HAVE_ADL
if (gpu_temp_retain != 0) // VENDOR_ID_AMD is implied here
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if (data.hm_device[i].fan_supported == 1)
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ if (data.hm_device[device_id].fan_supported == 1)
{
- int fanspeed = temp_retain_fanspeed_value[i];
+ int fanspeed = temp_retain_fanspeed_value[device_id];
if (fanspeed == -1) continue;
- int rc = hm_set_fanspeed_with_device_id_amd (i, fanspeed);
+ int rc = hm_set_fanspeed_with_device_id_amd (device_id, fanspeed);
- if (rc == -1) log_info ("WARNING: Failed to restore default fan speed for gpu number: %i:", i);
+ if (rc == -1) log_info ("WARNING: Failed to restore default fan speed for gpu number: %i:", device_id);
}
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
}
// reset power tuning
+ #ifdef HAVE_ADL
if (powertune_enable == 1) // VENDOR_ID_AMD is implied here
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if (data.hm_device[i].od_version == 6)
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ if (data.hm_device[device_id].od_version == 6)
{
// check powertune capabilities first, if not available then skip device
int powertune_supported = 0;
- if ((hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll, data.hm_device[i].adapter_index.amd, &powertune_supported)) != ADL_OK)
+ if ((hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
{
// powercontrol settings
- if ((hc_ADL_Overdrive_PowerControl_Set (data.hm_dll, data.hm_device[i].adapter_index.amd, od_power_control_status[i])) != ADL_OK)
+ if ((hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, od_power_control_status[device_id])) != ADL_OK)
{
log_info ("ERROR: Failed to restore the ADL PowerControl values");
performance_state->iNumberOfPerformanceLevels = 2;
- performance_state->aLevels[0].iEngineClock = od_clock_mem_status[i].state.aLevels[0].iEngineClock;
- performance_state->aLevels[1].iEngineClock = od_clock_mem_status[i].state.aLevels[1].iEngineClock;
- performance_state->aLevels[0].iMemoryClock = od_clock_mem_status[i].state.aLevels[0].iMemoryClock;
- performance_state->aLevels[1].iMemoryClock = od_clock_mem_status[i].state.aLevels[1].iMemoryClock;
+ performance_state->aLevels[0].iEngineClock = od_clock_mem_status[device_id].state.aLevels[0].iEngineClock;
+ performance_state->aLevels[1].iEngineClock = od_clock_mem_status[device_id].state.aLevels[1].iEngineClock;
+ performance_state->aLevels[0].iMemoryClock = od_clock_mem_status[device_id].state.aLevels[0].iMemoryClock;
+ performance_state->aLevels[1].iMemoryClock = od_clock_mem_status[device_id].state.aLevels[1].iMemoryClock;
- if ((hc_ADL_Overdrive_State_Set (data.hm_dll, data.hm_device[i].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
+ if ((hc_ADL_Overdrive_State_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
{
log_info ("ERROR: Failed to restore ADL performance state");
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
if (gpu_temp_disable == 0)
{
- if (vendor_id == VENDOR_ID_NV)
+ #if defined(LINUX) && defined(HAVE_NVML)
+ if (data.hm_dll_nv)
{
- #ifdef LINUX
- hc_NVML_nvmlShutdown (data.hm_dll);
- #endif
+ hc_NVML_nvmlShutdown (data.hm_dll_nv);
- #ifdef WIN
- NvAPI_Unload ();
- #endif
+ hm_close (data.hm_dll_nv);
}
+ #endif
- if (vendor_id == VENDOR_ID_AMD)
- {
- hc_ADL_Main_Control_Destroy (data.hm_dll);
-
- hm_close (data.hm_dll);
- }
+ #if defined(WIN) && (HAVE_NVAPI)
+ NvAPI_Unload ();
+ #endif
- #ifdef LINUX
- if (vendor_id == VENDOR_ID_NV)
+ #ifdef HAVE_ADL
+ if (data.hm_dll_amd)
{
- hm_close (data.hm_dll);
+ hc_ADL_Main_Control_Destroy (data.hm_dll_amd);
+
+ hm_close (data.hm_dll_amd);
}
#endif
}
+ #endif // HAVE_HWMON
// free memory
local_free (pot);
- local_free (all_gpu_rules_cnt);
- local_free (all_gpu_rules_buf);
+ local_free (all_kernel_rules_cnt);
+ local_free (all_kernel_rules_buf);
local_free (wl_data->buf);
local_free (wl_data);
local_free (bitmap_s2_c);
local_free (bitmap_s2_d);
+ #ifdef HAVE_HWMON
local_free (temp_retain_fanspeed_value);
+ #ifdef HAVE_ADL
local_free (od_clock_mem_status);
local_free (od_power_control_status);
+ #endif // ADL
+ #endif
global_free (devices_param);
- global_free (gpu_rules_buf);
+ global_free (kernel_rules_buf);
global_free (root_css_buf);
global_free (markov_css_buf);