* License.....: MIT
*/
+#ifdef OSX
+#include <stdio.h>
+#endif
+
#include <common.h>
#include <shared.h>
#include <rp_kernel_on_cpu.h>
#include <getopt.h>
const char *PROGNAME = "oclHashcat";
-const char *VERSION_TXT = "2.10";
const uint VERSION_BIN = 210;
const uint RESTORE_MIN = 210;
#define POWERTUNE_ENABLE 0
#define LOGFILE_DISABLE 0
#define SCRYPT_TMTO 0
+#define OPENCL_VECTOR_WIDTH 0
#define WL_MODE_STDIN 1
#define WL_MODE_FILE 2
1000,
1100,
2100,
- 12800,
+ 12800,
1500,
12400,
500,
* types
*/
-static void (*get_next_word_func) (char *, uint32_t, uint32_t *, uint32_t *);
+static void (*get_next_word_func) (char *, u32, u32 *, u32 *);
/**
* globals
" -c, --segment-size=NUM Size in MB to cache from the wordfile",
" --bitmap-min=NUM Minimum number of bits allowed for bitmaps",
" --bitmap-max=NUM Maximum number of bits allowed for bitmaps",
+ #ifndef OSX
" --cpu-affinity=STR Locks to CPU devices, separate with comma",
+ #else
+ " --cpu-affinity=STR Locks to CPU devices, separate with comma (disabled on OSX)",
+ #endif
+ " --opencl-platforms=STR OpenCL platforms to use, separate with comma",
" -d, --opencl-devices=STR OpenCL devices to use, separate with comma",
" --opencl-device-types=STR OpenCL device-types to use, separate with comma, see references below",
- " --opencl-platform=NUM OpenCL platform to use, in case multiple platforms are present",
+ " --opencl-vector-width=NUM OpenCL vector-width (either 1, 2, 4 or 8), overrides value from device query",
" -w, --workload-profile=NUM Enable a specific workload profile, see references below",
" -n, --kernel-accel=NUM Workload tuning: 1, 8, 40, 80, 160",
" -u, --kernel-loops=NUM Workload fine-tuning: 8 - 1024",
+ #ifdef HAVE_HWMON
" --gpu-temp-disable Disable temperature and fanspeed readings and triggers",
" --gpu-temp-abort=NUM Abort session if GPU temperature reaches NUM degrees celsius",
" --gpu-temp-retain=NUM Try to retain GPU temperature at NUM degrees celsius (AMD only)",
+ #ifdef HAVE_ADL
" --powertune-enable Enable automatic power tuning option (AMD OverDrive 6 only)",
+ #endif
+ #endif
" --scrypt-tmto=NUM Manually override automatically calculated TMTO value for scrypt",
"",
"* Distributed:",
{
hc_device_param_t *device_param = &data.devices_param[device_id];
- uint64_t speed_cnt = 0;
- float speed_ms = 0;
+ if (device_param->skipped) continue;
+
+ u64 speed_cnt = 0;
+ float speed_ms = 0;
for (int i = 0; i < SPEED_CACHE; i++)
{
* words_cur
*/
- uint64_t words_cur = get_lowest_words_done ();
+ u64 words_cur = get_lowest_words_done ();
fprintf (out, "CURKU\t%llu\t", (unsigned long long int) words_cur);
if (salts_left == 0) salts_left = 1;
- uint64_t progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * salts_left;
- uint64_t all_done = 0;
- uint64_t all_rejected = 0;
- uint64_t all_restored = 0;
+ u64 all_done = 0;
+ u64 all_rejected = 0;
+ u64 all_restored = 0;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
all_restored += data.words_progress_restored[salt_pos];
}
- uint64_t progress_cur = all_restored + all_done + all_rejected;
- uint64_t progress_end = progress_total;
+ u64 progress_cur = all_restored + all_done + all_rejected;
+ u64 progress_end = progress_total;
- uint64_t progress_skip = 0;
+ u64 progress_skip = 0;
if (data.skip)
{
else if (data.attack_kern == ATTACK_KERN_BF) progress_end *= data.bfs_cnt;
}
- uint64_t progress_cur_relative_skip = progress_cur - progress_skip;
- uint64_t progress_end_relative_skip = progress_end - progress_skip;
+ u64 progress_cur_relative_skip = progress_cur - progress_skip;
+ u64 progress_end_relative_skip = progress_end - progress_skip;
fprintf (out, "PROGRESS\t%llu\t%llu\t", (unsigned long long int) progress_cur_relative_skip, (unsigned long long int) progress_end_relative_skip);
* temperature
*/
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
fprintf (out, "TEMP\t");
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- int temp = hm_get_temperature_with_device_id (i);
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ int temp = hm_get_temperature_with_device_id (device_id);
fprintf (out, "%d\t", temp);
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
#ifdef _WIN
fputc ('\r', out);
* speed new
*/
- uint64_t speed_cnt[DEVICES_MAX];
- float speed_ms[DEVICES_MAX];
+ u64 speed_cnt[DEVICES_MAX];
+ float speed_ms[DEVICES_MAX];
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
// we need to clear values (set to 0) because in case the device does
// not get new candidates it idles around but speed display would
// show it as working.
// if we instantly set it to 0 after reading it happens that the
- // speed can be shown as zero if the users refreshs to fast.
+ // speed can be shown as zero if the users refreshes too fast.
// therefore, we add a timestamp when a stat was recorded and if its
- // to old we will not use it
+ // too old we will not use it
speed_cnt[device_id] = 0;
speed_ms[device_id] = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
hashes_dev_ms[device_id] = 0;
if (speed_ms[device_id])
if (salts_left == 0) salts_left = 1;
- uint64_t progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * salts_left;
- uint64_t all_done = 0;
- uint64_t all_rejected = 0;
- uint64_t all_restored = 0;
+ u64 all_done = 0;
+ u64 all_rejected = 0;
+ u64 all_restored = 0;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
all_restored += data.words_progress_restored[salt_pos];
}
- uint64_t progress_cur = all_restored + all_done + all_rejected;
- uint64_t progress_end = progress_total;
+ u64 progress_cur = all_restored + all_done + all_rejected;
+ u64 progress_end = progress_total;
- uint64_t progress_skip = 0;
+ u64 progress_skip = 0;
if (data.skip)
{
else if (data.attack_kern == ATTACK_KERN_BF) progress_end *= data.bfs_cnt;
}
- uint64_t progress_cur_relative_skip = progress_cur - progress_skip;
- uint64_t progress_end_relative_skip = progress_end - progress_skip;
+ u64 progress_cur_relative_skip = progress_cur - progress_skip;
+ u64 progress_end_relative_skip = progress_end - progress_skip;
- float speed_ms_real = ms_running - ms_paused;
- uint64_t speed_plains_real = all_done;
+ float speed_ms_real = ms_running - ms_paused;
+ u64 speed_plains_real = all_done;
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
if (data.devices_status != STATUS_CRACKED)
{
- uint64_t words_per_ms = 0;
+ u64 words_per_ms = 0;
if (speed_plains_real && speed_ms_real)
{
if (words_per_ms)
{
- uint64_t progress_left_relative_skip = progress_end_relative_skip - progress_cur_relative_skip;
+ u64 progress_left_relative_skip = progress_end_relative_skip - progress_cur_relative_skip;
- uint64_t ms_left = progress_left_relative_skip / words_per_ms;
+ u64 ms_left = progress_left_relative_skip / words_per_ms;
sec_etc = ms_left / 1000;
}
{
log_info ("Time.Estimated.: 0 secs");
}
- else if ((uint64_t) sec_etc > ETC_MAX)
+ else if ((u64) sec_etc > ETC_MAX)
{
log_info ("Time.Estimated.: > 10 Years");
}
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- char display_dev_cur[16];
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- memset (display_dev_cur, 0, sizeof (display_dev_cur));
+ char display_dev_cur[16] = { 0 };
strncpy (display_dev_cur, "0.00", 4);
log_info ("Speed.Dev.#%d...: %9sH/s", device_id + 1, display_dev_cur);
}
- char display_all_cur[16];
-
- memset (display_all_cur, 0, sizeof (display_all_cur));
+ char display_all_cur[16] = { 0 };
strncpy (display_all_cur, "0.00", 4);
format_speed_display (hashes_all_ms * 1000, display_all_cur, sizeof (display_all_cur));
- if (data.devices_cnt > 1) log_info ("Speed.Dev.#*...: %9sH/s", display_all_cur);
+ if (data.devices_active > 1) log_info ("Speed.Dev.#*...: %9sH/s", display_all_cur);
const float digests_percent = (float) data.digests_done / data.digests_cnt;
const float salts_percent = (float) data.salts_done / data.salts_cnt;
// Restore point
- uint64_t restore_point = get_lowest_words_done ();
+ u64 restore_point = get_lowest_words_done ();
- uint64_t restore_total = data.words_base;
+ u64 restore_total = data.words_base;
float percent_restore = 0;
{
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
- log_info ("Progress.......: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
- log_info ("Rejected.......: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
+ log_info ("Progress.......: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
+ log_info ("Rejected.......: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
if (data.restore_disable == 0)
{
- log_info ("Restore.Point..: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
+ log_info ("Restore.Point..: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
}
}
else
}
}
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
#define HM_STR_BUF_SIZE 255
- if (data.hm_device[i].fan_supported == 1)
+ if (data.hm_device[device_id].fan_supported == 1)
{
- char temperature[HM_STR_BUF_SIZE];
char utilization[HM_STR_BUF_SIZE];
+ char temperature[HM_STR_BUF_SIZE];
char fanspeed[HM_STR_BUF_SIZE];
- hm_device_val_to_str ((char *) temperature, HM_STR_BUF_SIZE, "%", hm_get_temperature_with_device_id (i));
- hm_device_val_to_str ((char *) utilization, HM_STR_BUF_SIZE, "c", hm_get_utilization_with_device_id (i));
+ hm_device_val_to_str ((char *) utilization, HM_STR_BUF_SIZE, "%", hm_get_utilization_with_device_id (device_id));
+ hm_device_val_to_str ((char *) temperature, HM_STR_BUF_SIZE, "c", hm_get_temperature_with_device_id (device_id));
- if (data.vendor_id == VENDOR_ID_AMD)
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
- hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "%", hm_get_fanspeed_with_device_id (i));
+ hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "%", hm_get_fanspeed_with_device_id (device_id));
}
-
- if (data.vendor_id == VENDOR_ID_NV)
+ else if (device_param->vendor_id == VENDOR_ID_NV)
{
#ifdef LINUX
- hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "%", hm_get_fanspeed_with_device_id (i));
+ hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "%", hm_get_fanspeed_with_device_id (device_id));
#else
- hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "rpm", hm_get_fanspeed_with_device_id (i));
+ hm_device_val_to_str ((char *) fanspeed, HM_STR_BUF_SIZE, "rpm", hm_get_fanspeed_with_device_id (device_id));
#endif
}
- log_info ("HWMon.GPU.#%d...: %s Util, %s Temp, %s Fan", i + 1, utilization, temperature, fanspeed);
+ log_info ("HWMon.GPU.#%d...: %s Util, %s Temp, %s Fan", device_id + 1, utilization, temperature, fanspeed);
}
else
{
- char temperature[HM_STR_BUF_SIZE];
char utilization[HM_STR_BUF_SIZE];
+ char temperature[HM_STR_BUF_SIZE];
- hm_device_val_to_str ((char *) temperature, HM_STR_BUF_SIZE, "%", hm_get_temperature_with_device_id (i));
- hm_device_val_to_str ((char *) utilization, HM_STR_BUF_SIZE, "c", hm_get_utilization_with_device_id (i));
+ hm_device_val_to_str ((char *) utilization, HM_STR_BUF_SIZE, "%", hm_get_utilization_with_device_id (device_id));
+ hm_device_val_to_str ((char *) temperature, HM_STR_BUF_SIZE, "c", hm_get_temperature_with_device_id (device_id));
- log_info ("HWMon.GPU.#%d...: %s Util, %s Temp, N/A Fan", i + 1, utilization, temperature);
+ log_info ("HWMon.GPU.#%d...: %s Util, %s Temp, N/A Fan", device_id + 1, utilization, temperature);
}
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
}
static void status_benchmark ()
if (data.words_cnt == 0) return;
- uint64_t speed_cnt[DEVICES_MAX];
- float speed_ms[DEVICES_MAX];
-
- uint device_id;
+ u64 speed_cnt[DEVICES_MAX];
+ float speed_ms[DEVICES_MAX];
- for (device_id = 0; device_id < data.devices_cnt; device_id++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
speed_cnt[device_id] = 0;
speed_ms[device_id] = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
hashes_dev_ms[device_id] = 0;
if (speed_ms[device_id])
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- char display_dev_cur[16];
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- memset (display_dev_cur, 0, sizeof (display_dev_cur));
+ if (device_param->skipped) continue;
+
+ char display_dev_cur[16] = { 0 };
strncpy (display_dev_cur, "0.00", 4);
log_info ("Speed.Dev.#%d.: %9sH/s", device_id + 1, display_dev_cur);
}
- char display_all_cur[16];
-
- memset (display_all_cur, 0, sizeof (display_all_cur));
+ char display_all_cur[16] = { 0 };
strncpy (display_all_cur, "0.00", 4);
format_speed_display (hashes_all_ms * 1000, display_all_cur, sizeof (display_all_cur));
- if (data.devices_cnt > 1) log_info ("Speed.Dev.#*.: %9sH/s", display_all_cur);
+ if (data.devices_active > 1) log_info ("Speed.Dev.#*.: %9sH/s", display_all_cur);
}
/**
snprintf (source_file, 255, "%s/OpenCL/m%05d.cl", shared_dir, (int) kern_type);
}
-static void generate_cached_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *profile_dir, char *device_name_chksum, int vendor_id, char *cached_file)
+static void generate_cached_kernel_filename (const uint attack_exec, const uint attack_kern, const uint kern_type, char *profile_dir, char *device_name_chksum, char *cached_file)
{
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (attack_kern == ATTACK_KERN_STRAIGHT)
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d_a0.%s.kernel", profile_dir, vendor_id, (int) kern_type, device_name_chksum);
+ snprintf (cached_file, 255, "%s/kernels/m%05d_a0.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
else if (attack_kern == ATTACK_KERN_COMBI)
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d_a1.%s.kernel", profile_dir, vendor_id, (int) kern_type, device_name_chksum);
+ snprintf (cached_file, 255, "%s/kernels/m%05d_a1.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
else if (attack_kern == ATTACK_KERN_BF)
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d_a3.%s.kernel", profile_dir, vendor_id, (int) kern_type, device_name_chksum);
+ snprintf (cached_file, 255, "%s/kernels/m%05d_a3.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
}
else
{
- snprintf (cached_file, 255, "%s/kernels/%d/m%05d.%s.kernel", profile_dir, vendor_id, (int) kern_type, device_name_chksum);
+ snprintf (cached_file, 255, "%s/kernels/m%05d.%s.kernel", profile_dir, (int) kern_type, device_name_chksum);
}
}
}
}
-static void generate_cached_kernel_mp_filename (const uint opti_type, const uint opts_type, char *profile_dir, char *device_name_chksum, int vendor_id, char *cached_file)
+static void generate_cached_kernel_mp_filename (const uint opti_type, const uint opts_type, char *profile_dir, char *device_name_chksum, char *cached_file)
{
if ((opti_type & OPTI_TYPE_BRUTE_FORCE) && (opts_type & OPTS_TYPE_PT_GENERATE_BE))
{
- snprintf (cached_file, 255, "%s/kernels/%d/markov_be.%s.kernel", profile_dir, vendor_id, device_name_chksum);
+ snprintf (cached_file, 255, "%s/kernels/markov_be.%s.kernel", profile_dir, device_name_chksum);
}
else
{
- snprintf (cached_file, 255, "%s/kernels/%d/markov_le.%s.kernel", profile_dir, vendor_id, device_name_chksum);
+ snprintf (cached_file, 255, "%s/kernels/markov_le.%s.kernel", profile_dir, device_name_chksum);
}
}
snprintf (source_file, 255, "%s/OpenCL/amp_a%d.cl", shared_dir, attack_kern);
}
-static void generate_cached_kernel_amp_filename (const uint attack_kern, char *profile_dir, char *device_name_chksum, int vendor_id, char *cached_file)
+static void generate_cached_kernel_amp_filename (const uint attack_kern, char *profile_dir, char *device_name_chksum, char *cached_file)
{
- snprintf (cached_file, 255, "%s/kernels/%d/amp_a%d.%s.kernel", profile_dir, vendor_id, attack_kern, device_name_chksum);
+ snprintf (cached_file, 255, "%s/kernels/amp_a%d.%s.kernel", profile_dir, attack_kern, device_name_chksum);
}
static uint convert_from_hex (char *line_buf, const uint line_len)
for (i = 0, j = 0; j < line_len; i += 1, j += 2)
{
- line_buf[i] = hex_to_char (&line_buf[j]);
+ line_buf[i] = hex_to_u8 ((const u8 *) &line_buf[j]);
}
memset (line_buf + i, 0, line_len - i);
for (i = 0, j = 5; j < line_len - 1; i += 1, j += 2)
{
- line_buf[i] = hex_to_char (&line_buf[j]);
+ line_buf[i] = hex_to_u8 ((const u8 *) &line_buf[j]);
}
memset (line_buf + i, 0, line_len - i);
{
uint cnt = 0;
- char *buf = (char *) mymalloc (BUFSIZ);
+ char *buf = (char *) mymalloc (BUFSIZ + 1);
size_t nread_tmp = 0;
fflush (stdout);
}
-static void gidd_to_pw_t (hc_device_param_t *device_param, const uint64_t gidd, pw_t *pw)
+static void gidd_to_pw_t (hc_device_param_t *device_param, const u64 gidd, pw_t *pw)
{
hc_clEnqueueReadBuffer (device_param->command_queue, device_param->d_pws_buf, CL_TRUE, gidd * sizeof (pw_t), sizeof (pw_t), pw, 0, NULL, NULL);
}
int debug_rule_len = 0; // -1 error
uint debug_plain_len = 0;
- unsigned char debug_plain_ptr[BLOCK_SIZE];
+ u8 debug_plain_ptr[BLOCK_SIZE];
// hash
- char out_buf[4096]; memset (out_buf, 0, sizeof (out_buf));
+ char out_buf[4096] = { 0 };
ascii_digest (out_buf, salt_pos, digest_pos);
uint gidvid = plain.gidvid;
uint il_pos = plain.il_pos;
- uint64_t crackpos = device_param->words_off;
+ u64 crackpos = device_param->words_off;
uint plain_buf[16];
- unsigned char *plain_ptr = (unsigned char *) plain_buf;
+ u8 *plain_ptr = (u8 *) plain_buf;
unsigned int plain_len = 0;
if (data.attack_mode == ATTACK_MODE_STRAIGHT)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
}
else if (data.attack_mode == ATTACK_MODE_COMBI)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
}
else if (data.attack_mode == ATTACK_MODE_BF)
{
- uint64_t l_off = device_param->kernel_params_mp_l_buf64[3] + gidvid;
- uint64_t r_off = device_param->kernel_params_mp_r_buf64[3] + il_pos;
+ u64 l_off = device_param->kernel_params_mp_l_buf64[3] + gidvid;
+ u64 r_off = device_param->kernel_params_mp_r_buf64[3] + il_pos;
uint l_start = device_param->kernel_params_mp_l_buf32[5];
uint r_start = device_param->kernel_params_mp_r_buf32[5];
}
else if (data.attack_mode == ATTACK_MODE_HYBRID1)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
plain_len = pw.pw_len;
- uint64_t off = device_param->kernel_params_mp_buf64[3] + il_pos;
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
uint start = 0;
uint stop = device_param->kernel_params_mp_buf32[4];
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
plain_len = pw.pw_len;
- uint64_t off = device_param->kernel_params_mp_buf64[3] + il_pos;
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
uint start = 0;
uint stop = device_param->kernel_params_mp_buf32[4];
{
char *hashfile = data.hashfile;
- char new_hashfile[256];
- char old_hashfile[256];
-
- memset (new_hashfile, 0, sizeof (new_hashfile));
- memset (old_hashfile, 0, sizeof (old_hashfile));
+ char new_hashfile[256] = { 0 };
+ char old_hashfile[256] = { 0 };
snprintf (new_hashfile, 255, "%s.new", hashfile);
snprintf (old_hashfile, 255, "%s.old", hashfile);
if (data.hash_mode != 2500)
{
- char out_buf[4096];
-
- memset (out_buf, 0, sizeof (out_buf));
+ char out_buf[4096] = { 0 };
if (data.username == 1)
{
unlink (old_hashfile);
}
-static float find_kernel_blocks_div (const uint64_t total_left, const uint kernel_blocks_all)
+static float find_kernel_blocks_div (const u64 total_left, const uint kernel_blocks_all)
{
// function called only in case kernel_blocks_all > words_left)
kernel_blocks_div += kernel_blocks_div / 100;
- uint32_t kernel_blocks_new = (uint32_t) (kernel_blocks_all * kernel_blocks_div);
+ u32 kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
while (kernel_blocks_new < total_left)
{
kernel_blocks_div += kernel_blocks_div / 100;
- kernel_blocks_new = (uint32_t) (kernel_blocks_all * kernel_blocks_div);
+ kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
}
if (data.quiet == 0)
case KERN_RUN_2: kernel = device_param->kernel2; break;
case KERN_RUN_23: kernel = device_param->kernel23; break;
case KERN_RUN_3: kernel = device_param->kernel3; break;
- case KERN_RUN_WEAK: kernel = device_param->kernel_weak; break;
}
hc_clSetKernelArg (kernel, 21, sizeof (cl_uint), device_param->kernel_params[21]);
static void run_kernel_bzero (hc_device_param_t *device_param, cl_mem buf, const uint size)
{
- if (data.vendor_id == VENDOR_ID_AMD)
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
+ // So far tested, AMD is the only vendor supporting this OpenCL 1.2 function without segfaulting
+
const cl_uchar zero = 0;
hc_clEnqueueFillBuffer (device_param->command_queue, buf, &zero, sizeof (cl_uchar), 0, size, 0, NULL, NULL);
}
-
- if (data.vendor_id == VENDOR_ID_NV)
+ else
{
// NOTE: clEnqueueFillBuffer () always fails with -59
- // IOW, it's not supported by Nvidia ForceWare <= 352.21,
+ // IOW, it's not supported by Nvidia ForceWare <= 352.21, also pocl segfaults
// How's that possible, OpenCL 1.2 support is advertised??
// We need to workaround...
myfree (tmp);
}
-
- if (data.vendor_id == VENDOR_ID_GENERIC)
- {
- const cl_uchar zero = 0;
-
- hc_clEnqueueFillBuffer (device_param->command_queue, buf, &zero, sizeof (cl_uchar), 0, size, 0, NULL, NULL);
- }
}
static int run_rule_engine (const int rule_len, const char *rule_buf)
}
else if (data.attack_kern == ATTACK_KERN_BF)
{
- const uint64_t off = device_param->words_off;
+ const u64 off = device_param->words_off;
device_param->kernel_params_mp_l_buf64[3] = off;
uint innerloop_cnt = 0;
if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL) innerloop_step = kernel_loops;
- else innerloop_step = 1;
+ else innerloop_step = 1;
if (data.attack_kern == ATTACK_KERN_STRAIGHT) innerloop_cnt = data.kernel_rules_cnt;
else if (data.attack_kern == ATTACK_KERN_COMBI) innerloop_cnt = data.combs_cnt;
if (run_rule_engine (data.rule_len_r, data.rule_buf_r))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = _old_apply_rule (data.rule_buf_r, data.rule_len_r, line_buf, line_len, rule_buf_out);
line_len = MIN (line_len, PW_DICTMAX);
- char *ptr = (char *) device_param->combs_buf[i].i;
+ u8 *ptr = (u8 *) device_param->combs_buf[i].i;
memcpy (ptr, line_buf_new, line_len);
}
else if (data.attack_mode == ATTACK_MODE_BF)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_r_buf64[3] = off;
}
else if (data.attack_mode == ATTACK_MODE_HYBRID1)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_buf64[3] = off;
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_buf64[3] = off;
* progress
*/
- uint64_t perf_sum_all = (uint64_t) pw_cnt * (uint64_t) innerloop_left;
+ u64 perf_sum_all = (u64) pw_cnt * (u64) innerloop_left;
hc_thread_mutex_lock (mux_counter);
return;
}
-static void get_next_word_lm (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_lm (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr >= 'a' && *ptr <= 'z') *ptr -= 0x20;
*len = sz;
}
-static void get_next_word_uc (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_uc (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr >= 'a' && *ptr <= 'z') *ptr -= 0x20;
*len = sz;
}
-static void get_next_word_std (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_std (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr != '\n') continue;
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
}
#ifdef _POSIX
-static uint64_t count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, size_t *dictstat_nmemb)
+static u64 count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, size_t *dictstat_nmemb)
#endif
#ifdef _WIN
-static uint64_t count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, uint *dictstat_nmemb)
+static u64 count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, uint *dictstat_nmemb)
#endif
{
hc_signal (NULL);
{
if (d_cache)
{
- uint64_t cnt = d_cache->cnt;
+ u64 cnt = d_cache->cnt;
- uint64_t keyspace = cnt;
+ u64 keyspace = cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
time_t now = 0;
time_t prev = 0;
- uint64_t comp = 0;
- uint64_t cnt = 0;
- uint64_t cnt2 = 0;
+ u64 comp = 0;
+ u64 cnt = 0;
+ u64 cnt2 = 0;
while (!feof (fd))
{
comp += wl_data->cnt;
- uint32_t i = 0;
+ u32 i = 0;
while (i < wl_data->cnt)
{
- uint32_t len;
- uint32_t off;
+ u32 len;
+ u32 off;
get_next_word_func (wl_data->buf + i, wl_data->cnt - i, &len, &off);
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
memcpy (p2->hi1, p1->hi1, 64 * sizeof (uint));
}
-static uint pw_add_to_hc1 (hc_device_param_t *device_param, const uint8_t *pw_buf, const uint pw_len)
+static uint pw_add_to_hc1 (hc_device_param_t *device_param, const u8 *pw_buf, const uint pw_len)
{
if (data.devices_status == STATUS_BYPASS) return 0;
uint cache_cnt = pw_cache->cnt;
- uint8_t *pw_hc1 = pw_cache->pw_buf.hc1[cache_cnt];
+ u8 *pw_hc1 = pw_cache->pw_buf.hc1[cache_cnt];
memcpy (pw_hc1, pw_buf, pw_len);
uint runtime_check = 0;
uint remove_check = 0;
uint status_check = 0;
- uint hwmon_check = 0;
uint restore_check = 0;
uint restore_left = data.restore_timer;
uint remove_left = data.remove_timer;
uint status_left = data.status_timer;
+ #ifdef HAVE_HWMON
+ uint hwmon_check = 0;
+
// these variables are mainly used for fan control (AMD only)
int *fan_speed_chgd = (int *) mycalloc (data.devices_cnt, sizeof (int));
int *temp_diff_old = (int *) mycalloc (data.devices_cnt, sizeof (int));
int *temp_diff_sum = (int *) mycalloc (data.devices_cnt, sizeof (int));
+ #ifdef HAVE_ADL
int temp_threshold = 1; // degrees celcius
int fan_speed_min = 15; // in percentage
int fan_speed_max = 100;
+ #endif // HAVE_ADL
time_t last_temp_check_time;
+ #endif // HAVE_HWMON
uint sleep_time = 1;
status_check = 1;
}
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
time (&last_temp_check_time);
hwmon_check = 1;
}
+ #endif
- if ((runtime_check == 0) && (remove_check == 0) && (status_check == 0) && (hwmon_check == 0) && (restore_check == 0))
+ if ((runtime_check == 0) && (remove_check == 0) && (status_check == 0) && (restore_check == 0))
{
+ #ifdef HAVE_HWMON
+ if (hwmon_check == 0)
+ #endif
return (p);
}
if (data.devices_status != STATUS_RUNNING) continue;
+ #ifdef HAVE_HWMON
if (hwmon_check == 1)
{
hc_thread_mutex_lock (mux_adl);
if (Ta == 0) Ta = 1;
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if ((data.devices_param[i].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- const int temperature = hm_get_temperature_with_device_id (i);
+ if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+
+ const int temperature = hm_get_temperature_with_device_id (device_id);
if (temperature > (int) data.gpu_temp_abort)
{
- log_error ("ERROR: Temperature limit on GPU %d reached, aborting...", i + 1);
+ log_error ("ERROR: Temperature limit on GPU %d reached, aborting...", device_id + 1);
if (data.devices_status != STATUS_QUIT) myabort ();
break;
}
+ #ifdef HAVE_ADL
const int gpu_temp_retain = data.gpu_temp_retain;
if (gpu_temp_retain) // VENDOR_ID_AMD implied
{
- if (data.hm_device[i].fan_supported == 1)
+ if (data.hm_device[device_id].fan_supported == 1)
{
int temp_cur = temperature;
int temp_diff_new = gpu_temp_retain - temp_cur;
- temp_diff_sum[i] = temp_diff_sum[i] + temp_diff_new;
+ temp_diff_sum[device_id] = temp_diff_sum[device_id] + temp_diff_new;
// calculate Ta value (time difference in seconds between the last check and this check)
// PID controller (3-term controller: proportional - Kp, integral - Ki, derivative - Kd)
- int fan_diff_required = (int) (Kp * (float)temp_diff_new + Ki * Ta * (float)temp_diff_sum[i] + Kd * ((float)(temp_diff_new - temp_diff_old[i])) / Ta);
+ int fan_diff_required = (int) (Kp * (float)temp_diff_new + Ki * Ta * (float)temp_diff_sum[device_id] + Kd * ((float)(temp_diff_new - temp_diff_old[device_id])) / Ta);
if (abs (fan_diff_required) >= temp_threshold)
{
- const int fan_speed_cur = hm_get_fanspeed_with_device_id (i);
+ const int fan_speed_cur = hm_get_fanspeed_with_device_id (device_id);
int fan_speed_level = fan_speed_cur;
- if (fan_speed_chgd[i] == 0) fan_speed_level = temp_cur;
+ if (fan_speed_chgd[device_id] == 0) fan_speed_level = temp_cur;
int fan_speed_new = fan_speed_level - fan_diff_required;
if (fan_speed_new != fan_speed_cur)
{
- int freely_change_fan_speed = (fan_speed_chgd[i] == 1);
+ int freely_change_fan_speed = (fan_speed_chgd[device_id] == 1);
int fan_speed_must_change = (fan_speed_new > fan_speed_cur);
if ((freely_change_fan_speed == 1) || (fan_speed_must_change == 1))
{
- hm_set_fanspeed_with_device_id_amd (i, fan_speed_new);
+ hm_set_fanspeed_with_device_id_amd (device_id, fan_speed_new);
- fan_speed_chgd[i] = 1;
+ fan_speed_chgd[device_id] = 1;
}
- temp_diff_old[i] = temp_diff_new;
+ temp_diff_old[device_id] = temp_diff_new;
}
}
}
}
+ #endif // HAVE_ADL
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
if (restore_check == 1)
{
}
}
+ #ifdef HAVE_HWMON
myfree (fan_speed_chgd);
myfree (temp_diff_old);
myfree (temp_diff_sum);
+ #endif
p = NULL;
int (*parse_func) (char *, uint, hash_t *) = data.parse_func;
// buffers
- hash_t hash_buf;
-
- memset (&hash_buf, 0, sizeof (hash_buf));
+ hash_t hash_buf = { 0, 0, 0, 0, 0 };
hash_buf.digest = mymalloc (dgst_size);
pke[i] = byte_swap_32 (wpa->pke[i]);
}
- unsigned char mac1[6];
- unsigned char mac2[6];
+ u8 mac1[6];
+ u8 mac2[6];
memcpy (mac1, pke_ptr + 23, 6);
memcpy (mac2, pke_ptr + 29, 6);
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != (unsigned char) hex_to_char (&mac1_pos[j]))
+ if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
cracked = 0;
break;
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != (unsigned char) hex_to_char (&mac2_pos[j]))
+ if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
cracked = 0;
break;
return (p);
}
-static uint get_work (hc_device_param_t *device_param, const uint64_t max)
+static uint get_work (hc_device_param_t *device_param, const u64 max)
{
hc_thread_mutex_lock (mux_dispatcher);
- const uint64_t words_cur = data.words_cur;
- const uint64_t words_base = (data.limit == 0) ? data.words_base : data.limit;
+ const u64 words_cur = data.words_cur;
+ const u64 words_base = (data.limit == 0) ? data.words_base : data.limit;
device_param->words_off = words_cur;
- const uint64_t words_left = words_base - words_cur;
+ const u64 words_left = words_base - words_cur;
if (data.kernel_blocks_all > words_left)
{
{
if (device_param->kernel_blocks == device_param->kernel_blocks_user)
{
- const uint32_t kernel_blocks_new = (float) device_param->kernel_blocks * data.kernel_blocks_div;
- const uint32_t kernel_power_new = kernel_blocks_new;
+ const u32 kernel_blocks_new = (float) device_param->kernel_blocks * data.kernel_blocks_div;
+ const u32 kernel_power_new = kernel_blocks_new;
if (kernel_blocks_new < device_param->kernel_blocks)
{
{
hc_device_param_t *device_param = (hc_device_param_t *) p;
+ if (device_param->skipped) return NULL;
+
const uint attack_kern = data.attack_kern;
const uint kernel_blocks = device_param->kernel_blocks;
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
}
}
- device_param->pw_add (device_param, (uint8_t *) line_buf, line_len);
+ device_param->pw_add (device_param, (u8 *) line_buf, line_len);
words_cur++;
{
hc_device_param_t *device_param = (hc_device_param_t *) p;
+ if (device_param->skipped) return NULL;
+
const uint attack_mode = data.attack_mode;
const uint attack_kern = data.attack_kern;
if (work == 0) break;
- const uint64_t words_off = device_param->words_off;
- const uint64_t words_fin = words_off + work;
+ const u64 words_off = device_param->words_off;
+ const u64 words_fin = words_off + work;
const uint pw_cnt = work;
const uint pws_cnt = work;
wl_data->cnt = 0;
wl_data->pos = 0;
- uint64_t words_cur = 0;
+ u64 words_cur = 0;
while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
- uint64_t words_off = 0;
- uint64_t words_fin = 0;
+ u64 words_off = 0;
+ u64 words_fin = 0;
- uint64_t max = -1;
+ u64 max = -1;
while (max)
{
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
}
}
- device_param->pw_add (device_param, (uint8_t *) line_buf, line_len);
+ device_param->pw_add (device_param, (u8 *) line_buf, line_len);
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
device_param->kernel_params_buf32[30] = 0;
device_param->kernel_params_buf32[31] = 1;
- char *dictfile_old = data.dictfile;
- char *dictfile2_old = data.dictfile2;
- char *mask_old = data.mask;
- int attack_mode_old = data.attack_mode;
+ char *dictfile_old = data.dictfile;
const char *weak_hash_check = "weak-hash-check";
- data.dictfile = (char *) weak_hash_check;
- data.dictfile2 = (char *) weak_hash_check;
- data.mask = (char *) weak_hash_check;
- data.attack_mode = ATTACK_MODE_STRAIGHT;
+ data.dictfile = (char *) weak_hash_check;
+
+ uint cmd0_rule_old = data.kernel_rules_buf[0].cmds[0];
+
+ data.kernel_rules_buf[0].cmds[0] = 0;
/**
* run the kernel
if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
- run_kernel (KERN_RUN_WEAK, device_param, 1);
+ run_kernel (KERN_RUN_1, device_param, 1);
}
else
{
device_param->kernel_params_buf32[30] = 0;
device_param->kernel_params_buf32[31] = 0;
- data.dictfile = dictfile_old;
- data.dictfile2 = dictfile2_old;
- data.mask = mask_old;
- data.attack_mode = attack_mode_old;
+ data.dictfile = dictfile_old;
+
+ data.kernel_rules_buf[0].cmds[0] = cmd0_rule_old;
}
// hlfmt hashcat
// wrapper around mymalloc for ADL
+#if defined(HAVE_HWMON) && defined(HAVE_ADL)
// Allocation callback registered with the AMD Display Library via
// hc_ADL_Main_Control_Create (); ADL invokes it whenever it needs a
// buffer to hand back to the caller. Routed through the project's
// mymalloc so allocations follow the same policy as the rest of the
// code. __stdcall is the calling convention ADL requires on Windows.
void *__stdcall ADL_Main_Memory_Alloc (const int iSize)
{
  return mymalloc (iSize); // NOTE(review): presumably mymalloc aborts/logs on OOM rather than returning NULL — confirm against its definition
}
+#endif
-static uint generate_bitmaps (const uint digests_cnt, const uint dgst_size, const uint dgst_shifts, char *digests_buf_ptr, const uint bitmap_mask, const uint bitmap_size, uint *bitmap_a, uint *bitmap_b, uint *bitmap_c, uint *bitmap_d, const uint64_t collisions_max)
+static uint generate_bitmaps (const uint digests_cnt, const uint dgst_size, const uint dgst_shifts, char *digests_buf_ptr, const uint bitmap_mask, const uint bitmap_size, uint *bitmap_a, uint *bitmap_b, uint *bitmap_c, uint *bitmap_d, const u64 collisions_max)
{
- uint64_t collisions = 0;
+ u64 collisions = 0;
const uint dgst_pos0 = data.dgst_pos0;
const uint dgst_pos1 = data.dgst_pos1;
digests_buf_ptr += dgst_size;
- const uint val0 = 1 << (digest_ptr[dgst_pos0] & 0x1f);
- const uint val1 = 1 << (digest_ptr[dgst_pos1] & 0x1f);
- const uint val2 = 1 << (digest_ptr[dgst_pos2] & 0x1f);
- const uint val3 = 1 << (digest_ptr[dgst_pos3] & 0x1f);
+ const uint val0 = 1u << (digest_ptr[dgst_pos0] & 0x1f);
+ const uint val1 = 1u << (digest_ptr[dgst_pos1] & 0x1f);
+ const uint val2 = 1u << (digest_ptr[dgst_pos2] & 0x1f);
+ const uint val3 = 1u << (digest_ptr[dgst_pos3] & 0x1f);
const uint idx0 = (digest_ptr[dgst_pos0] >> dgst_shifts) & bitmap_mask;
const uint idx1 = (digest_ptr[dgst_pos1] >> dgst_shifts) & bitmap_mask;
putenv ((char *) "DISPLAY=:0");
}
- /*
if (getenv ("GPU_MAX_ALLOC_PERCENT") == NULL)
putenv ((char *) "GPU_MAX_ALLOC_PERCENT=100");
+ if (getenv ("CPU_MAX_ALLOC_PERCENT") == NULL)
+ putenv ((char *) "CPU_MAX_ALLOC_PERCENT=100");
+
if (getenv ("GPU_USE_SYNC_OBJECTS") == NULL)
putenv ((char *) "GPU_USE_SYNC_OBJECTS=1");
- */
/**
* Real init
uint username = USERNAME;
uint remove = REMOVE;
uint remove_timer = REMOVE_TIMER;
- uint64_t skip = SKIP;
- uint64_t limit = LIMIT;
+ u64 skip = SKIP;
+ u64 limit = LIMIT;
uint keyspace = KEYSPACE;
uint potfile_disable = POTFILE_DISABLE;
uint debug_mode = DEBUG_MODE;
uint increment = INCREMENT;
uint increment_min = INCREMENT_MIN;
uint increment_max = INCREMENT_MAX;
+ #ifndef OSX
char *cpu_affinity = NULL;
+ #endif
char *opencl_devices = NULL;
- char *opencl_platform = NULL;
+ char *opencl_platforms = NULL;
char *opencl_device_types = NULL;
+ uint opencl_vector_width = OPENCL_VECTOR_WIDTH;
char *truecrypt_keyfiles = NULL;
uint workload_profile = WORKLOAD_PROFILE;
uint kernel_accel = KERNEL_ACCEL;
uint kernel_loops = KERNEL_LOOPS;
+ #ifdef HAVE_HWMON
uint gpu_temp_disable = GPU_TEMP_DISABLE;
uint gpu_temp_abort = GPU_TEMP_ABORT;
uint gpu_temp_retain = GPU_TEMP_RETAIN;
+ #ifdef HAVE_ADL
uint powertune_enable = POWERTUNE_ENABLE;
+ #endif
+ #endif
uint logfile_disable = LOGFILE_DISABLE;
uint segment_size = SEGMENT_SIZE;
uint scrypt_tmto = SCRYPT_TMTO;
#define IDX_MARKOV_HCSTAT 0xff24
#define IDX_CPU_AFFINITY 0xff25
#define IDX_OPENCL_DEVICES 'd'
- #define IDX_OPENCL_PLATFORM 0xff72
+ #define IDX_OPENCL_PLATFORMS 0xff72
#define IDX_OPENCL_DEVICE_TYPES 0xff73
+ #define IDX_OPENCL_VECTOR_WIDTH 0xff74
#define IDX_WORKLOAD_PROFILE 'w'
#define IDX_KERNEL_ACCEL 'n'
#define IDX_KERNEL_LOOPS 'u'
{"markov-classic", no_argument, 0, IDX_MARKOV_CLASSIC},
{"markov-threshold", required_argument, 0, IDX_MARKOV_THRESHOLD},
{"markov-hcstat", required_argument, 0, IDX_MARKOV_HCSTAT},
+ #ifndef OSX
{"cpu-affinity", required_argument, 0, IDX_CPU_AFFINITY},
+ #endif
{"opencl-devices", required_argument, 0, IDX_OPENCL_DEVICES},
- {"opencl-platform", required_argument, 0, IDX_OPENCL_PLATFORM},
+ {"opencl-platforms", required_argument, 0, IDX_OPENCL_PLATFORMS},
{"opencl-device-types", required_argument, 0, IDX_OPENCL_DEVICE_TYPES},
+ {"opencl-vector-width", required_argument, 0, IDX_OPENCL_VECTOR_WIDTH},
{"workload-profile", required_argument, 0, IDX_WORKLOAD_PROFILE},
{"kernel-accel", required_argument, 0, IDX_KERNEL_ACCEL},
{"kernel-loops", required_argument, 0, IDX_KERNEL_LOOPS},
+ #ifdef HAVE_HWMON
{"gpu-temp-disable", no_argument, 0, IDX_GPU_TEMP_DISABLE},
{"gpu-temp-abort", required_argument, 0, IDX_GPU_TEMP_ABORT},
{"gpu-temp-retain", required_argument, 0, IDX_GPU_TEMP_RETAIN},
+ #ifdef HAVE_ADL
{"powertune-enable", no_argument, 0, IDX_POWERTUNE_ENABLE},
+ #endif
+ #endif // HAVE_HWMON
{"logfile-disable", no_argument, 0, IDX_LOGFILE_DISABLE},
{"truecrypt-keyfiles", required_argument, 0, IDX_TRUECRYPT_KEYFILES},
{"segment-size", required_argument, 0, IDX_SEGMENT_SIZE},
char **rp_files = (char **) mycalloc (argc, sizeof (char *));
- int option_index;
- int c;
+ int option_index = 0;
+ int c = -1;
optind = 1;
optopt = 0;
- option_index = 0;
while (((c = getopt_long (argc, argv, short_options, long_options, &option_index)) != -1) && optopt == 0)
{
if (version)
{
- log_info (VERSION_TXT);
+ log_info ("%s (%s)", VERSION_TAG, VERSION_SUM);
return (0);
}
myfree (exec_path);
+ /**
+ * kernel cache, we need to make sure folder exist
+ */
+
+ int kernels_folder_size = strlen (profile_dir) + 1 + 7 + 1 + 1;
+
+ char *kernels_folder = (char *) mymalloc (kernels_folder_size);
+
+ snprintf (kernels_folder, kernels_folder_size - 1, "%s/kernels", profile_dir);
+
+ mkdir (kernels_folder, 0700);
+
+ myfree (kernels_folder);
+
/**
* session
*/
uint remove_timer_chgd = 0;
uint increment_min_chgd = 0;
uint increment_max_chgd = 0;
- uint gpu_temp_abort_chgd = 0;
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
uint gpu_temp_retain_chgd = 0;
+ uint gpu_temp_abort_chgd = 0;
+ #endif
optind = 1;
optopt = 0;
case IDX_HEX_CHARSET: hex_charset = 1; break;
case IDX_HEX_SALT: hex_salt = 1; break;
case IDX_HEX_WORDLIST: hex_wordlist = 1; break;
+ #ifndef OSX
case IDX_CPU_AFFINITY: cpu_affinity = optarg; break;
+ #endif
case IDX_OPENCL_DEVICES: opencl_devices = optarg; break;
- case IDX_OPENCL_PLATFORM: opencl_platform = optarg; break;
+ case IDX_OPENCL_PLATFORMS: opencl_platforms = optarg; break;
case IDX_OPENCL_DEVICE_TYPES:
opencl_device_types = optarg; break;
+ case IDX_OPENCL_VECTOR_WIDTH:
+ opencl_vector_width = atoi (optarg); break;
case IDX_WORKLOAD_PROFILE: workload_profile = atoi (optarg); break;
case IDX_KERNEL_ACCEL: kernel_accel = atoi (optarg);
kernel_accel_chgd = 1; break;
case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
kernel_loops_chgd = 1; break;
+ #ifdef HAVE_HWMON
case IDX_GPU_TEMP_DISABLE: gpu_temp_disable = 1; break;
- case IDX_GPU_TEMP_ABORT: gpu_temp_abort_chgd = 1;
- gpu_temp_abort = atoi (optarg); break;
- case IDX_GPU_TEMP_RETAIN: gpu_temp_retain_chgd = 1;
- gpu_temp_retain = atoi (optarg); break;
+ case IDX_GPU_TEMP_ABORT: gpu_temp_abort = atoi (optarg);
+ #ifdef HAVE_ADL
+ gpu_temp_abort_chgd = 1;
+ #endif
+ break;
+ case IDX_GPU_TEMP_RETAIN: gpu_temp_retain = atoi (optarg);
+ #ifdef HAVE_ADL
+ gpu_temp_retain_chgd = 1;
+ #endif
+ break;
+ #ifdef HAVE_ADL
case IDX_POWERTUNE_ENABLE: powertune_enable = 1; break;
+ #endif
+ #endif // HAVE_HWMON
case IDX_LOGFILE_DISABLE: logfile_disable = 1; break;
case IDX_TRUECRYPT_KEYFILES: truecrypt_keyfiles = optarg; break;
case IDX_SEGMENT_SIZE: segment_size = atoi (optarg); break;
{
if (benchmark == 1)
{
- log_info ("%s v%.2f starting in benchmark-mode...", PROGNAME, (float) VERSION_BIN / 100);
+ log_info ("%s %s (%s) starting in benchmark-mode...", PROGNAME, VERSION_TAG, VERSION_SUM);
log_info ("");
}
else if (restore == 1)
{
- log_info ("%s v%.2f starting in restore-mode...", PROGNAME, (float) VERSION_BIN / 100);
+ log_info ("%s %s (%s) starting in restore-mode...", PROGNAME, VERSION_TAG, VERSION_SUM);
log_info ("");
}
else
{
- log_info ("%s v%.2f starting...", PROGNAME, (float) VERSION_BIN / 100);
+ log_info ("%s %s (%s) starting...", PROGNAME, VERSION_TAG, VERSION_SUM);
log_info ("");
}
return (-1);
}
+ if ((opencl_vector_width != 0) && (opencl_vector_width != 1) && (opencl_vector_width != 2) && (opencl_vector_width != 4) && (opencl_vector_width != 8))
+ {
+ log_error ("ERROR: opencl-vector-width %i not allowed", opencl_vector_width);
+
+ return (-1);
+ }
+
if (show == 1 || left == 1)
{
attack_mode = ATTACK_MODE_NONE;
}
}
+ if (attack_mode != ATTACK_MODE_STRAIGHT)
+ {
+ if ((weak_hash_threshold != WEAK_HASH_THRESHOLD) && (weak_hash_threshold != 0))
+ {
+ log_error ("ERROR: setting --weak-hash-threshold allowed only in straight-attack mode");
+
+ return (-1);
+ }
+
+ weak_hash_threshold = 0;
+ }
+
/**
* induction directory
*/
data.benchmark = benchmark;
data.skip = skip;
data.limit = limit;
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
data.powertune_enable = powertune_enable;
+ #endif
data.logfile_disable = logfile_disable;
data.truecrypt_keyfiles = truecrypt_keyfiles;
data.scrypt_tmto = scrypt_tmto;
* cpu affinity
*/
+ #ifndef OSX
if (cpu_affinity)
{
set_cpu_affinity (cpu_affinity);
}
+ #endif
if (rp_gen_seed_chgd == 0)
{
logfile_top_uint (force);
logfile_top_uint (kernel_accel);
logfile_top_uint (kernel_loops);
+ #ifdef HAVE_HWMON
logfile_top_uint (gpu_temp_abort);
logfile_top_uint (gpu_temp_disable);
logfile_top_uint (gpu_temp_retain);
+ #endif
logfile_top_uint (hash_mode);
logfile_top_uint (hex_charset);
logfile_top_uint (hex_salt);
logfile_top_uint (outfile_check_timer);
logfile_top_uint (outfile_format);
logfile_top_uint (potfile_disable);
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
logfile_top_uint (powertune_enable);
+ #endif
logfile_top_uint (scrypt_tmto);
logfile_top_uint (quiet);
logfile_top_uint (remove);
logfile_top_uint64 (limit);
logfile_top_uint64 (skip);
logfile_top_char (separator);
+ #ifndef OSX
logfile_top_string (cpu_affinity);
+ #endif
logfile_top_string (custom_charset_1);
logfile_top_string (custom_charset_2);
logfile_top_string (custom_charset_3);
logfile_top_string (custom_charset_4);
logfile_top_string (debug_file);
logfile_top_string (opencl_devices);
- logfile_top_string (opencl_platform);
+ logfile_top_string (opencl_platforms);
logfile_top_string (opencl_device_types);
+ logfile_top_uint (opencl_vector_width);
logfile_top_string (induction_dir);
logfile_top_string (markov_hcstat);
logfile_top_string (outfile);
logfile_top_string (truecrypt_keyfiles);
/**
- * device types filter
+ * OpenCL platform selection
*/
- cl_device_type device_types_filter = setup_device_types_filter (opencl_device_types);
+ u32 opencl_platforms_filter = setup_opencl_platforms_filter (opencl_platforms);
+
+ /**
+ * OpenCL device selection
+ */
+
+ u32 devices_filter = setup_devices_filter (opencl_devices);
/**
- * devices
+ * OpenCL device type selection
*/
- uint opencl_devicemask = devices_to_devicemask (opencl_devices);
+ cl_device_type device_types_filter = setup_device_types_filter (opencl_device_types);
/**
* benchmark
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_NOT_SALTED
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
parse_func = hmacsha512_parse_hash;
sort_by_digest = sort_by_digest_8_8;
opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_NOT_ITERATED;
dgst_pos0 = 14;
dgst_pos1 = 15;
parse_func = hmacsha512_parse_hash;
sort_by_digest = sort_by_digest_8_8;
opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_NOT_ITERATED;
dgst_pos0 = 14;
dgst_pos1 = 15;
dgst_size = DGST_SIZE_8_8;
parse_func = sha512crypt_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
parse_func = keccak_parse_hash;
sort_by_digest = sort_by_digest_8_25;
opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 2;
dgst_pos1 = 3;
dgst_size = DGST_SIZE_8_8;
parse_func = truecrypt_parse_hash_1k;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = truecrypt_parse_hash_1k;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = truecrypt_parse_hash_1k;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = sha512aix_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_16;
parse_func = sha512osx_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_16;
parse_func = sha512grub_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
sort_by_digest = sort_by_digest_4_4;
opti_type = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;
- dgst_pos0 = 3;
- dgst_pos1 = 7;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
dgst_pos2 = 2;
- dgst_pos3 = 6;
+ dgst_pos3 = 3;
break;
case 7600: hash_type = HASH_TYPE_SHA1;
dgst_size = DGST_SIZE_8_8;
parse_func = drupal7_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_NOT_SALTED
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 6;
dgst_pos1 = 7;
dgst_size = DGST_SIZE_8_16;
parse_func = pbkdf2_sha512_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = ecryptfs_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_16;
parse_func = oraclet_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
* potfile
*/
- char potfile[256];
-
- memset (potfile, 0, sizeof (potfile));
+ char potfile[256] = { 0 };
snprintf (potfile, sizeof (potfile) - 1, "%s/%s.pot", session_dir, session);
continue;
}
+ if (plain_len >= 255) continue;
+
memcpy (pot_ptr->plain_buf, plain_buf, plain_len);
pot_ptr->plain_len = plain_len;
 * charsets : keep them together for easier maintenance
*/
- cs_t mp_sys[6];
- cs_t mp_usr[4];
-
- memset (mp_sys, 0, sizeof (mp_sys));
- memset (mp_usr, 0, sizeof (mp_usr));
+ cs_t mp_sys[6] = { { { 0 }, 0 } };
+ cs_t mp_usr[4] = { { { 0 }, 0 } };
mp_setup_sys (mp_sys);
if ((username && (remove || show)) || (opts_type & OPTS_TYPE_HASH_COPY))
{
- uint32_t hash_pos;
+ u32 hash_pos;
for (hash_pos = 0; hash_pos < hashes_avail; hash_pos++)
{
uint hccap_size = sizeof (hccap_t);
- char in[hccap_size];
+ char *in = (char *) mymalloc (hccap_size);
while (!feof (fp))
{
- int n = fread (&in, hccap_size, 1, fp);
+ int n = fread (in, hccap_size, 1, fp);
if (n != 1)
{
wpa_t *wpa = (wpa_t *) hashes_buf[hashes_cnt].esalt;
- unsigned char *pke_ptr = (unsigned char *) wpa->pke;
+ u8 *pke_ptr = (u8 *) wpa->pke;
// do the appending task
}
fclose (fp);
+
+ myfree (in);
}
else if (hash_mode == 3000)
{
pke[i] = byte_swap_32 (wpa->pke[i]);
}
- unsigned char mac1[6];
- unsigned char mac2[6];
+ u8 mac1[6];
+ u8 mac2[6];
memcpy (mac1, pke_ptr + 23, 6);
memcpy (mac2, pke_ptr + 29, 6);
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != (unsigned char) hex_to_char (&mac1_pos[j]))
+ if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
found = NULL;
break;
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != (unsigned char) hex_to_char (&mac2_pos[j]))
+ if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
found = NULL;
break;
do
{
- truecrypt_crc32 (keyfile, (unsigned char *) keyfile_buf);
+ truecrypt_crc32 (keyfile, (u8 *) keyfile_buf);
} while ((keyfile = strtok (NULL, ",")) != NULL);
* Some algorithm, like descrypt, can benefit from JIT compilation
*/
- uint force_jit_compilation = 0;
+ int force_jit_compilation = -1;
if (hash_mode == 8900)
{
data.kernel_rules_buf = kernel_rules_buf;
/**
- * platform
+ * OpenCL platforms: detect
*/
- cl_platform_id CL_platforms[CL_PLATFORMS_MAX];
+ cl_platform_id platforms[CL_PLATFORMS_MAX];
- uint CL_platforms_cnt = 0;
+ cl_uint platforms_cnt = 0;
- hc_clGetPlatformIDs (CL_PLATFORMS_MAX, CL_platforms, &CL_platforms_cnt);
+ cl_device_id platform_devices[DEVICES_MAX];
- if (CL_platforms_cnt == 0)
+ cl_uint platform_devices_cnt;
+
+ hc_clGetPlatformIDs (CL_PLATFORMS_MAX, platforms, &platforms_cnt);
+
+ if (platforms_cnt == 0)
{
log_error ("ERROR: No OpenCL compatible platform found");
return (-1);
}
- int CL_platform_sel = 1;
+ /**
+ * OpenCL platforms: for each platform, check whether we need to unset features we cannot use, e.g. temp_retain
+ */
- if (opencl_platform != NULL)
+ for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
{
- CL_platform_sel = atoi (opencl_platform);
- }
+ cl_platform_id platform = platforms[platform_id];
- if (CL_platforms_cnt > 1)
- {
- if (opencl_platform == NULL)
- {
- log_error ("ERROR: Too many OpenCL compatible platforms found");
+ char platform_vendor[INFOSZ] = { 0 };
- log_info ("Please select a single platform using the --opencl-platform option");
- log_info ("");
- log_info ("Available OpenCL platforms:");
- log_info ("");
+ hc_clGetPlatformInfo (platform, CL_PLATFORM_VENDOR, sizeof (platform_vendor), platform_vendor, NULL);
- for (uint i = 0; i < CL_platforms_cnt; i++)
- {
- char CL_platform_vendor[INFOSZ];
+ #ifdef HAVE_HWMON
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ if (strcmp (platform_vendor, CL_VENDOR_NV) == 0)
+ {
+ // make sure that we do not directly control the fan for NVidia
- memset (CL_platform_vendor, 0, sizeof (CL_platform_vendor));
+ gpu_temp_retain = 0;
- hc_clGetPlatformInfo (CL_platforms[i], CL_PLATFORM_VENDOR, sizeof (CL_platform_vendor), CL_platform_vendor, NULL);
+ data.gpu_temp_retain = gpu_temp_retain;
+ }
+ #endif // HAVE_NVML || HAVE_NVAPI
+ #endif
+ }
- log_info ("* %d = %s", i + 1, CL_platform_vendor);
- }
+ /**
+ * OpenCL devices: simply push all devices from all platforms into the same device array
+ */
- log_info ("");
+ hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t));
- return (-1);
- }
- else
- {
- if (CL_platform_sel < 1)
- {
- log_error ("ERROR: --opencl-platform < 1");
+ data.devices_param = devices_param;
- return (-1);
- }
+ uint devices_cnt = 0;
- if (CL_platform_sel > (int) CL_platforms_cnt)
- {
- log_error ("ERROR: invalid OpenCL platforms selected");
+ uint devices_active = 0;
- return (-1);
- }
- }
- }
- else
+ for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
{
- if (CL_platform_sel != 1)
+ if ((opencl_platforms_filter & (1 << platform_id)) == 0) continue;
+
+ cl_platform_id platform = platforms[platform_id];
+
+ hc_clGetDeviceIDs (platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
+
+ for (uint platform_devices_id = 0; platform_devices_id < platform_devices_cnt; platform_devices_id++)
{
- log_error ("ERROR: OpenCL platform number %d is not available", CL_platform_sel);
+ const uint device_id = devices_cnt;
- return (-1);
- }
- }
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- // zero-indexed: not starting to count at 1, as user does
+ device_param->device = platform_devices[platform_devices_id];
- CL_platform_sel -= 1;
+ device_param->device_id = device_id;
- cl_platform_id CL_platform = CL_platforms[CL_platform_sel];
+ device_param->platform_devices_id = platform_devices_id;
- char CL_platform_vendor[INFOSZ];
+ // device_type
- memset (CL_platform_vendor, 0, sizeof (CL_platform_vendor));
+ cl_device_type device_type;
- hc_clGetPlatformInfo (CL_platform, CL_PLATFORM_VENDOR, sizeof (CL_platform_vendor), CL_platform_vendor, NULL);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
- uint vendor_id;
+ device_type &= ~CL_DEVICE_TYPE_DEFAULT;
- if (strcmp (CL_platform_vendor, CL_VENDOR_AMD) == 0)
- {
- vendor_id = VENDOR_ID_AMD;
- }
- else if (strcmp (CL_platform_vendor, CL_VENDOR_NV) == 0)
- {
- vendor_id = VENDOR_ID_NV;
+ device_param->device_type = device_type;
- // make sure that we do not directly control the fan for NVidia
+ // vendor_id
- gpu_temp_retain = 0;
+ cl_uint vendor_id = 0;
- data.gpu_temp_retain = gpu_temp_retain;
- }
- else if (strcmp (CL_platform_vendor, CL_VENDOR_POCL) == 0)
- {
- if (force == 0)
- {
- log_error ("");
- log_error ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
- log_error ("You are STRONGLY encouraged not to use it");
- log_error ("You can use --force to override this but do not post error reports if you do so");
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_VENDOR_ID, sizeof (vendor_id), &vendor_id, NULL);
- return (-1);
- }
+ device_param->vendor_id = vendor_id;
- vendor_id = VENDOR_ID_GENERIC;
- }
- else
- {
- vendor_id = VENDOR_ID_GENERIC;
- }
+ // device_name
- if (vendor_id == VENDOR_ID_GENERIC)
- {
- log_error ("Warning: unknown OpenCL vendor '%s' detected", CL_platform_vendor);
+ char *device_name = (char *) mymalloc (INFOSZ);
- gpu_temp_disable = 1;
- }
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_NAME, INFOSZ, device_name, NULL);
- data.vendor_id = vendor_id;
+ device_param->device_name = device_name;
- /**
- * cached kernel path depends on vendor_id which we don't know, so create it here
- */
+ // device_version
- int vendor_id_folder_size = strlen (profile_dir) + 1 + 7 + 1 + 10 + 1;
+ char *device_version = (char *) mymalloc (INFOSZ);
- char *vendor_id_folder = (char *) mymalloc (vendor_id_folder_size);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_VERSION, INFOSZ, device_version, NULL);
- snprintf (vendor_id_folder, vendor_id_folder_size - 1, "%s/kernels", profile_dir);
+ device_param->device_version = device_version;
- mkdir (vendor_id_folder, 0700);
+ if (strstr (device_version, "pocl"))
+ {
+ // pocl returns the real vendor_id in CL_DEVICE_VENDOR_ID which causes many problems because of hms and missing amd_bfe () etc
+ // we need to overwrite vendor_id to avoid this. maybe open pocl issue?
- snprintf (vendor_id_folder, vendor_id_folder_size - 1, "%s/kernels/%d", profile_dir, vendor_id);
+ cl_uint vendor_id = VENDOR_ID_GENERIC;
- mkdir (vendor_id_folder, 0700);
+ device_param->vendor_id = vendor_id;
+ }
- myfree (vendor_id_folder);
+ // max_compute_units
- /**
- * devices
- */
+ cl_uint vector_width;
- cl_device_id devices_all[DEVICES_MAX];
- cl_device_id devices[DEVICES_MAX];
+ if (opencl_vector_width == OPENCL_VECTOR_WIDTH)
+ {
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, sizeof (vector_width), &vector_width, NULL);
- uint devices_all_cnt = 0;
+ if ((vendor_id == VENDOR_ID_NV) && (strstr (device_name, " Ti") || strstr (device_name, " TI")))
+ {
+ // Yeah that's a super bad hack, but there's no other attribute we could use
- hc_clGetDeviceIDs (CL_platform, device_types_filter, DEVICES_MAX, devices_all, (uint *) &devices_all_cnt);
+ if (vector_width < 2) vector_width *= 2;
+ }
- int hm_adapters_all = devices_all_cnt;
+ if (opti_type & OPTI_TYPE_USES_BITS_64)
+ {
+ if (vector_width > 1) vector_width /= 2;
+ }
+ }
+ else
+ {
+ vector_width = opencl_vector_width;
+ }
- hm_attrs_t hm_adapter_all[DEVICES_MAX];
+ if (vector_width > 8) vector_width = 8;
- memset (hm_adapter_all, 0, sizeof (hm_adapter_all));
+ device_param->vector_width = vector_width;
- if (gpu_temp_disable == 0)
- {
- if (vendor_id == VENDOR_ID_NV)
- {
- #ifdef LINUX
- HM_LIB hm_dll = hm_init ();
+ // max_compute_units
- data.hm_dll = hm_dll;
+ cl_uint device_processors;
- if (hc_NVML_nvmlInit (hm_dll) == NVML_SUCCESS)
- {
- HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (device_processors), &device_processors, NULL);
- int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
+ device_param->device_processors = device_processors;
- int tmp_out = 0;
+ // max_mem_alloc_size
- for (int i = 0; i < tmp_in; i++)
- {
- hm_adapter_all[tmp_out++].adapter_index.nv = nvGPUHandle[i];
- }
+ cl_ulong device_maxmem_alloc;
- hm_adapters_all = tmp_out;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (device_maxmem_alloc), &device_maxmem_alloc, NULL);
- for (int i = 0; i < tmp_out; i++)
- {
- unsigned int speed;
+ device_param->device_maxmem_alloc = device_maxmem_alloc;
- if (hc_NVML_nvmlDeviceGetFanSpeed (hm_dll, 1, hm_adapter_all[i].adapter_index.nv, &speed) != NVML_ERROR_NOT_SUPPORTED) hm_adapter_all[i].fan_supported = 1;
- }
- }
- #endif
+ // max_mem_alloc_size
- #ifdef WIN
- if (NvAPI_Initialize () == NVAPI_OK)
- {
- HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
+ cl_ulong device_global_mem;
- int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (device_global_mem), &device_global_mem, NULL);
- int tmp_out = 0;
+ device_param->device_global_mem = device_global_mem;
- for (int i = 0; i < tmp_in; i++)
- {
- hm_adapter_all[tmp_out++].adapter_index.nv = nvGPUHandle[i];
- }
+ // max_clock_frequency
- hm_adapters_all = tmp_out;
+ cl_uint device_maxclock_frequency;
- for (int i = 0; i < tmp_out; i++)
- {
- NvU32 speed;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (device_maxclock_frequency), &device_maxclock_frequency, NULL);
- if (NvAPI_GPU_GetTachReading (hm_adapter_all[i].adapter_index.nv, &speed) != NVAPI_NOT_SUPPORTED) hm_adapter_all[i].fan_supported = 1;
- }
- }
- #endif
- }
+ device_param->device_maxclock_frequency = device_maxclock_frequency;
- if (vendor_id == VENDOR_ID_AMD)
- {
- HM_LIB hm_dll = hm_init ();
+ // skipped
- data.hm_dll = hm_dll;
+ const u32 skipped1 = ((devices_filter & (1 << device_id)) == 0);
+ const u32 skipped2 = ((device_types_filter & (device_type)) == 0);
- if (hc_ADL_Main_Control_Create (hm_dll, ADL_Main_Memory_Alloc, 0) == ADL_OK)
- {
- // total number of adapters
+ device_param->skipped = (skipped1 || skipped2);
- int hm_adapters_num;
+ // driver_version
- if (get_adapters_num_amd (hm_dll, &hm_adapters_num) != 0) return (-1);
+ char *driver_version = (char *) mymalloc (INFOSZ);
- // adapter info
+ hc_clGetDeviceInfo (device_param->device, CL_DRIVER_VERSION, INFOSZ, driver_version, NULL);
- LPAdapterInfo lpAdapterInfo = hm_get_adapter_info_amd (hm_dll, hm_adapters_num);
+ device_param->driver_version = driver_version;
- if (lpAdapterInfo == NULL) return (-1);
+ // device_name_chksum
- // get a list (of ids of) valid/usable adapters
+ char *device_name_chksum = (char *) mymalloc (INFOSZ);
- int num_adl_adapters = 0;
+ #if __x86_64__
+ snprintf (device_name_chksum, INFOSZ - 1, "%u-%u-%u-%s-%s-%s-%u", 64, device_param->vendor_id, device_param->vector_width, device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
+ #else
+ snprintf (device_name_chksum, INFOSZ - 1, "%u-%u-%u-%s-%s-%s-%u", 32, device_param->vendor_id, device_param->vector_width, device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
+ #endif
- uint32_t *valid_adl_device_list = hm_get_list_valid_adl_adapters (hm_adapters_num, &num_adl_adapters, lpAdapterInfo);
+ uint device_name_digest[4];
- if (num_adl_adapters > 0)
- {
- hc_thread_mutex_lock (mux_adl);
+ device_name_digest[0] = 0;
+ device_name_digest[1] = 0;
+ device_name_digest[2] = 0;
+ device_name_digest[3] = 0;
- // hm_get_opencl_busid_devid (hm_adapter_all, devices_all_cnt, devices_all);
+ md5_64 ((uint *) device_name_chksum, device_name_digest);
- hm_get_adapter_index_amd (hm_adapter_all, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
+ sprintf (device_name_chksum, "%08x", device_name_digest[0]);
- hm_get_overdrive_version (hm_dll, hm_adapter_all, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
- hm_check_fanspeed_control (hm_dll, hm_adapter_all, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
+ device_param->device_name_chksum = device_name_chksum;
- hc_thread_mutex_unlock (mux_adl);
- }
+ // device_processor_cores
- hm_adapters_all = num_adl_adapters;
+ if (device_type & CL_DEVICE_TYPE_CPU)
+ {
+ cl_uint device_processor_cores = 1;
- myfree (valid_adl_device_list);
- myfree (lpAdapterInfo);
+ device_param->device_processor_cores = device_processor_cores;
}
- }
- }
- if (hm_adapters_all == 0)
- {
- gpu_temp_disable = 1;
- }
+ if (device_type & CL_DEVICE_TYPE_GPU)
+ {
+ if (vendor_id == VENDOR_ID_AMD)
+ {
+ cl_uint device_processor_cores = 0;
- if (gpu_temp_disable == 1)
- {
- gpu_temp_abort = 0;
- gpu_temp_retain = 0;
- }
+ #define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043
- /**
- * enable custom signal handler(s)
- */
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof (device_processor_cores), &device_processor_cores, NULL);
- if (benchmark == 0)
- {
- hc_signal (sigHandler_default);
- }
- else
- {
- hc_signal (sigHandler_benchmark);
- }
+ device_param->device_processor_cores = device_processor_cores;
+ }
+ else if (vendor_id == VENDOR_ID_NV)
+ {
+ cl_uint kernel_exec_timeout = 0;
- /**
- * devices mask and properties
- */
+ #define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
- uint hm_adapter_index = 0;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, sizeof (kernel_exec_timeout), &kernel_exec_timeout, NULL);
- uint devices_cnt = 0;
+ device_param->kernel_exec_timeout = kernel_exec_timeout;
- for (uint device_all_id = 0; device_all_id < devices_all_cnt; device_all_id++)
- {
- const uint device_id = devices_cnt;
+ cl_uint device_processor_cores = 0;
- devices[device_id] = devices_all[device_all_id];
+ #define CL_DEVICE_WARP_SIZE_NV 0x4003
- cl_device_type device_type;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_WARP_SIZE_NV, sizeof (device_processor_cores), &device_processor_cores, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
+ device_param->device_processor_cores = device_processor_cores;
- // skip the device, if the user did specify a list of GPUs to skip
+ cl_uint sm_minor = 0;
+ cl_uint sm_major = 0;
- if (opencl_devicemask)
- {
- uint device_all_id_mask = 1 << device_all_id;
+ #define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
+ #define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
- if ((device_all_id_mask & opencl_devicemask) != device_all_id_mask)
- {
- if (quiet == 0 && algorithm_pos == 0) log_info ("Device #%d: skipped by user", device_all_id_mask + 1);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
- if (device_type & CL_DEVICE_TYPE_GPU) hm_adapter_index++;
+ device_param->sm_minor = sm_minor;
+ device_param->sm_major = sm_major;
+ }
+ else
+ {
+ cl_uint device_processor_cores = 1;
- continue;
+ device_param->device_processor_cores = device_processor_cores;
+ }
}
- }
- char device_name[INFOSZ];
+ // display results
- memset (device_name, 0, sizeof (device_name));
+ if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
+ {
+ if (device_param->skipped == 0)
+ {
+ log_info ("Device #%u: %s, %lu/%lu MB allocatable, %dMhz, %uMCU",
+ device_id + 1,
+ device_name,
+ (unsigned int) (device_maxmem_alloc / 1024 / 1024),
+ (unsigned int) (device_global_mem / 1024 / 1024),
+ (unsigned int) (device_maxclock_frequency),
+ (unsigned int) device_processors);
+ }
+ else
+ {
+ log_info ("Device #%u: %s, skipped",
+ device_id + 1,
+ device_name);
+ }
+ }
- cl_ulong global_mem_size;
- cl_ulong max_mem_alloc_size;
- cl_uint max_clock_frequency;
- cl_uint max_compute_units;
+ // common driver check
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_NAME, sizeof (device_name), &device_name, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (global_mem_size), &global_mem_size, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (max_mem_alloc_size), &max_mem_alloc_size, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (max_clock_frequency), &max_clock_frequency, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (max_compute_units), &max_compute_units, NULL);
+ if (device_param->skipped == 0)
+ {
+ if (strstr (device_version, "pocl"))
+ {
+ if (force == 0)
+ {
+ log_info ("");
+ log_info ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
+ log_info ("You are STRONGLY encouraged not to use it");
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
- if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
- {
- log_info ("Device #%u: %s, %lu/%lu MB allocatable, %dMhz, %uMCU",
- device_all_id + 1,
- device_name,
- (unsigned int) (max_mem_alloc_size / 1024 / 1024),
- (unsigned int) (global_mem_size / 1024 / 1024),
- (unsigned int) (max_clock_frequency),
- (unsigned int) max_compute_units);
- }
+ return (-1);
+ }
+ }
- // copy hm_adapter info to data.hm_device[]
+ if (device_type & CL_DEVICE_TYPE_GPU)
+ {
+ if (vendor_id == VENDOR_ID_NV)
+ {
+ if (device_param->kernel_exec_timeout != 0)
+ {
+ if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
+ if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
+ }
+ }
+ else if (vendor_id == VENDOR_ID_AMD)
+ {
+ int catalyst_check = (force == 1) ? 0 : 1;
- uint hm_adapter_cur = hm_adapter_index;
+ int catalyst_warn = 0;
- if ((device_type & CL_DEVICE_TYPE_GPU) == 0)
- {
- // assign a CPU adapter (i.e. not initialized hm_adapter_all[] entry)
+ int catalyst_broken = 0;
- hm_adapter_cur = devices_all_cnt - 1;
- }
+ if (catalyst_check == 1)
+ {
+ catalyst_warn = 1;
- memcpy (&data.hm_device[device_id], &hm_adapter_all[hm_adapter_cur], sizeof (hm_attrs_t));
+ // v14.9 and higher
+ if (atoi (device_param->driver_version) >= 1573)
+ {
+ catalyst_warn = 0;
+ }
- if (device_type & CL_DEVICE_TYPE_GPU)
- {
- hm_adapter_index++;
- }
+ catalyst_check = 0;
+ }
+
+ if (catalyst_broken == 1)
+ {
+ log_info ("");
+ log_info ("ATTENTION! The installed catalyst driver in your system is known to be broken!");
+ log_info ("It will pass over cracked hashes and does not report them as cracked");
+ log_info ("You are STRONGLY encouraged not to use it");
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
+
+ return (-1);
+ }
+
+ if (catalyst_warn == 1)
+ {
+ log_info ("");
+ log_info ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
+ log_info ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
+ log_info ("See oclHashcat's homepage for official supported catalyst drivers");
+ #ifdef _WIN
+ log_info ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
+ #endif
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
+
+ return (-1);
+ }
+ }
+ }
+
+ devices_active++;
+ }
- devices_cnt++;
+ // next please
+
+ devices_cnt++;
+ }
}
- if (devices_cnt == 0)
+ if (devices_active == 0)
{
- log_error ("ERROR: No devices left that matches your specification.");
+ log_error ("ERROR: No devices found/left");
return (-1);
}
data.devices_cnt = devices_cnt;
+ data.devices_active = devices_active;
+
if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
{
log_info ("");
}
/**
- * inform the user
+ * OpenCL devices: allocate buffer for device specific information
+ */
+
+ #ifdef HAVE_HWMON
+ int *temp_retain_fanspeed_value = (int *) mycalloc (devices_cnt, sizeof (int));
+
+ #ifdef HAVE_ADL
+ ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (devices_cnt, sizeof (ADLOD6MemClockState));
+
+ int *od_power_control_status = (int *) mycalloc (devices_cnt, sizeof (int));
+ #endif // ADL
+ #endif
+
+ /**
+ * enable custom signal handler(s)
+ */
+
+ if (benchmark == 0)
+ {
+ hc_signal (sigHandler_default);
+ }
+ else
+ {
+ hc_signal (sigHandler_benchmark);
+ }
+
+ /**
+ * User-defined GPU temp handling
*/
- // gpu temp sanity check
+ #ifdef HAVE_HWMON
+ if (gpu_temp_disable == 1)
+ {
+ gpu_temp_abort = 0;
+ gpu_temp_retain = 0;
+ }
if ((gpu_temp_abort != 0) && (gpu_temp_retain != 0))
{
data.gpu_temp_disable = gpu_temp_disable;
data.gpu_temp_abort = gpu_temp_abort;
data.gpu_temp_retain = gpu_temp_retain;
+ #endif
+
+ /**
+ * inform the user
+ */
if (data.quiet == 0)
{
for (uint i = 0; i < 32; i++)
{
- const uint opti_bit = 1 << i;
+ const uint opti_bit = 1u << i;
if (opti_type & opti_bit) log_info ("* %s", stroptitype (opti_bit));
}
* Watchdog and Temperature balance
*/
+ #ifdef HAVE_HWMON
if (gpu_temp_abort == 0)
{
log_info ("Watchdog: Temperature abort trigger disabled");
{
log_info ("Watchdog: Temperature retain trigger set to %uc", gpu_temp_retain);
}
+ #endif
}
+ if (data.quiet == 0) log_info ("");
+
/**
- * devices init
+ * HM devices: init
*/
- int *temp_retain_fanspeed_value = (int *) mycalloc (devices_cnt, sizeof (int));
-
- ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (devices_cnt, sizeof (ADLOD6MemClockState));
-
- int *od_power_control_status = (int *) mycalloc (devices_cnt, sizeof (int));
-
- hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (devices_cnt, sizeof (hc_device_param_t));
+ #ifdef HAVE_HWMON
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ hm_attrs_t hm_adapters_nv[DEVICES_MAX] = { { { 0 }, 0, 0 } };
+ #endif
- data.devices_param = devices_param;
+ #ifdef HAVE_ADL
+ hm_attrs_t hm_adapters_amd[DEVICES_MAX] = { { { 0 }, 0, 0 } };
+ #endif
- for (uint device_id = 0; device_id < devices_cnt; device_id++)
+ if (gpu_temp_disable == 0)
{
- hc_device_param_t *device_param = &data.devices_param[device_id];
-
- cl_device_id device = devices[device_id];
-
- device_param->device = device;
-
- cl_device_type device_type = 0;
-
- hc_clGetDeviceInfo (device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
-
- device_param->device_type = device_type;
-
- cl_uint max_compute_units = 0;
-
- hc_clGetDeviceInfo (device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (max_compute_units), &max_compute_units, NULL);
-
- device_param->device_processors = max_compute_units;
-
- cl_ulong max_mem_alloc_size = 0;
-
- hc_clGetDeviceInfo (device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (max_mem_alloc_size), &max_mem_alloc_size, NULL);
-
- device_param->device_maxmem_alloc = max_mem_alloc_size;
-
- char tmp[INFOSZ], t1[64];
-
- memset (tmp, 0, sizeof (tmp));
-
- hc_clGetDeviceInfo (device, CL_DEVICE_NAME, sizeof (tmp), &tmp, NULL);
-
- device_param->device_name = mystrdup (tmp);
-
- memset (tmp, 0, sizeof (tmp));
-
- hc_clGetDeviceInfo (device, CL_DEVICE_VERSION, sizeof (tmp), &tmp, NULL);
+ #if defined(WIN) && defined(HAVE_NVAPI)
+ if (NvAPI_Initialize () == NVAPI_OK)
+ {
+ HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
- memset (t1, 0, sizeof (t1));
+ int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
- sscanf (tmp, "%*16s %*16s %*16s (%[^)]16s)", t1);
+ int tmp_out = 0;
- device_param->device_version = mystrdup (t1);
+ for (int i = 0; i < tmp_in; i++)
+ {
+ hm_adapters_nv[tmp_out++].adapter_index.nv = nvGPUHandle[i];
+ }
- memset (tmp, 0, sizeof (tmp));
+ for (int i = 0; i < tmp_out; i++)
+ {
+ NvU32 speed;
- hc_clGetDeviceInfo (device, CL_DRIVER_VERSION, sizeof (tmp), &tmp, NULL);
+ if (NvAPI_GPU_GetTachReading (hm_adapters_nv[i].adapter_index.nv, &speed) != NVAPI_NOT_SUPPORTED) hm_adapters_nv[i].fan_supported = 1;
+ }
+ }
+ #endif // WIN && HAVE_NVAPI
- device_param->driver_version = mystrdup (tmp);
+ #if defined(LINUX) && defined(HAVE_NVML)
+ HM_LIB hm_dll_nv = hm_init (VENDOR_ID_NV);
- // create some filename that is easier to read on cached folder
+ data.hm_dll_nv = hm_dll_nv;
- snprintf (tmp, sizeof (tmp) - 1, "%s-%s-%s-%d", device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
+ if (hm_dll_nv)
+ {
+ if (hc_NVML_nvmlInit (hm_dll_nv) == NVML_SUCCESS)
+ {
+ HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
- uint device_name_digest[4];
+ int tmp_in = hm_get_adapter_index_nv (nvGPUHandle);
- device_name_digest[0] = 0;
- device_name_digest[1] = 0;
- device_name_digest[2] = 0;
- device_name_digest[3] = 0;
+ int tmp_out = 0;
- md5_64 ((uint *) tmp, device_name_digest);
+ for (int i = 0; i < tmp_in; i++)
+ {
+ hm_adapters_nv[tmp_out++].adapter_index.nv = nvGPUHandle[i];
+ }
- sprintf (tmp, "%08x", device_name_digest[0]);
+ for (int i = 0; i < tmp_out; i++)
+ {
+ unsigned int speed;
- device_param->device_name_chksum = mystrdup (tmp);
+ if (hc_NVML_nvmlDeviceGetFanSpeed (hm_dll_nv, 1, hm_adapters_nv[i].adapter_index.nv, &speed) != NVML_ERROR_NOT_SUPPORTED) hm_adapters_nv[i].fan_supported = 1;
+ }
+ }
+ }
+ #endif // LINUX && HAVE_NVML
- if (device_type & CL_DEVICE_TYPE_CPU)
- {
- cl_uint device_processor_cores = 1;
+ #ifdef HAVE_ADL
+ HM_LIB hm_dll_amd = hm_init (VENDOR_ID_AMD);
- device_param->device_processor_cores = device_processor_cores;
- }
+ data.hm_dll_amd = hm_dll_amd;
- if (device_type & CL_DEVICE_TYPE_GPU)
+ if (hm_dll_amd)
{
- if (vendor_id == VENDOR_ID_AMD)
+ if (hc_ADL_Main_Control_Create (hm_dll_amd, ADL_Main_Memory_Alloc, 0) == ADL_OK)
{
- cl_uint device_processor_cores = 0;
+ // total number of adapters
- #define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043
+ int hm_adapters_num;
- hc_clGetDeviceInfo (device, CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof (device_processor_cores), &device_processor_cores, NULL);
+ if (get_adapters_num_amd (hm_dll_amd, &hm_adapters_num) != 0) return (-1);
- device_param->device_processor_cores = device_processor_cores;
- }
-
- if (vendor_id == VENDOR_ID_NV)
- {
- cl_uint kernel_exec_timeout = 0;
+ // adapter info
- #define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
+ LPAdapterInfo lpAdapterInfo = hm_get_adapter_info_amd (hm_dll_amd, hm_adapters_num);
- hc_clGetDeviceInfo (device, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, sizeof (kernel_exec_timeout), &kernel_exec_timeout, NULL);
+ if (lpAdapterInfo == NULL) return (-1);
- device_param->kernel_exec_timeout = kernel_exec_timeout;
+ // get a list (of ids of) valid/usable adapters
- cl_uint device_processor_cores = 0;
+ int num_adl_adapters = 0;
- #define CL_DEVICE_WARP_SIZE_NV 0x4003
+ u32 *valid_adl_device_list = hm_get_list_valid_adl_adapters (hm_adapters_num, &num_adl_adapters, lpAdapterInfo);
- hc_clGetDeviceInfo (device, CL_DEVICE_WARP_SIZE_NV, sizeof (device_processor_cores), &device_processor_cores, NULL);
+ if (num_adl_adapters > 0)
+ {
+ hc_thread_mutex_lock (mux_adl);
- device_param->device_processor_cores = device_processor_cores;
+ // hm_get_opencl_busid_devid (hm_adapters_amd, devices_all_cnt, devices_all);
- cl_uint sm_minor = 0;
- cl_uint sm_major = 0;
+ hm_get_adapter_index_amd (hm_adapters_amd, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
- #define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
- #define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
+ hm_get_overdrive_version (hm_dll_amd, hm_adapters_amd, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
+ hm_check_fanspeed_control (hm_dll_amd, hm_adapters_amd, valid_adl_device_list, num_adl_adapters, lpAdapterInfo);
- hc_clGetDeviceInfo (device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
- hc_clGetDeviceInfo (device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
+ hc_thread_mutex_unlock (mux_adl);
+ }
- device_param->sm_minor = sm_minor;
- device_param->sm_major = sm_major;
+ myfree (valid_adl_device_list);
+ myfree (lpAdapterInfo);
}
}
+ #endif // HAVE_ADL
+ }
- /**
- * common driver check
- */
+ /**
+ * HM devices: copy
+ */
- if (device_type & CL_DEVICE_TYPE_GPU)
+ if (gpu_temp_disable == 0)
+ {
+ for (uint device_id = 0; device_id < devices_cnt; device_id++)
{
- if (vendor_id == VENDOR_ID_NV)
- {
- if (device_param->kernel_exec_timeout != 0)
- {
- if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
- if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
- }
- }
-
- if (vendor_id == VENDOR_ID_AMD)
- {
- int catalyst_check = (force == 1) ? 0 : 1;
-
- int catalyst_warn = 0;
-
- int catalyst_broken = 0;
-
- if (catalyst_check == 1)
- {
- catalyst_warn = 1;
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- // v14.9 and higher
- if ((atoi (device_param->device_version) >= 1573)
- && (atoi (device_param->driver_version) >= 1573))
- {
- catalyst_warn = 0;
- }
+ if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
- catalyst_check = 0;
- }
+ if (device_param->skipped) continue;
- if (catalyst_broken == 1)
- {
- log_error ("");
- log_error ("ATTENTION! The installed catalyst driver in your system is known to be broken!");
- log_error ("It will pass over cracked hashes and does not report them as cracked");
- log_error ("You are STRONGLY encouraged not to use it");
- log_error ("You can use --force to override this but do not post error reports if you do so");
-
- return (-1);
- }
+ const uint platform_devices_id = device_param->platform_devices_id;
- if (catalyst_warn == 1)
- {
- log_error ("");
- log_error ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
- log_error ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
- log_error ("See oclHashcat's homepage for official supported catalyst drivers");
- #ifdef _WIN
- log_error ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
- #endif
- log_error ("You can use --force to override this but do not post error reports if you do so");
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ if (device_param->vendor_id == VENDOR_ID_NV)
+ {
+ memcpy (&data.hm_device[device_id], &hm_adapters_nv[platform_devices_id], sizeof (hm_attrs_t));
+ }
+ #endif
- return (-1);
- }
+ #ifdef HAVE_ADL
+ if (device_param->vendor_id == VENDOR_ID_AMD)
+ {
+ memcpy (&data.hm_device[device_id], &hm_adapters_amd[platform_devices_id], sizeof (hm_attrs_t));
}
+ #endif
}
}
* Driver / ADL bug?
*/
- if (vendor_id == VENDOR_ID_AMD)
+ #ifdef HAVE_ADL
+ if (powertune_enable == 1)
{
- if (powertune_enable == 1)
+ hc_thread_mutex_lock (mux_adl);
+
+ for (uint device_id = 0; device_id < devices_cnt; device_id++)
{
- hc_thread_mutex_lock (mux_adl);
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- for (uint i = 0; i < devices_cnt; i++)
+ if (data.hm_device[device_id].od_version == 6)
{
- if (data.hm_device[i].od_version == 6)
+ // set powertune value only
+
+ int powertune_supported = 0;
+
+ int ADL_rc = 0;
+
+ if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
{
- // set powertune value only
+ log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
- int powertune_supported = 0;
+ return (-1);
+ }
- int ADL_rc = 0;
+ if (powertune_supported != 0)
+ {
+ // powertune set
+ ADLOD6PowerControlInfo powertune = {0, 0, 0, 0, 0};
- if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll, data.hm_device[i].adapter_index.amd, &powertune_supported)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune)) != ADL_OK)
{
- log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
+ log_error ("ERROR: Failed to get current ADL PowerControl settings");
return (-1);
}
- if (powertune_supported != 0)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
{
- // powertune set
- ADLOD6PowerControlInfo powertune = {0, 0, 0, 0, 0};
-
- if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll, data.hm_device[i].adapter_index.amd, &powertune)) != ADL_OK)
- {
- log_error ("ERROR: Failed to get current ADL PowerControl settings");
-
- return (-1);
- }
-
- if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll, data.hm_device[i].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
- {
- log_error ("ERROR: Failed to set new ADL PowerControl values");
+ log_error ("ERROR: Failed to set new ADL PowerControl values");
- return (-1);
- }
+ return (-1);
}
}
}
-
- hc_thread_mutex_unlock (mux_adl);
}
+
+ hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
+ #endif // HAVE_HWMON
uint kernel_blocks_all = 0;
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
/**
* device properties
*/
* create command-queue
*/
- // not support with NV
+ // not supported with NV
// device_param->command_queue = hc_clCreateCommandQueueWithProperties (device_param->context, device_param->device, NULL);
device_param->command_queue = hc_clCreateCommandQueue (device_param->context, device_param->device, 0);
if (device_type & CL_DEVICE_TYPE_CPU)
{
- // CPU still need lots of workitems, don't know why...
- // for testing phase, lets start with this
-
- kernel_accel = 1;
+ if (benchmark_mode == 0)
+ {
+ if (kernel_accel > 16)
+ {
+ kernel_accel = 16;
+ }
+ }
+ else
+ {
+ if (kernel_accel > 64)
+ {
+ kernel_accel = 64;
+ }
+ }
}
uint kernel_power = device_processors * kernel_threads * kernel_accel;
uint size_bfs = KERNEL_BFS * sizeof (bf_t);
uint size_tm = 32 * sizeof (bs_word_t);
- uint64_t size_scryptV = 1;
+ u64 size_scryptV = 1;
if ((hash_mode == 8900) || (hash_mode == 9300))
{
if (hash_mode == 8900)
{
- if (vendor_id == VENDOR_ID_AMD)
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
tmto_start = 1;
}
- else if (vendor_id == VENDOR_ID_NV)
+ else if (device_param->vendor_id == VENDOR_ID_NV)
{
tmto_start = 3;
}
}
else if (hash_mode == 9300)
{
- if (vendor_id == VENDOR_ID_AMD)
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
tmto_start = 3;
}
- else if (vendor_id == VENDOR_ID_NV)
+ else if (device_param->vendor_id == VENDOR_ID_NV)
{
tmto_start = 5;
}
uint shader_per_mp = 1;
- if (vendor_id == VENDOR_ID_AMD)
+ if (device_param->vendor_id == VENDOR_ID_AMD)
{
shader_per_mp = 8;
}
-
- if (vendor_id == VENDOR_ID_NV)
+ else if (device_param->vendor_id == VENDOR_ID_NV)
{
shader_per_mp = 32;
}
char build_opts[1024];
- // we don't have sm_* on AMD but it doesn't matter
+ // we don't have sm_* on non-NV vendors but it doesn't matter
- sprintf (build_opts, "-I%s/ -DVENDOR_ID=%d -DCUDA_ARCH=%d", shared_dir, vendor_id, (device_param->sm_major * 100) + device_param->sm_minor);
-
- /**
- * a0 kernel, required for some fast hashes to make weak_hash_check work
- */
-
- const uint add_flag = OPTS_TYPE_PT_ADD01
- | OPTS_TYPE_PT_ADD02
- | OPTS_TYPE_PT_ADD80
- | OPTS_TYPE_PT_ADDBITS14
- | OPTS_TYPE_PT_ADDBITS15
- | OPTS_TYPE_ST_ADD01
- | OPTS_TYPE_ST_ADD02
- | OPTS_TYPE_ST_ADD80
- | OPTS_TYPE_ST_ADDBITS14
- | OPTS_TYPE_ST_ADDBITS15;
-
- if ((weak_hash_threshold) && (attack_exec == ATTACK_EXEC_INSIDE_KERNEL) && (opts_type & add_flag))
- {
- /**
- * kernel source filename
- */
-
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
-
- generate_source_kernel_filename (attack_exec, ATTACK_KERN_STRAIGHT, kern_type, shared_dir, source_file);
-
- struct stat sst;
-
- if (stat (source_file, &sst) == -1)
- {
- log_error ("ERROR: %s: %s", source_file, strerror (errno));
-
- return -1;
- }
-
- /**
- * kernel cached filename
- */
-
- char cached_file[256];
-
- memset (cached_file, 0, sizeof (cached_file));
-
- generate_cached_kernel_filename (attack_exec, ATTACK_KERN_STRAIGHT, kern_type, profile_dir, device_name_chksum, vendor_id, cached_file);
-
- int cached = 1;
-
- struct stat cst;
-
- if (stat (cached_file, &cst) == -1)
- {
- cached = 0;
- }
-
- /**
- * kernel compile or load
- */
-
- size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
-
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
-
- if (force_jit_compilation == 0)
- {
- if (cached == 0)
- {
- if (quiet == 0) log_info ("Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, cached_file);
-
- load_kernel (source_file, 1, kernel_lengths, kernel_sources);
-
- device_param->program_weak = hc_clCreateProgramWithSource (device_param->context, 1, (const char **) kernel_sources, NULL);
-
- hc_clBuildProgram (device_param->program_weak, 1, &device_param->device, build_opts, NULL, NULL);
-
- size_t binary_size;
-
- clGetProgramInfo (device_param->program_weak, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
-
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
-
- clGetProgramInfo (device_param->program_weak, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
-
- writeProgramBin (cached_file, binary, binary_size);
-
- local_free (binary);
- }
- else
- {
- if (quiet == 0) log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
-
- load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
-
- device_param->program_weak = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
-
- hc_clBuildProgram (device_param->program_weak, 1, &device_param->device, build_opts, NULL, NULL);
- }
- }
- else
- {
- if (quiet == 0) log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, source_file, sst.st_size);
-
- load_kernel (source_file, 1, kernel_lengths, kernel_sources);
-
- device_param->program_weak = hc_clCreateProgramWithSource (device_param->context, 1, (const char **) kernel_sources, NULL);
-
- if (force_jit_compilation == 1500)
- {
- sprintf (build_opts, "%s -DDESCRYPT_SALT=%d", build_opts, data.salts_buf[0].salt_buf[0]);
- }
- else if (force_jit_compilation == 8900)
- {
- sprintf (build_opts, "%s -DSCRYPT_N=%d -DSCRYPT_R=%d -DSCRYPT_P=%d -DSCRYPT_TMTO=%d", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, 1 << data.salts_buf[0].scrypt_tmto);
- }
-
- hc_clBuildProgram (device_param->program_weak, 1, &device_param->device, build_opts, NULL, NULL);
- }
-
- local_free (kernel_lengths);
- local_free (kernel_sources[0]);
- local_free (kernel_sources);
-
- // this is mostly for debug
-
- size_t ret_val_size = 0;
-
- clGetProgramBuildInfo (device_param->program_weak, device_param->device, CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
-
- if (ret_val_size > 2)
- {
- char *build_log = (char *) mymalloc (ret_val_size + 1);
-
- memset (build_log, 0, ret_val_size + 1);
-
- clGetProgramBuildInfo (device_param->program_weak, device_param->device, CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL);
-
- puts (build_log);
-
- myfree (build_log);
- }
- }
+ sprintf (build_opts, "-I%s/ -DVENDOR_ID=%d -DCUDA_ARCH=%d -DVECT_SIZE=%u -DDEVICE_TYPE=%u", shared_dir, device_param->vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type);
/**
* main kernel
* kernel source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
generate_source_kernel_filename (attack_exec, attack_kern, kern_type, shared_dir, source_file);
* kernel cached filename
*/
- char cached_file[256];
+ char cached_file[256] = { 0 };
- memset (cached_file, 0, sizeof (cached_file));
-
- generate_cached_kernel_filename (attack_exec, attack_kern, kern_type, profile_dir, device_name_chksum, vendor_id, cached_file);
+ generate_cached_kernel_filename (attack_exec, attack_kern, kern_type, profile_dir, device_name_chksum, cached_file);
int cached = 1;
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
- if (force_jit_compilation == 0)
+ if (force_jit_compilation == -1)
{
if (cached == 0)
{
clGetProgramInfo (device_param->program, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program, 1, &device_param->device, build_opts, NULL, NULL);
}
* kernel mp source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
generate_source_kernel_mp_filename (opti_type, opts_type, shared_dir, source_file);
* kernel mp cached filename
*/
- char cached_file[256];
+ char cached_file[256] = { 0 };
- memset (cached_file, 0, sizeof (cached_file));
-
- generate_cached_kernel_mp_filename (opti_type, opts_type, profile_dir, device_name_chksum, vendor_id, cached_file);
+ generate_cached_kernel_mp_filename (opti_type, opts_type, profile_dir, device_name_chksum, cached_file);
int cached = 1;
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
if (cached == 0)
{
clGetProgramInfo (device_param->program_mp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program_mp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_mp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program_mp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program_mp, 1, &device_param->device, build_opts, NULL, NULL);
}
* kernel amp source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
generate_source_kernel_amp_filename (attack_kern, shared_dir, source_file);
* kernel amp cached filename
*/
- char cached_file[256];
-
- memset (cached_file, 0, sizeof (cached_file));
+ char cached_file[256] = { 0 };
- generate_cached_kernel_amp_filename (attack_kern, profile_dir, device_name_chksum, vendor_id, cached_file);
+ generate_cached_kernel_amp_filename (attack_kern, profile_dir, device_name_chksum, cached_file);
int cached = 1;
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
if (cached == 0)
{
clGetProgramInfo (device_param->program_amp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program_amp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_amp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program_amp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program_amp, 1, &device_param->device, build_opts, NULL, NULL);
}
hc_clEnqueueWriteBuffer (device_param->command_queue, device_param->d_rules, CL_TRUE, 0, size_rules, kernel_rules_buf, 0, NULL, NULL);
- run_kernel_bzero (device_param, device_param->d_rules_c, size_rules_c);
+ run_kernel_bzero (device_param, device_param->d_rules_c, size_rules_c);
}
else if (attack_kern == ATTACK_KERN_COMBI)
{
* kernel name
*/
- char kernel_name[64];
-
- memset (kernel_name, 0, sizeof (kernel_name));
+ char kernel_name[64] = { 0 };
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
device_param->kernel3 = hc_clCreateKernel (device_param->program, kernel_name);
}
- if (weak_hash_threshold)
- {
- if (opts_type & add_flag)
- {
- if (opti_type & OPTI_TYPE_SINGLE_HASH)
- {
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_s%02d", kern_type, 4);
-
- device_param->kernel_weak = hc_clCreateKernel (device_param->program_weak, kernel_name);
- }
- else
- {
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 4);
-
- device_param->kernel_weak = hc_clCreateKernel (device_param->program_weak, kernel_name);
- }
- }
- else
- {
- if (opti_type & OPTI_TYPE_SINGLE_HASH)
- {
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_s%02d", kern_type, 4);
-
- device_param->kernel_weak = hc_clCreateKernel (device_param->program, kernel_name);
- }
- else
- {
- snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 4);
-
- device_param->kernel_weak = hc_clCreateKernel (device_param->program, kernel_name);
- }
- }
- }
-
if (data.attack_mode == ATTACK_MODE_BF)
{
if (opts_type & OPTS_TYPE_PT_BITSLICE)
if (opts_type & OPTS_TYPE_HOOK12) hc_clSetKernelArg (device_param->kernel12, i, sizeof (cl_mem), device_param->kernel_params[i]);
if (opts_type & OPTS_TYPE_HOOK23) hc_clSetKernelArg (device_param->kernel23, i, sizeof (cl_mem), device_param->kernel_params[i]);
-
- if (weak_hash_threshold)
- {
- hc_clSetKernelArg (device_param->kernel_weak, i, sizeof (cl_mem), device_param->kernel_params[i]);
- }
}
for (uint i = 21; i <= 31; i++)
if (opts_type & OPTS_TYPE_HOOK12) hc_clSetKernelArg (device_param->kernel12, i, sizeof (cl_uint), device_param->kernel_params[i]);
if (opts_type & OPTS_TYPE_HOOK23) hc_clSetKernelArg (device_param->kernel23, i, sizeof (cl_uint), device_param->kernel_params[i]);
-
- if (weak_hash_threshold)
- {
- hc_clSetKernelArg (device_param->kernel_weak, i, sizeof (cl_uint), device_param->kernel_params[i]);
- }
}
if (attack_mode == ATTACK_MODE_BF)
* Store initial fanspeed if gpu_temp_retain is enabled
*/
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
int gpu_temp_retain_set = 0;
if (gpu_temp_disable == 0)
uint cur_temp = 0;
uint default_temp = 0;
- int ADL_rc = hc_ADL_Overdrive6_TargetTemperatureData_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, (int *) &cur_temp, (int *) &default_temp);
+ int ADL_rc = hc_ADL_Overdrive6_TargetTemperatureData_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, (int *) &cur_temp, (int *) &default_temp);
if (ADL_rc == ADL_OK)
{
int powertune_supported = 0;
- if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
ADLOD6PowerControlInfo powertune = {0, 0, 0, 0, 0};
- if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &powertune)) == ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControlInfo_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune)) == ADL_OK)
{
- ADL_rc = hc_ADL_Overdrive_PowerControl_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &od_power_control_status[device_id]);
+ ADL_rc = hc_ADL_Overdrive_PowerControl_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &od_power_control_status[device_id]);
}
if (ADL_rc != ADL_OK)
return (-1);
}
- if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll, data.hm_device[device_id].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, powertune.iMaxValue)) != ADL_OK)
{
log_error ("ERROR: Failed to set new ADL PowerControl values");
od_clock_mem_status[device_id].state.iNumberOfPerformanceLevels = 2;
- if ((ADL_rc = hc_ADL_Overdrive_StateInfo_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, ADL_OD6_GETSTATEINFO_CUSTOM_PERFORMANCE, &od_clock_mem_status[device_id])) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_StateInfo_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, ADL_OD6_GETSTATEINFO_CUSTOM_PERFORMANCE, &od_clock_mem_status[device_id])) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL memory and engine clock frequency");
ADLOD6Capabilities caps = {0, 0, 0, {0, 0, 0}, {0, 0, 0}, 0, 0};
- if ((ADL_rc = hc_ADL_Overdrive_Capabilities_Get (data.hm_dll, data.hm_device[device_id].adapter_index.amd, &caps)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_Capabilities_Get (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &caps)) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL device capabilities");
int engine_clock_profile_max = od_clock_mem_status[device_id].state.aLevels[1].iEngineClock;
int memory_clock_profile_max = od_clock_mem_status[device_id].state.aLevels[1].iMemoryClock;
- // warning if profile has to low max values
+ // warning if profile has too low max values
if ((engine_clock_max - engine_clock_profile_max) > warning_trigger_engine)
{
performance_state->aLevels[0].iMemoryClock = memory_clock_profile_max;
performance_state->aLevels[1].iMemoryClock = memory_clock_profile_max;
- if ((ADL_rc = hc_ADL_Overdrive_State_Set (data.hm_dll, data.hm_device[device_id].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
+ if ((ADL_rc = hc_ADL_Overdrive_State_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
{
log_info ("ERROR: Failed to set ADL performance state");
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON && HAVE_ADL
}
data.kernel_blocks_all = kernel_blocks_all;
* keep track of the progress
*/
- data.words_progress_done = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
- data.words_progress_rejected = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
- data.words_progress_restored = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
+ data.words_progress_done = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
+ data.words_progress_rejected = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
+ data.words_progress_restored = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
/**
* open filehandles
data.quiet = 1;
- const uint64_t words1_cnt = count_words (wl_data, fp1, dictfile1, dictstat_base, &dictstat_nmemb);
+ const u64 words1_cnt = count_words (wl_data, fp1, dictfile1, dictstat_base, &dictstat_nmemb);
data.quiet = quiet;
data.quiet = 1;
- const uint64_t words2_cnt = count_words (wl_data, fp2, dictfile2, dictstat_base, &dictstat_nmemb);
+ const u64 words2_cnt = count_words (wl_data, fp2, dictfile2, dictstat_base, &dictstat_nmemb);
data.quiet = quiet;
if (weak_hash_threshold >= salts_cnt)
{
+ uint first_device_id = 0;
+
+ for (uint device_id = 0; device_id < devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ first_device_id = device_id;
+
+ break;
+ }
+
if (data.quiet == 0) log_info_nn ("Checking for weak hashes...");
for (uint salt_pos = 0; salt_pos < salts_cnt; salt_pos++)
{
- weak_hash_check (&data.devices_param[0], salt_pos, kernel_loops);
+ weak_hash_check (&data.devices_param[first_device_id], salt_pos, kernel_loops);
}
}
data.css_cnt = css_cnt;
data.css_buf = css_buf;
- uint uniq_tbls[SP_PW_MAX][CHARSIZ];
-
- memset (uniq_tbls, 0, sizeof (uniq_tbls));
+ uint uniq_tbls[SP_PW_MAX][CHARSIZ] = { { 0 } };
mp_css_to_uniq_tbl (css_cnt, css_buf, uniq_tbls);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->kernel_params_mp[0] = &device_param->d_combs;
device_param->kernel_params_mp[1] = &device_param->d_root_css_buf;
device_param->kernel_params_mp[2] = &device_param->d_markov_css_buf;
data.devices_status = STATUS_INIT;
- memset (data.words_progress_done, 0, data.salts_cnt * sizeof (uint64_t));
- memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (uint64_t));
- memset (data.words_progress_restored, 0, data.salts_cnt * sizeof (uint64_t));
+ memset (data.words_progress_done, 0, data.salts_cnt * sizeof (u64));
+ memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (u64));
+ memset (data.words_progress_restored, 0, data.salts_cnt * sizeof (u64));
memset (data.cpt_buf, 0, CPT_BUF * sizeof (cpt_t));
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->speed_pos = 0;
- memset (device_param->speed_cnt, 0, SPEED_CACHE * sizeof (uint64_t));
+ memset (device_param->speed_cnt, 0, SPEED_CACHE * sizeof (u64));
memset (device_param->speed_ms, 0, SPEED_CACHE * sizeof (float));
memset (device_param->speed_rec, 0, SPEED_CACHE * sizeof (hc_timer_t));
if (maskpos > 0 && dictpos == 0) free (masks[maskpos - 1]);
- uint uniq_tbls[SP_PW_MAX][CHARSIZ];
-
- memset (uniq_tbls, 0, sizeof (uniq_tbls));
+ uint uniq_tbls[SP_PW_MAX][CHARSIZ] = { { 0 } };
mp_css_to_uniq_tbl (css_cnt, css_buf, uniq_tbls);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->kernel_params_mp_l[0] = &device_param->d_pws_buf;
device_param->kernel_params_mp_l[1] = &device_param->d_root_css_buf;
device_param->kernel_params_mp_l[2] = &device_param->d_markov_css_buf;
}
}
- uint64_t words_base = data.words_cnt;
+ u64 words_base = data.words_cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
{
hc_device_param_t *device_param = &devices_param[device_id];
- device_param->device_id = device_id;
-
if (wordlist_mode == WL_MODE_STDIN)
{
hc_thread_create (c_threads[device_id], thread_calc_stdin, device_param);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
local_free (device_param->result);
local_free (device_param->pw_caches);
if (device_param->kernel_tb) hc_clReleaseKernel (device_param->kernel_tb);
if (device_param->kernel_tm) hc_clReleaseKernel (device_param->kernel_tm);
if (device_param->kernel_amp) hc_clReleaseKernel (device_param->kernel_amp);
- if (device_param->kernel_weak) hc_clReleaseKernel (device_param->kernel_weak);
if (device_param->program) hc_clReleaseProgram (device_param->program);
if (device_param->program_mp) hc_clReleaseProgram (device_param->program_mp);
if (device_param->program_amp) hc_clReleaseProgram (device_param->program_amp);
- if (device_param->program_weak) hc_clReleaseProgram (device_param->program_weak);
if (device_param->command_queue) hc_clReleaseCommandQueue (device_param->command_queue);
if (device_param->context) hc_clReleaseContext (device_param->context);
// reset default fan speed
+ #ifdef HAVE_HWMON
if (gpu_temp_disable == 0)
{
+ #ifdef HAVE_ADL
if (gpu_temp_retain != 0) // VENDOR_ID_AMD is implied here
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if (data.hm_device[i].fan_supported == 1)
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ if (data.hm_device[device_id].fan_supported == 1)
{
- int fanspeed = temp_retain_fanspeed_value[i];
+ int fanspeed = temp_retain_fanspeed_value[device_id];
if (fanspeed == -1) continue;
- int rc = hm_set_fanspeed_with_device_id_amd (i, fanspeed);
+ int rc = hm_set_fanspeed_with_device_id_amd (device_id, fanspeed);
- if (rc == -1) log_info ("WARNING: Failed to restore default fan speed for gpu number: %i:", i);
+ if (rc == -1) log_info ("WARNING: Failed to restore default fan speed for gpu number: %i:", device_id);
}
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
}
// reset power tuning
+ #ifdef HAVE_ADL
if (powertune_enable == 1) // VENDOR_ID_AMD is implied here
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if (data.hm_device[i].od_version == 6)
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ if (data.hm_device[device_id].od_version == 6)
{
// check powertune capabilities first, if not available then skip device
int powertune_supported = 0;
- if ((hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll, data.hm_device[i].adapter_index.amd, &powertune_supported)) != ADL_OK)
+ if ((hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
{
// powercontrol settings
- if ((hc_ADL_Overdrive_PowerControl_Set (data.hm_dll, data.hm_device[i].adapter_index.amd, od_power_control_status[i])) != ADL_OK)
+ if ((hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, od_power_control_status[device_id])) != ADL_OK)
{
log_info ("ERROR: Failed to restore the ADL PowerControl values");
performance_state->iNumberOfPerformanceLevels = 2;
- performance_state->aLevels[0].iEngineClock = od_clock_mem_status[i].state.aLevels[0].iEngineClock;
- performance_state->aLevels[1].iEngineClock = od_clock_mem_status[i].state.aLevels[1].iEngineClock;
- performance_state->aLevels[0].iMemoryClock = od_clock_mem_status[i].state.aLevels[0].iMemoryClock;
- performance_state->aLevels[1].iMemoryClock = od_clock_mem_status[i].state.aLevels[1].iMemoryClock;
+ performance_state->aLevels[0].iEngineClock = od_clock_mem_status[device_id].state.aLevels[0].iEngineClock;
+ performance_state->aLevels[1].iEngineClock = od_clock_mem_status[device_id].state.aLevels[1].iEngineClock;
+ performance_state->aLevels[0].iMemoryClock = od_clock_mem_status[device_id].state.aLevels[0].iMemoryClock;
+ performance_state->aLevels[1].iMemoryClock = od_clock_mem_status[device_id].state.aLevels[1].iMemoryClock;
- if ((hc_ADL_Overdrive_State_Set (data.hm_dll, data.hm_device[i].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
+ if ((hc_ADL_Overdrive_State_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
{
log_info ("ERROR: Failed to restore ADL performance state");
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
if (gpu_temp_disable == 0)
{
- if (vendor_id == VENDOR_ID_NV)
+ #if defined(LINUX) && defined(HAVE_NVML)
+ if (data.hm_dll_nv)
{
- #ifdef LINUX
- hc_NVML_nvmlShutdown (data.hm_dll);
- #endif
+ hc_NVML_nvmlShutdown (data.hm_dll_nv);
- #ifdef WIN
- NvAPI_Unload ();
- #endif
+ hm_close (data.hm_dll_nv);
}
+ #endif
- if (vendor_id == VENDOR_ID_AMD)
- {
- hc_ADL_Main_Control_Destroy (data.hm_dll);
-
- hm_close (data.hm_dll);
- }
+ #if defined(WIN) && defined(HAVE_NVAPI)
+ NvAPI_Unload ();
+ #endif
- #ifdef LINUX
- if (vendor_id == VENDOR_ID_NV)
+ #ifdef HAVE_ADL
+ if (data.hm_dll_amd)
{
- hm_close (data.hm_dll);
+ hc_ADL_Main_Control_Destroy (data.hm_dll_amd);
+
+ hm_close (data.hm_dll_amd);
}
#endif
}
+ #endif // HAVE_HWMON
// free memory
local_free (bitmap_s2_c);
local_free (bitmap_s2_d);
+ #ifdef HAVE_HWMON
local_free (temp_retain_fanspeed_value);
+ #ifdef HAVE_ADL
local_free (od_clock_mem_status);
local_free (od_power_control_status);
+ #endif // HAVE_ADL
+ #endif
global_free (devices_param);