* License.....: MIT
*/
+#ifdef OSX
+#include <stdio.h>
+#endif
+
#include <common.h>
#include <shared.h>
#include <rp_kernel_on_cpu.h>
#include <getopt.h>
const char *PROGNAME = "oclHashcat";
-const char *VERSION_TXT = "2.10";
const uint VERSION_BIN = 210;
const uint RESTORE_MIN = 210;
#define POWERTUNE_ENABLE 0
#define LOGFILE_DISABLE 0
#define SCRYPT_TMTO 0
+#define OPENCL_VECTOR_WIDTH 0
#define WL_MODE_STDIN 1
#define WL_MODE_FILE 2
1000,
1100,
2100,
- 12800,
+ 12800,
1500,
12400,
500,
* types
*/
-static void (*get_next_word_func) (char *, uint32_t, uint32_t *, uint32_t *);
+static void (*get_next_word_func) (char *, u32, u32 *, u32 *);
/**
* globals
" -c, --segment-size=NUM Size in MB to cache from the wordfile",
" --bitmap-min=NUM Minimum number of bits allowed for bitmaps",
" --bitmap-max=NUM Maximum number of bits allowed for bitmaps",
+ #ifndef OSX
" --cpu-affinity=STR Locks to CPU devices, separate with comma",
+ #else
+ " --cpu-affinity=STR Locks to CPU devices, separate with comma (disabled on OSX)",
+ #endif
" --opencl-platforms=STR OpenCL platforms to use, separate with comma",
" -d, --opencl-devices=STR OpenCL devices to use, separate with comma",
" --opencl-device-types=STR OpenCL device-types to use, separate with comma, see references below",
+ " --opencl-vector-width=NUM OpenCL vector-width (either 1, 2, 4 or 8), overrides value from device query",
" -w, --workload-profile=NUM Enable a specific workload profile, see references below",
" -n, --kernel-accel=NUM Workload tuning: 1, 8, 40, 80, 160",
" -u, --kernel-loops=NUM Workload fine-tuning: 8 - 1024",
+ #ifdef HAVE_HWMON
" --gpu-temp-disable Disable temperature and fanspeed readings and triggers",
" --gpu-temp-abort=NUM Abort session if GPU temperature reaches NUM degrees celsius",
" --gpu-temp-retain=NUM Try to retain GPU temperature at NUM degrees celsius (AMD only)",
+ #ifdef HAVE_ADL
" --powertune-enable Enable automatic power tuning option (AMD OverDrive 6 only)",
+ #endif
+ #endif
" --scrypt-tmto=NUM Manually override automatically calculated TMTO value for scrypt",
"",
"* Distributed:",
{
hc_device_param_t *device_param = &data.devices_param[device_id];
- uint64_t speed_cnt = 0;
- float speed_ms = 0;
+ if (device_param->skipped) continue;
+
+ u64 speed_cnt = 0;
+ float speed_ms = 0;
for (int i = 0; i < SPEED_CACHE; i++)
{
* words_cur
*/
- uint64_t words_cur = get_lowest_words_done ();
+ u64 words_cur = get_lowest_words_done ();
fprintf (out, "CURKU\t%llu\t", (unsigned long long int) words_cur);
if (salts_left == 0) salts_left = 1;
- uint64_t progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * salts_left;
- uint64_t all_done = 0;
- uint64_t all_rejected = 0;
- uint64_t all_restored = 0;
+ u64 all_done = 0;
+ u64 all_rejected = 0;
+ u64 all_restored = 0;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
all_restored += data.words_progress_restored[salt_pos];
}
- uint64_t progress_cur = all_restored + all_done + all_rejected;
- uint64_t progress_end = progress_total;
+ u64 progress_cur = all_restored + all_done + all_rejected;
+ u64 progress_end = progress_total;
- uint64_t progress_skip = 0;
+ u64 progress_skip = 0;
if (data.skip)
{
else if (data.attack_kern == ATTACK_KERN_BF) progress_end *= data.bfs_cnt;
}
- uint64_t progress_cur_relative_skip = progress_cur - progress_skip;
- uint64_t progress_end_relative_skip = progress_end - progress_skip;
+ u64 progress_cur_relative_skip = progress_cur - progress_skip;
+ u64 progress_end_relative_skip = progress_end - progress_skip;
fprintf (out, "PROGRESS\t%llu\t%llu\t", (unsigned long long int) progress_cur_relative_skip, (unsigned long long int) progress_end_relative_skip);
* temperature
*/
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
fprintf (out, "TEMP\t");
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- int temp = hm_get_temperature_with_device_id (i);
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ int temp = hm_get_temperature_with_device_id (device_id);
fprintf (out, "%d\t", temp);
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
#ifdef _WIN
fputc ('\r', out);
* speed new
*/
- uint64_t speed_cnt[DEVICES_MAX];
- float speed_ms[DEVICES_MAX];
+ u64 speed_cnt[DEVICES_MAX];
+ float speed_ms[DEVICES_MAX];
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
// we need to clear values (set to 0) because in case the device does
// not get new candidates it idles around but speed display would
// show it as working.
// if we instantly set it to 0 after reading it happens that the
- // speed can be shown as zero if the users refreshs to fast.
+ // speed can be shown as zero if the users refreshes too fast.
// therefore, we add a timestamp when a stat was recorded and if its
- // to old we will not use it
+ // too old we will not use it
speed_cnt[device_id] = 0;
speed_ms[device_id] = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
hashes_dev_ms[device_id] = 0;
if (speed_ms[device_id])
if (salts_left == 0) salts_left = 1;
- uint64_t progress_total = data.words_cnt * salts_left;
+ u64 progress_total = data.words_cnt * salts_left;
- uint64_t all_done = 0;
- uint64_t all_rejected = 0;
- uint64_t all_restored = 0;
+ u64 all_done = 0;
+ u64 all_rejected = 0;
+ u64 all_restored = 0;
for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
{
all_restored += data.words_progress_restored[salt_pos];
}
- uint64_t progress_cur = all_restored + all_done + all_rejected;
- uint64_t progress_end = progress_total;
+ u64 progress_cur = all_restored + all_done + all_rejected;
+ u64 progress_end = progress_total;
- uint64_t progress_skip = 0;
+ u64 progress_skip = 0;
if (data.skip)
{
else if (data.attack_kern == ATTACK_KERN_BF) progress_end *= data.bfs_cnt;
}
- uint64_t progress_cur_relative_skip = progress_cur - progress_skip;
- uint64_t progress_end_relative_skip = progress_end - progress_skip;
+ u64 progress_cur_relative_skip = progress_cur - progress_skip;
+ u64 progress_end_relative_skip = progress_end - progress_skip;
- float speed_ms_real = ms_running - ms_paused;
- uint64_t speed_plains_real = all_done;
+ float speed_ms_real = ms_running - ms_paused;
+ u64 speed_plains_real = all_done;
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
if (data.devices_status != STATUS_CRACKED)
{
- uint64_t words_per_ms = 0;
+ u64 words_per_ms = 0;
if (speed_plains_real && speed_ms_real)
{
if (words_per_ms)
{
- uint64_t progress_left_relative_skip = progress_end_relative_skip - progress_cur_relative_skip;
+ u64 progress_left_relative_skip = progress_end_relative_skip - progress_cur_relative_skip;
- uint64_t ms_left = progress_left_relative_skip / words_per_ms;
+ u64 ms_left = progress_left_relative_skip / words_per_ms;
sec_etc = ms_left / 1000;
}
{
log_info ("Time.Estimated.: 0 secs");
}
- else if ((uint64_t) sec_etc > ETC_MAX)
+ else if ((u64) sec_etc > ETC_MAX)
{
log_info ("Time.Estimated.: > 10 Years");
}
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- char display_dev_cur[16];
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- memset (display_dev_cur, 0, sizeof (display_dev_cur));
+ char display_dev_cur[16] = { 0 };
strncpy (display_dev_cur, "0.00", 4);
log_info ("Speed.Dev.#%d...: %9sH/s", device_id + 1, display_dev_cur);
}
- char display_all_cur[16];
-
- memset (display_all_cur, 0, sizeof (display_all_cur));
+ char display_all_cur[16] = { 0 };
strncpy (display_all_cur, "0.00", 4);
format_speed_display (hashes_all_ms * 1000, display_all_cur, sizeof (display_all_cur));
- if (data.devices_cnt > 1) log_info ("Speed.Dev.#*...: %9sH/s", display_all_cur);
+ if (data.devices_active > 1) log_info ("Speed.Dev.#*...: %9sH/s", display_all_cur);
const float digests_percent = (float) data.digests_done / data.digests_cnt;
const float salts_percent = (float) data.salts_done / data.salts_cnt;
// Restore point
- uint64_t restore_point = get_lowest_words_done ();
+ u64 restore_point = get_lowest_words_done ();
- uint64_t restore_total = data.words_base;
+ u64 restore_total = data.words_base;
float percent_restore = 0;
{
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
- log_info ("Progress.......: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
- log_info ("Rejected.......: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
+ log_info ("Progress.......: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
+ log_info ("Rejected.......: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
if (data.restore_disable == 0)
{
- log_info ("Restore.Point..: %llu/%llu (%.02f%%)", (uint64_t) 0, (uint64_t) 0, (float) 100);
+ log_info ("Restore.Point..: %llu/%llu (%.02f%%)", (u64) 0, (u64) 0, (float) 100);
}
}
else
}
}
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
hc_thread_mutex_lock (mux_adl);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
#define HM_STR_BUF_SIZE 255
if (data.hm_device[device_id].fan_supported == 1)
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
}
static void status_benchmark ()
if (data.words_cnt == 0) return;
- uint64_t speed_cnt[DEVICES_MAX];
- float speed_ms[DEVICES_MAX];
+ u64 speed_cnt[DEVICES_MAX];
+ float speed_ms[DEVICES_MAX];
- uint device_id;
-
- for (device_id = 0; device_id < data.devices_cnt; device_id++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
speed_cnt[device_id] = 0;
speed_ms[device_id] = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
hashes_dev_ms[device_id] = 0;
if (speed_ms[device_id])
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- char display_dev_cur[16];
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- memset (display_dev_cur, 0, sizeof (display_dev_cur));
+ char display_dev_cur[16] = { 0 };
strncpy (display_dev_cur, "0.00", 4);
log_info ("Speed.Dev.#%d.: %9sH/s", device_id + 1, display_dev_cur);
}
- char display_all_cur[16];
-
- memset (display_all_cur, 0, sizeof (display_all_cur));
+ char display_all_cur[16] = { 0 };
strncpy (display_all_cur, "0.00", 4);
format_speed_display (hashes_all_ms * 1000, display_all_cur, sizeof (display_all_cur));
- if (data.devices_cnt > 1) log_info ("Speed.Dev.#*.: %9sH/s", display_all_cur);
+ if (data.devices_active > 1) log_info ("Speed.Dev.#*.: %9sH/s", display_all_cur);
}
/**
for (i = 0, j = 0; j < line_len; i += 1, j += 2)
{
- line_buf[i] = hex_to_char (&line_buf[j]);
+ line_buf[i] = hex_to_u8 ((const u8 *) &line_buf[j]);
}
memset (line_buf + i, 0, line_len - i);
for (i = 0, j = 5; j < line_len - 1; i += 1, j += 2)
{
- line_buf[i] = hex_to_char (&line_buf[j]);
+ line_buf[i] = hex_to_u8 ((const u8 *) &line_buf[j]);
}
memset (line_buf + i, 0, line_len - i);
{
uint cnt = 0;
- char *buf = (char *) mymalloc (BUFSIZ);
+ char *buf = (char *) mymalloc (BUFSIZ + 1);
size_t nread_tmp = 0;
fflush (stdout);
}
-static void gidd_to_pw_t (hc_device_param_t *device_param, const uint64_t gidd, pw_t *pw)
+static void gidd_to_pw_t (hc_device_param_t *device_param, const u64 gidd, pw_t *pw)
{
hc_clEnqueueReadBuffer (device_param->command_queue, device_param->d_pws_buf, CL_TRUE, gidd * sizeof (pw_t), sizeof (pw_t), pw, 0, NULL, NULL);
}
int debug_rule_len = 0; // -1 error
uint debug_plain_len = 0;
- unsigned char debug_plain_ptr[BLOCK_SIZE];
+ u8 debug_plain_ptr[BLOCK_SIZE];
// hash
- char out_buf[4096]; memset (out_buf, 0, sizeof (out_buf));
+ char out_buf[4096] = { 0 };
ascii_digest (out_buf, salt_pos, digest_pos);
uint gidvid = plain.gidvid;
uint il_pos = plain.il_pos;
- uint64_t crackpos = device_param->words_off;
+ u64 crackpos = device_param->words_off;
uint plain_buf[16];
- unsigned char *plain_ptr = (unsigned char *) plain_buf;
+ u8 *plain_ptr = (u8 *) plain_buf;
unsigned int plain_len = 0;
if (data.attack_mode == ATTACK_MODE_STRAIGHT)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
}
else if (data.attack_mode == ATTACK_MODE_COMBI)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
}
else if (data.attack_mode == ATTACK_MODE_BF)
{
- uint64_t l_off = device_param->kernel_params_mp_l_buf64[3] + gidvid;
- uint64_t r_off = device_param->kernel_params_mp_r_buf64[3] + il_pos;
+ u64 l_off = device_param->kernel_params_mp_l_buf64[3] + gidvid;
+ u64 r_off = device_param->kernel_params_mp_r_buf64[3] + il_pos;
uint l_start = device_param->kernel_params_mp_l_buf32[5];
uint r_start = device_param->kernel_params_mp_r_buf32[5];
}
else if (data.attack_mode == ATTACK_MODE_HYBRID1)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
plain_len = pw.pw_len;
- uint64_t off = device_param->kernel_params_mp_buf64[3] + il_pos;
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
uint start = 0;
uint stop = device_param->kernel_params_mp_buf32[4];
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
- uint64_t gidd = gidvid;
- uint64_t gidm = 0;
+ u64 gidd = gidvid;
+ u64 gidm = 0;
pw_t pw;
plain_len = pw.pw_len;
- uint64_t off = device_param->kernel_params_mp_buf64[3] + il_pos;
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
uint start = 0;
uint stop = device_param->kernel_params_mp_buf32[4];
{
char *hashfile = data.hashfile;
- char new_hashfile[256];
- char old_hashfile[256];
-
- memset (new_hashfile, 0, sizeof (new_hashfile));
- memset (old_hashfile, 0, sizeof (old_hashfile));
+ char new_hashfile[256] = { 0 };
+ char old_hashfile[256] = { 0 };
snprintf (new_hashfile, 255, "%s.new", hashfile);
snprintf (old_hashfile, 255, "%s.old", hashfile);
if (data.hash_mode != 2500)
{
- char out_buf[4096];
-
- memset (out_buf, 0, sizeof (out_buf));
+ char out_buf[4096] = { 0 };
if (data.username == 1)
{
unlink (old_hashfile);
}
-static float find_kernel_blocks_div (const uint64_t total_left, const uint kernel_blocks_all)
+static float find_kernel_blocks_div (const u64 total_left, const uint kernel_blocks_all)
{
// function called only in case kernel_blocks_all > words_left)
kernel_blocks_div += kernel_blocks_div / 100;
- uint32_t kernel_blocks_new = (uint32_t) (kernel_blocks_all * kernel_blocks_div);
+ u32 kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
while (kernel_blocks_new < total_left)
{
kernel_blocks_div += kernel_blocks_div / 100;
- kernel_blocks_new = (uint32_t) (kernel_blocks_all * kernel_blocks_div);
+ kernel_blocks_new = (u32) (kernel_blocks_all * kernel_blocks_div);
}
if (data.quiet == 0)
}
else if (data.attack_kern == ATTACK_KERN_BF)
{
- const uint64_t off = device_param->words_off;
+ const u64 off = device_param->words_off;
device_param->kernel_params_mp_l_buf64[3] = off;
if (run_rule_engine (data.rule_len_r, data.rule_buf_r))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = _old_apply_rule (data.rule_buf_r, data.rule_len_r, line_buf, line_len, rule_buf_out);
line_len = MIN (line_len, PW_DICTMAX);
- char *ptr = (char *) device_param->combs_buf[i].i;
+ u8 *ptr = (u8 *) device_param->combs_buf[i].i;
memcpy (ptr, line_buf_new, line_len);
}
else if (data.attack_mode == ATTACK_MODE_BF)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_r_buf64[3] = off;
}
else if (data.attack_mode == ATTACK_MODE_HYBRID1)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_buf64[3] = off;
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
- uint64_t off = innerloop_pos;
+ u64 off = innerloop_pos;
device_param->kernel_params_mp_buf64[3] = off;
* progress
*/
- uint64_t perf_sum_all = (uint64_t) pw_cnt * (uint64_t) innerloop_left;
+ u64 perf_sum_all = (u64) pw_cnt * (u64) innerloop_left;
hc_thread_mutex_lock (mux_counter);
return;
}
-static void get_next_word_lm (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_lm (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr >= 'a' && *ptr <= 'z') *ptr -= 0x20;
*len = sz;
}
-static void get_next_word_uc (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_uc (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr >= 'a' && *ptr <= 'z') *ptr -= 0x20;
*len = sz;
}
-static void get_next_word_std (char *buf, uint32_t sz, uint32_t *len, uint32_t *off)
+static void get_next_word_std (char *buf, u32 sz, u32 *len, u32 *off)
{
char *ptr = buf;
- for (uint32_t i = 0; i < sz; i++, ptr++)
+ for (u32 i = 0; i < sz; i++, ptr++)
{
if (*ptr != '\n') continue;
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
}
#ifdef _POSIX
-static uint64_t count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, size_t *dictstat_nmemb)
+static u64 count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, size_t *dictstat_nmemb)
#endif
#ifdef _WIN
-static uint64_t count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, uint *dictstat_nmemb)
+static u64 count_words (wl_data_t *wl_data, FILE *fd, char *dictfile, dictstat_t *dictstat_base, uint *dictstat_nmemb)
#endif
{
hc_signal (NULL);
{
if (d_cache)
{
- uint64_t cnt = d_cache->cnt;
+ u64 cnt = d_cache->cnt;
- uint64_t keyspace = cnt;
+ u64 keyspace = cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
time_t now = 0;
time_t prev = 0;
- uint64_t comp = 0;
- uint64_t cnt = 0;
- uint64_t cnt2 = 0;
+ u64 comp = 0;
+ u64 cnt = 0;
+ u64 cnt2 = 0;
while (!feof (fd))
{
comp += wl_data->cnt;
- uint32_t i = 0;
+ u32 i = 0;
while (i < wl_data->cnt)
{
- uint32_t len;
- uint32_t off;
+ u32 len;
+ u32 off;
get_next_word_func (wl_data->buf + i, wl_data->cnt - i, &len, &off);
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
memcpy (p2->hi1, p1->hi1, 64 * sizeof (uint));
}
-static uint pw_add_to_hc1 (hc_device_param_t *device_param, const uint8_t *pw_buf, const uint pw_len)
+static uint pw_add_to_hc1 (hc_device_param_t *device_param, const u8 *pw_buf, const uint pw_len)
{
if (data.devices_status == STATUS_BYPASS) return 0;
uint cache_cnt = pw_cache->cnt;
- uint8_t *pw_hc1 = pw_cache->pw_buf.hc1[cache_cnt];
+ u8 *pw_hc1 = pw_cache->pw_buf.hc1[cache_cnt];
memcpy (pw_hc1, pw_buf, pw_len);
uint runtime_check = 0;
uint remove_check = 0;
uint status_check = 0;
- uint hwmon_check = 0;
uint restore_check = 0;
uint restore_left = data.restore_timer;
uint remove_left = data.remove_timer;
uint status_left = data.status_timer;
+ #ifdef HAVE_HWMON
+ uint hwmon_check = 0;
+
// these variables are mainly used for fan control (AMD only)
int *fan_speed_chgd = (int *) mycalloc (data.devices_cnt, sizeof (int));
int *temp_diff_old = (int *) mycalloc (data.devices_cnt, sizeof (int));
int *temp_diff_sum = (int *) mycalloc (data.devices_cnt, sizeof (int));
+ #ifdef HAVE_ADL
int temp_threshold = 1; // degrees celcius
int fan_speed_min = 15; // in percentage
int fan_speed_max = 100;
+ #endif // HAVE_ADL
time_t last_temp_check_time;
+ #endif // HAVE_HWMON
uint sleep_time = 1;
status_check = 1;
}
+ #ifdef HAVE_HWMON
if (data.gpu_temp_disable == 0)
{
time (&last_temp_check_time);
hwmon_check = 1;
}
+ #endif
- if ((runtime_check == 0) && (remove_check == 0) && (status_check == 0) && (hwmon_check == 0) && (restore_check == 0))
+ if ((runtime_check == 0) && (remove_check == 0) && (status_check == 0) && (restore_check == 0))
{
+ #ifdef HAVE_HWMON
+ if (hwmon_check == 0)
+ #endif
return (p);
}
if (data.devices_status != STATUS_RUNNING) continue;
+ #ifdef HAVE_HWMON
if (hwmon_check == 1)
{
hc_thread_mutex_lock (mux_adl);
if (Ta == 0) Ta = 1;
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if ((data.devices_param[i].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
- const int temperature = hm_get_temperature_with_device_id (i);
+ if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+
+ const int temperature = hm_get_temperature_with_device_id (device_id);
if (temperature > (int) data.gpu_temp_abort)
{
- log_error ("ERROR: Temperature limit on GPU %d reached, aborting...", i + 1);
+ log_error ("ERROR: Temperature limit on GPU %d reached, aborting...", device_id + 1);
if (data.devices_status != STATUS_QUIT) myabort ();
break;
}
+ #ifdef HAVE_ADL
const int gpu_temp_retain = data.gpu_temp_retain;
if (gpu_temp_retain) // VENDOR_ID_AMD implied
{
- if (data.hm_device[i].fan_supported == 1)
+ if (data.hm_device[device_id].fan_supported == 1)
{
int temp_cur = temperature;
int temp_diff_new = gpu_temp_retain - temp_cur;
- temp_diff_sum[i] = temp_diff_sum[i] + temp_diff_new;
+ temp_diff_sum[device_id] = temp_diff_sum[device_id] + temp_diff_new;
// calculate Ta value (time difference in seconds between the last check and this check)
// PID controller (3-term controller: proportional - Kp, integral - Ki, derivative - Kd)
- int fan_diff_required = (int) (Kp * (float)temp_diff_new + Ki * Ta * (float)temp_diff_sum[i] + Kd * ((float)(temp_diff_new - temp_diff_old[i])) / Ta);
+ int fan_diff_required = (int) (Kp * (float)temp_diff_new + Ki * Ta * (float)temp_diff_sum[device_id] + Kd * ((float)(temp_diff_new - temp_diff_old[device_id])) / Ta);
if (abs (fan_diff_required) >= temp_threshold)
{
- const int fan_speed_cur = hm_get_fanspeed_with_device_id (i);
+ const int fan_speed_cur = hm_get_fanspeed_with_device_id (device_id);
int fan_speed_level = fan_speed_cur;
- if (fan_speed_chgd[i] == 0) fan_speed_level = temp_cur;
+ if (fan_speed_chgd[device_id] == 0) fan_speed_level = temp_cur;
int fan_speed_new = fan_speed_level - fan_diff_required;
if (fan_speed_new != fan_speed_cur)
{
- int freely_change_fan_speed = (fan_speed_chgd[i] == 1);
+ int freely_change_fan_speed = (fan_speed_chgd[device_id] == 1);
int fan_speed_must_change = (fan_speed_new > fan_speed_cur);
if ((freely_change_fan_speed == 1) || (fan_speed_must_change == 1))
{
- hm_set_fanspeed_with_device_id_amd (i, fan_speed_new);
+ hm_set_fanspeed_with_device_id_amd (device_id, fan_speed_new);
- fan_speed_chgd[i] = 1;
+ fan_speed_chgd[device_id] = 1;
}
- temp_diff_old[i] = temp_diff_new;
+ temp_diff_old[device_id] = temp_diff_new;
}
}
}
}
+ #endif // HAVE_ADL
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON
if (restore_check == 1)
{
}
}
+ #ifdef HAVE_HWMON
myfree (fan_speed_chgd);
myfree (temp_diff_old);
myfree (temp_diff_sum);
+ #endif
p = NULL;
int (*parse_func) (char *, uint, hash_t *) = data.parse_func;
// buffers
- hash_t hash_buf;
-
- memset (&hash_buf, 0, sizeof (hash_buf));
+ hash_t hash_buf = { 0, 0, 0, 0, 0 };
hash_buf.digest = mymalloc (dgst_size);
pke[i] = byte_swap_32 (wpa->pke[i]);
}
- unsigned char mac1[6];
- unsigned char mac2[6];
+ u8 mac1[6];
+ u8 mac2[6];
memcpy (mac1, pke_ptr + 23, 6);
memcpy (mac2, pke_ptr + 29, 6);
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != (unsigned char) hex_to_char (&mac1_pos[j]))
+ if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
cracked = 0;
break;
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != (unsigned char) hex_to_char (&mac2_pos[j]))
+ if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
cracked = 0;
break;
return (p);
}
-static uint get_work (hc_device_param_t *device_param, const uint64_t max)
+static uint get_work (hc_device_param_t *device_param, const u64 max)
{
hc_thread_mutex_lock (mux_dispatcher);
- const uint64_t words_cur = data.words_cur;
- const uint64_t words_base = (data.limit == 0) ? data.words_base : data.limit;
+ const u64 words_cur = data.words_cur;
+ const u64 words_base = (data.limit == 0) ? data.words_base : data.limit;
device_param->words_off = words_cur;
- const uint64_t words_left = words_base - words_cur;
+ const u64 words_left = words_base - words_cur;
if (data.kernel_blocks_all > words_left)
{
{
if (device_param->kernel_blocks == device_param->kernel_blocks_user)
{
- const uint32_t kernel_blocks_new = (float) device_param->kernel_blocks * data.kernel_blocks_div;
- const uint32_t kernel_power_new = kernel_blocks_new;
+ const u32 kernel_blocks_new = (float) device_param->kernel_blocks * data.kernel_blocks_div;
+ const u32 kernel_power_new = kernel_blocks_new;
if (kernel_blocks_new < device_param->kernel_blocks)
{
{
hc_device_param_t *device_param = (hc_device_param_t *) p;
+ if (device_param->skipped) return NULL;
+
const uint attack_kern = data.attack_kern;
const uint kernel_blocks = device_param->kernel_blocks;
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
}
}
- device_param->pw_add (device_param, (uint8_t *) line_buf, line_len);
+ device_param->pw_add (device_param, (u8 *) line_buf, line_len);
words_cur++;
{
hc_device_param_t *device_param = (hc_device_param_t *) p;
+ if (device_param->skipped) return NULL;
+
const uint attack_mode = data.attack_mode;
const uint attack_kern = data.attack_kern;
if (work == 0) break;
- const uint64_t words_off = device_param->words_off;
- const uint64_t words_fin = words_off + work;
+ const u64 words_off = device_param->words_off;
+ const u64 words_fin = words_off + work;
const uint pw_cnt = work;
const uint pws_cnt = work;
wl_data->cnt = 0;
wl_data->pos = 0;
- uint64_t words_cur = 0;
+ u64 words_cur = 0;
while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
- uint64_t words_off = 0;
- uint64_t words_fin = 0;
+ u64 words_off = 0;
+ u64 words_fin = 0;
- uint64_t max = -1;
+ u64 max = -1;
while (max)
{
if (run_rule_engine (data.rule_len_l, data.rule_buf_l))
{
- char rule_buf_out[BLOCK_SIZE];
-
- memset (rule_buf_out, 0, sizeof (rule_buf_out));
+ char rule_buf_out[BLOCK_SIZE] = { 0 };
int rule_len_out = -1;
}
}
- device_param->pw_add (device_param, (uint8_t *) line_buf, line_len);
+ device_param->pw_add (device_param, (u8 *) line_buf, line_len);
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
data.dictfile = (char *) weak_hash_check;
+ uint cmd0_rule_old = data.kernel_rules_buf[0].cmds[0];
+
+ data.kernel_rules_buf[0].cmds[0] = 0;
+
/**
* run the kernel
*/
device_param->kernel_params_buf32[31] = 0;
data.dictfile = dictfile_old;
+
+ data.kernel_rules_buf[0].cmds[0] = cmd0_rule_old;
}
// hlfmt hashcat
// wrapper around mymalloc for ADL
+#if defined(HAVE_HWMON) && defined(HAVE_ADL)
void *__stdcall ADL_Main_Memory_Alloc (const int iSize)
{
return mymalloc (iSize);
}
+#endif
-static uint generate_bitmaps (const uint digests_cnt, const uint dgst_size, const uint dgst_shifts, char *digests_buf_ptr, const uint bitmap_mask, const uint bitmap_size, uint *bitmap_a, uint *bitmap_b, uint *bitmap_c, uint *bitmap_d, const uint64_t collisions_max)
+static uint generate_bitmaps (const uint digests_cnt, const uint dgst_size, const uint dgst_shifts, char *digests_buf_ptr, const uint bitmap_mask, const uint bitmap_size, uint *bitmap_a, uint *bitmap_b, uint *bitmap_c, uint *bitmap_d, const u64 collisions_max)
{
- uint64_t collisions = 0;
+ u64 collisions = 0;
const uint dgst_pos0 = data.dgst_pos0;
const uint dgst_pos1 = data.dgst_pos1;
digests_buf_ptr += dgst_size;
- const uint val0 = 1 << (digest_ptr[dgst_pos0] & 0x1f);
- const uint val1 = 1 << (digest_ptr[dgst_pos1] & 0x1f);
- const uint val2 = 1 << (digest_ptr[dgst_pos2] & 0x1f);
- const uint val3 = 1 << (digest_ptr[dgst_pos3] & 0x1f);
+ const uint val0 = 1u << (digest_ptr[dgst_pos0] & 0x1f);
+ const uint val1 = 1u << (digest_ptr[dgst_pos1] & 0x1f);
+ const uint val2 = 1u << (digest_ptr[dgst_pos2] & 0x1f);
+ const uint val3 = 1u << (digest_ptr[dgst_pos3] & 0x1f);
const uint idx0 = (digest_ptr[dgst_pos0] >> dgst_shifts) & bitmap_mask;
const uint idx1 = (digest_ptr[dgst_pos1] >> dgst_shifts) & bitmap_mask;
putenv ((char *) "DISPLAY=:0");
}
- /*
if (getenv ("GPU_MAX_ALLOC_PERCENT") == NULL)
putenv ((char *) "GPU_MAX_ALLOC_PERCENT=100");
+ if (getenv ("CPU_MAX_ALLOC_PERCENT") == NULL)
+ putenv ((char *) "CPU_MAX_ALLOC_PERCENT=100");
+
if (getenv ("GPU_USE_SYNC_OBJECTS") == NULL)
putenv ((char *) "GPU_USE_SYNC_OBJECTS=1");
- */
/**
* Real init
uint username = USERNAME;
uint remove = REMOVE;
uint remove_timer = REMOVE_TIMER;
- uint64_t skip = SKIP;
- uint64_t limit = LIMIT;
+ u64 skip = SKIP;
+ u64 limit = LIMIT;
uint keyspace = KEYSPACE;
uint potfile_disable = POTFILE_DISABLE;
uint debug_mode = DEBUG_MODE;
uint increment = INCREMENT;
uint increment_min = INCREMENT_MIN;
uint increment_max = INCREMENT_MAX;
+ #ifndef OSX
char *cpu_affinity = NULL;
+ #endif
char *opencl_devices = NULL;
char *opencl_platforms = NULL;
char *opencl_device_types = NULL;
+ uint opencl_vector_width = OPENCL_VECTOR_WIDTH;
char *truecrypt_keyfiles = NULL;
uint workload_profile = WORKLOAD_PROFILE;
uint kernel_accel = KERNEL_ACCEL;
uint kernel_loops = KERNEL_LOOPS;
+ #ifdef HAVE_HWMON
uint gpu_temp_disable = GPU_TEMP_DISABLE;
uint gpu_temp_abort = GPU_TEMP_ABORT;
uint gpu_temp_retain = GPU_TEMP_RETAIN;
+ #ifdef HAVE_ADL
uint powertune_enable = POWERTUNE_ENABLE;
+ #endif
+ #endif
uint logfile_disable = LOGFILE_DISABLE;
uint segment_size = SEGMENT_SIZE;
uint scrypt_tmto = SCRYPT_TMTO;
#define IDX_OPENCL_DEVICES 'd'
#define IDX_OPENCL_PLATFORMS 0xff72
#define IDX_OPENCL_DEVICE_TYPES 0xff73
+ #define IDX_OPENCL_VECTOR_WIDTH 0xff74
#define IDX_WORKLOAD_PROFILE 'w'
#define IDX_KERNEL_ACCEL 'n'
#define IDX_KERNEL_LOOPS 'u'
{"markov-classic", no_argument, 0, IDX_MARKOV_CLASSIC},
{"markov-threshold", required_argument, 0, IDX_MARKOV_THRESHOLD},
{"markov-hcstat", required_argument, 0, IDX_MARKOV_HCSTAT},
+ #ifndef OSX
{"cpu-affinity", required_argument, 0, IDX_CPU_AFFINITY},
+ #endif
{"opencl-devices", required_argument, 0, IDX_OPENCL_DEVICES},
{"opencl-platforms", required_argument, 0, IDX_OPENCL_PLATFORMS},
{"opencl-device-types", required_argument, 0, IDX_OPENCL_DEVICE_TYPES},
+ {"opencl-vector-width", required_argument, 0, IDX_OPENCL_VECTOR_WIDTH},
{"workload-profile", required_argument, 0, IDX_WORKLOAD_PROFILE},
{"kernel-accel", required_argument, 0, IDX_KERNEL_ACCEL},
{"kernel-loops", required_argument, 0, IDX_KERNEL_LOOPS},
+ #ifdef HAVE_HWMON
{"gpu-temp-disable", no_argument, 0, IDX_GPU_TEMP_DISABLE},
{"gpu-temp-abort", required_argument, 0, IDX_GPU_TEMP_ABORT},
{"gpu-temp-retain", required_argument, 0, IDX_GPU_TEMP_RETAIN},
+ #ifdef HAVE_ADL
{"powertune-enable", no_argument, 0, IDX_POWERTUNE_ENABLE},
+ #endif
+ #endif // HAVE_HWMON
{"logfile-disable", no_argument, 0, IDX_LOGFILE_DISABLE},
{"truecrypt-keyfiles", required_argument, 0, IDX_TRUECRYPT_KEYFILES},
{"segment-size", required_argument, 0, IDX_SEGMENT_SIZE},
char **rp_files = (char **) mycalloc (argc, sizeof (char *));
- int option_index;
- int c;
+ int option_index = 0;
+ int c = -1;
optind = 1;
optopt = 0;
- option_index = 0;
while (((c = getopt_long (argc, argv, short_options, long_options, &option_index)) != -1) && optopt == 0)
{
if (version)
{
- log_info (VERSION_TXT);
+ log_info ("%s (%s)", VERSION_TAG, VERSION_SUM);
return (0);
}
uint remove_timer_chgd = 0;
uint increment_min_chgd = 0;
uint increment_max_chgd = 0;
- uint gpu_temp_abort_chgd = 0;
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
uint gpu_temp_retain_chgd = 0;
+ uint gpu_temp_abort_chgd = 0;
+ #endif
optind = 1;
optopt = 0;
case IDX_HEX_CHARSET: hex_charset = 1; break;
case IDX_HEX_SALT: hex_salt = 1; break;
case IDX_HEX_WORDLIST: hex_wordlist = 1; break;
+ #ifndef OSX
case IDX_CPU_AFFINITY: cpu_affinity = optarg; break;
+ #endif
case IDX_OPENCL_DEVICES: opencl_devices = optarg; break;
case IDX_OPENCL_PLATFORMS: opencl_platforms = optarg; break;
case IDX_OPENCL_DEVICE_TYPES:
opencl_device_types = optarg; break;
+ case IDX_OPENCL_VECTOR_WIDTH:
+ opencl_vector_width = atoi (optarg); break;
case IDX_WORKLOAD_PROFILE: workload_profile = atoi (optarg); break;
case IDX_KERNEL_ACCEL: kernel_accel = atoi (optarg);
kernel_accel_chgd = 1; break;
case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
kernel_loops_chgd = 1; break;
+ #ifdef HAVE_HWMON
case IDX_GPU_TEMP_DISABLE: gpu_temp_disable = 1; break;
- case IDX_GPU_TEMP_ABORT: gpu_temp_abort_chgd = 1;
- gpu_temp_abort = atoi (optarg); break;
- case IDX_GPU_TEMP_RETAIN: gpu_temp_retain_chgd = 1;
- gpu_temp_retain = atoi (optarg); break;
+ case IDX_GPU_TEMP_ABORT: gpu_temp_abort = atoi (optarg);
+ #ifdef HAVE_ADL
+ gpu_temp_abort_chgd = 1;
+ #endif
+ break;
+ case IDX_GPU_TEMP_RETAIN: gpu_temp_retain = atoi (optarg);
+ #ifdef HAVE_ADL
+ gpu_temp_retain_chgd = 1;
+ #endif
+ break;
+ #ifdef HAVE_ADL
case IDX_POWERTUNE_ENABLE: powertune_enable = 1; break;
+ #endif
+ #endif // HAVE_HWMON
case IDX_LOGFILE_DISABLE: logfile_disable = 1; break;
case IDX_TRUECRYPT_KEYFILES: truecrypt_keyfiles = optarg; break;
case IDX_SEGMENT_SIZE: segment_size = atoi (optarg); break;
{
if (benchmark == 1)
{
- log_info ("%s v%.2f starting in benchmark-mode...", PROGNAME, (float) VERSION_BIN / 100);
+ log_info ("%s %s (%s) starting in benchmark-mode...", PROGNAME, VERSION_TAG, VERSION_SUM);
log_info ("");
}
else if (restore == 1)
{
- log_info ("%s v%.2f starting in restore-mode...", PROGNAME, (float) VERSION_BIN / 100);
+ log_info ("%s %s (%s) starting in restore-mode...", PROGNAME, VERSION_TAG, VERSION_SUM);
log_info ("");
}
else
{
- log_info ("%s v%.2f starting...", PROGNAME, (float) VERSION_BIN / 100);
+ log_info ("%s %s (%s) starting...", PROGNAME, VERSION_TAG, VERSION_SUM);
log_info ("");
}
return (-1);
}
+ if ((opencl_vector_width != 0) && (opencl_vector_width != 1) && (opencl_vector_width != 2) && (opencl_vector_width != 4) && (opencl_vector_width != 8))
+ {
+ log_error ("ERROR: opencl-vector-width %i not allowed", opencl_vector_width);
+
+ return (-1);
+ }
+
if (show == 1 || left == 1)
{
attack_mode = ATTACK_MODE_NONE;
data.benchmark = benchmark;
data.skip = skip;
data.limit = limit;
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
data.powertune_enable = powertune_enable;
+ #endif
data.logfile_disable = logfile_disable;
data.truecrypt_keyfiles = truecrypt_keyfiles;
data.scrypt_tmto = scrypt_tmto;
* cpu affinity
*/
+ #ifndef OSX
if (cpu_affinity)
{
set_cpu_affinity (cpu_affinity);
}
+ #endif
if (rp_gen_seed_chgd == 0)
{
logfile_top_uint (force);
logfile_top_uint (kernel_accel);
logfile_top_uint (kernel_loops);
+ #ifdef HAVE_HWMON
logfile_top_uint (gpu_temp_abort);
logfile_top_uint (gpu_temp_disable);
logfile_top_uint (gpu_temp_retain);
+ #endif
logfile_top_uint (hash_mode);
logfile_top_uint (hex_charset);
logfile_top_uint (hex_salt);
logfile_top_uint (outfile_check_timer);
logfile_top_uint (outfile_format);
logfile_top_uint (potfile_disable);
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
logfile_top_uint (powertune_enable);
+ #endif
logfile_top_uint (scrypt_tmto);
logfile_top_uint (quiet);
logfile_top_uint (remove);
logfile_top_uint64 (limit);
logfile_top_uint64 (skip);
logfile_top_char (separator);
+ #ifndef OSX
logfile_top_string (cpu_affinity);
+ #endif
logfile_top_string (custom_charset_1);
logfile_top_string (custom_charset_2);
logfile_top_string (custom_charset_3);
logfile_top_string (opencl_devices);
logfile_top_string (opencl_platforms);
logfile_top_string (opencl_device_types);
+ logfile_top_uint (opencl_vector_width);
logfile_top_string (induction_dir);
logfile_top_string (markov_hcstat);
logfile_top_string (outfile);
* OpenCL platform selection
*/
- uint opencl_platforms_filter = setup_opencl_platforms_filter (opencl_platforms);
+ u32 opencl_platforms_filter = setup_opencl_platforms_filter (opencl_platforms);
/**
* OpenCL device selection
*/
- uint opencl_devicemask = devices_to_devicemask (opencl_devices);
+ u32 devices_filter = setup_devices_filter (opencl_devices);
/**
* OpenCL device type selection
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_NOT_SALTED
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_APPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_PREPENDED_SALT
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 14;
dgst_pos1 = 15;
parse_func = hmacsha512_parse_hash;
sort_by_digest = sort_by_digest_8_8;
opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_NOT_ITERATED;
dgst_pos0 = 14;
dgst_pos1 = 15;
parse_func = hmacsha512_parse_hash;
sort_by_digest = sort_by_digest_8_8;
opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_NOT_ITERATED;
dgst_pos0 = 14;
dgst_pos1 = 15;
dgst_size = DGST_SIZE_8_8;
parse_func = sha512crypt_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
parse_func = keccak_parse_hash;
sort_by_digest = sort_by_digest_8_25;
opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 2;
dgst_pos1 = 3;
dgst_size = DGST_SIZE_8_8;
parse_func = truecrypt_parse_hash_1k;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = truecrypt_parse_hash_1k;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = truecrypt_parse_hash_1k;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = sha512aix_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_16;
parse_func = sha512osx_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_16;
parse_func = sha512grub_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
sort_by_digest = sort_by_digest_4_4;
opti_type = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;
- dgst_pos0 = 3;
- dgst_pos1 = 7;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
dgst_pos2 = 2;
- dgst_pos3 = 6;
+ dgst_pos3 = 3;
break;
case 7600: hash_type = HASH_TYPE_SHA1;
dgst_size = DGST_SIZE_8_8;
parse_func = drupal7_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
| OPTI_TYPE_EARLY_SKIP
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_NOT_SALTED
+ | OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_RAW_HASH;
dgst_pos0 = 6;
dgst_pos1 = 7;
dgst_size = DGST_SIZE_8_16;
parse_func = pbkdf2_sha512_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_8;
parse_func = ecryptfs_parse_hash;
sort_by_digest = sort_by_digest_8_8;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_8_16;
parse_func = oraclet_parse_hash;
sort_by_digest = sort_by_digest_8_16;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_USES_BITS_64;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
* potfile
*/
- char potfile[256];
-
- memset (potfile, 0, sizeof (potfile));
+ char potfile[256] = { 0 };
snprintf (potfile, sizeof (potfile) - 1, "%s/%s.pot", session_dir, session);
continue;
}
+ if (plain_len >= 255) continue;
+
memcpy (pot_ptr->plain_buf, plain_buf, plain_len);
pot_ptr->plain_len = plain_len;
* charsets : keep them together for easier maintenance
*/
- cs_t mp_sys[6];
- cs_t mp_usr[4];
-
- memset (mp_sys, 0, sizeof (mp_sys));
- memset (mp_usr, 0, sizeof (mp_usr));
+ cs_t mp_sys[6] = { { { 0 }, 0 } };
+ cs_t mp_usr[4] = { { { 0 }, 0 } };
mp_setup_sys (mp_sys);
if ((username && (remove || show)) || (opts_type & OPTS_TYPE_HASH_COPY))
{
- uint32_t hash_pos;
+ u32 hash_pos;
for (hash_pos = 0; hash_pos < hashes_avail; hash_pos++)
{
uint hccap_size = sizeof (hccap_t);
- char in[hccap_size];
+ char *in = (char *) mymalloc (hccap_size);
while (!feof (fp))
{
- int n = fread (&in, hccap_size, 1, fp);
+ int n = fread (in, hccap_size, 1, fp);
if (n != 1)
{
wpa_t *wpa = (wpa_t *) hashes_buf[hashes_cnt].esalt;
- unsigned char *pke_ptr = (unsigned char *) wpa->pke;
+ u8 *pke_ptr = (u8 *) wpa->pke;
// do the appending task
}
fclose (fp);
+
+ myfree (in);
}
else if (hash_mode == 3000)
{
pke[i] = byte_swap_32 (wpa->pke[i]);
}
- unsigned char mac1[6];
- unsigned char mac2[6];
+ u8 mac1[6];
+ u8 mac2[6];
memcpy (mac1, pke_ptr + 23, 6);
memcpy (mac2, pke_ptr + 29, 6);
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac1[i] != (unsigned char) hex_to_char (&mac1_pos[j]))
+ if (mac1[i] != hex_to_u8 ((const u8 *) &mac1_pos[j]))
{
found = NULL;
break;
for (uint i = 0, j = 0; i < 6; i++, j += 2)
{
- if (mac2[i] != (unsigned char) hex_to_char (&mac2_pos[j]))
+ if (mac2[i] != hex_to_u8 ((const u8 *) &mac2_pos[j]))
{
found = NULL;
break;
do
{
- truecrypt_crc32 (keyfile, (unsigned char *) keyfile_buf);
+ truecrypt_crc32 (keyfile, (u8 *) keyfile_buf);
} while ((keyfile = strtok (NULL, ",")) != NULL);
* Some algorithm, like descrypt, can benefit from JIT compilation
*/
- uint force_jit_compilation = 0;
+ int force_jit_compilation = -1;
if (hash_mode == 8900)
{
* OpenCL platforms: detect
*/
- cl_platform_id CL_platforms[CL_PLATFORMS_MAX];
+ cl_platform_id platforms[CL_PLATFORMS_MAX];
+
+ cl_uint platforms_cnt = 0;
+
+ cl_device_id platform_devices[DEVICES_MAX];
- uint CL_platforms_cnt = 0;
+ cl_uint platform_devices_cnt;
- hc_clGetPlatformIDs (CL_PLATFORMS_MAX, CL_platforms, &CL_platforms_cnt);
+ hc_clGetPlatformIDs (CL_PLATFORMS_MAX, platforms, &platforms_cnt);
- if (CL_platforms_cnt == 0)
+ if (platforms_cnt == 0)
{
log_error ("ERROR: No OpenCL compatible platform found");
* OpenCL platforms: For each platform check if we need to unset features that we can not use, eg: temp_retain
*/
- for (uint i = 0; i < CL_platforms_cnt; i++)
+ for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
{
- cl_platform_id CL_platform = CL_platforms[i];
+ cl_platform_id platform = platforms[platform_id];
- char CL_platform_vendor[INFOSZ];
+ char platform_vendor[INFOSZ] = { 0 };
- memset (CL_platform_vendor, 0, sizeof (CL_platform_vendor));
+ hc_clGetPlatformInfo (platform, CL_PLATFORM_VENDOR, sizeof (platform_vendor), platform_vendor, NULL);
- hc_clGetPlatformInfo (CL_platform, CL_PLATFORM_VENDOR, sizeof (CL_platform_vendor), CL_platform_vendor, NULL);
-
- if (strcmp (CL_platform_vendor, CL_VENDOR_NV) == 0)
+ #ifdef HAVE_HWMON
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ if (strcmp (platform_vendor, CL_VENDOR_NV) == 0)
{
// make sure that we do not directly control the fan for NVidia
data.gpu_temp_retain = gpu_temp_retain;
}
+ #endif // HAVE_NVML || HAVE_NVAPI
+ #endif
}
/**
- * OpenCL devices: push all devices from all platforms into the same device array
+ * OpenCL devices: simply push all devices from all platforms into the same device array
*/
- uint devices_plf[DEVICES_MAX]; // device number on platform, required for hardware-management mapping
+ hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t));
- cl_device_id devices_all[DEVICES_MAX];
- cl_device_id devices[DEVICES_MAX];
+ data.devices_param = devices_param;
- uint devices_all_cnt = 0;
uint devices_cnt = 0;
- for (uint i = 0; i < CL_platforms_cnt; i++)
+ uint devices_active = 0;
+
+ for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
{
- if ((opencl_platforms_filter & (1 << i)) == 0) continue;
+ if ((opencl_platforms_filter & (1 << platform_id)) == 0) continue;
- cl_platform_id CL_platform = CL_platforms[i];
+ cl_platform_id platform = platforms[platform_id];
- cl_device_id devices_platform[DEVICES_MAX];
+ hc_clGetDeviceIDs (platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
- cl_uint devices_platform_cnt = 0;
+ for (uint platform_devices_id = 0; platform_devices_id < platform_devices_cnt; platform_devices_id++)
+ {
+ const uint device_id = devices_cnt;
- hc_clGetDeviceIDs (CL_platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, devices_platform, &devices_platform_cnt);
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- for (uint j = 0; j < devices_platform_cnt; j++)
- {
- devices_all[devices_all_cnt] = devices_platform[j];
- devices_plf[devices_all_cnt] = j;
+ device_param->device = platform_devices[platform_devices_id];
- devices_all_cnt++;
- }
- }
+ device_param->device_id = device_id;
- /**
- * enable custom signal handler(s)
- */
+ device_param->platform_devices_id = platform_devices_id;
- if (benchmark == 0)
- {
- hc_signal (sigHandler_default);
- }
- else
- {
- hc_signal (sigHandler_benchmark);
- }
+ // device_type
- /**
- * devices mask and properties
- */
+ cl_device_type device_type;
- uint quiet_sav = quiet;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
- if (benchmark)
- {
- quiet = 0;
- }
+ device_type &= ~CL_DEVICE_TYPE_DEFAULT;
- for (uint device_all_id = 0; device_all_id < devices_all_cnt; device_all_id++)
- {
- // skip the device, if the user did specify a list of GPUs to skip
+ device_param->device_type = device_type;
- if (opencl_devicemask)
- {
- uint device_all_id_mask = 1 << device_all_id;
+ // vendor_id
- if ((device_all_id_mask & opencl_devicemask) != device_all_id_mask)
- {
- if (quiet == 0 && algorithm_pos == 0) log_info ("Device #%d: skipped", device_all_id + 1);
+ cl_uint vendor_id = 0;
- continue;
- }
- }
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_VENDOR_ID, sizeof (vendor_id), &vendor_id, NULL);
- const uint device_id = devices_cnt;
+ device_param->vendor_id = vendor_id;
- devices[device_id] = devices_all[device_all_id];
+ // device_name
- devices_plf[device_id] = devices_plf[device_all_id];
+ char *device_name = (char *) mymalloc (INFOSZ);
- cl_device_type device_type = 0;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_NAME, INFOSZ, device_name, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
+ device_param->device_name = device_name;
- device_type &= ~CL_DEVICE_TYPE_DEFAULT;
+ // device_version
- if ((device_type & device_types_filter) == 0)
- {
- if (quiet == 0 && algorithm_pos == 0) log_info ("Device #%d: skipped", device_all_id + 1);
+ char *device_version = (char *) mymalloc (INFOSZ);
- continue;
- }
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_VERSION, INFOSZ, device_version, NULL);
- char device_name[INFOSZ];
- char device_version[INFOSZ];
+ device_param->device_version = device_version;
- memset (device_name, 0, sizeof (device_name));
- memset (device_version, 0, sizeof (device_version));
+ if (strstr (device_version, "pocl"))
+ {
+ // pocl returns the real vendor_id in CL_DEVICE_VENDOR_ID which causes many problems because of hms and missing amd_bfe () etc
+ // we need to overwrite vendor_id to avoid this. maybe open pocl issue?
- cl_ulong global_mem_size;
- cl_ulong max_mem_alloc_size;
- cl_uint max_clock_frequency;
- cl_uint max_compute_units;
+ cl_uint vendor_id = VENDOR_ID_GENERIC;
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_NAME, sizeof (device_name), &device_name, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (global_mem_size), &global_mem_size, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (max_mem_alloc_size), &max_mem_alloc_size, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (max_clock_frequency), &max_clock_frequency, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (max_compute_units), &max_compute_units, NULL);
- hc_clGetDeviceInfo (devices[device_id], CL_DEVICE_VERSION, sizeof (device_version), &device_version, NULL);
+ device_param->vendor_id = vendor_id;
+ }
- if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
- {
- log_info ("Device #%u: %s, %lu/%lu MB allocatable, %dMhz, %uMCU",
- device_all_id + 1,
- device_name,
- (unsigned int) (max_mem_alloc_size / 1024 / 1024),
- (unsigned int) (global_mem_size / 1024 / 1024),
- (unsigned int) (max_clock_frequency),
- (unsigned int) max_compute_units);
- }
+ // vector_width
- if (strstr (device_version, "pocl"))
- {
- if (force == 0)
+ cl_uint vector_width;
+
+ if (opencl_vector_width == OPENCL_VECTOR_WIDTH)
{
- log_info ("");
- log_info ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
- log_info ("You are STRONGLY encouraged not to use it");
- log_info ("You can use --force to override this but do not post error reports if you do so");
- log_info ("");
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, sizeof (vector_width), &vector_width, NULL);
- return (-1);
- }
- }
+ if ((vendor_id == VENDOR_ID_NV) && (strstr (device_name, " Ti") || strstr (device_name, " TI")))
+ {
+ // Yeah that's a super bad hack, but there's no other attribute we could use
- devices_cnt++;
- }
+ if (vector_width < 2) vector_width *= 2;
+ }
- quiet = quiet_sav;
+ if (opti_type & OPTI_TYPE_USES_BITS_64)
+ {
+ if (vector_width > 1) vector_width /= 2;
+ }
+ }
+ else
+ {
+ vector_width = opencl_vector_width;
+ }
- if (devices_cnt == 0)
- {
- log_error ("ERROR: No devices left that matches your specification.");
+ if (vector_width > 8) vector_width = 8;
- return (-1);
- }
+ device_param->vector_width = vector_width;
- data.devices_cnt = devices_cnt;
+ // max_compute_units
- if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
- {
- log_info ("");
- }
+ cl_uint device_processors;
- /**
- * User-defined GPU temp handling
- */
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (device_processors), &device_processors, NULL);
- if (gpu_temp_disable == 1)
- {
- gpu_temp_abort = 0;
- gpu_temp_retain = 0;
- }
+ device_param->device_processors = device_processors;
- if ((gpu_temp_abort != 0) && (gpu_temp_retain != 0))
- {
- if (gpu_temp_abort < gpu_temp_retain)
- {
- log_error ("ERROR: invalid values for gpu-temp-abort. Parameter gpu-temp-abort is less than gpu-temp-retain.");
+ // max_mem_alloc_size
- return (-1);
- }
- }
+ cl_ulong device_maxmem_alloc;
- data.gpu_temp_disable = gpu_temp_disable;
- data.gpu_temp_abort = gpu_temp_abort;
- data.gpu_temp_retain = gpu_temp_retain;
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (device_maxmem_alloc), &device_maxmem_alloc, NULL);
- /**
- * inform the user
- */
+ device_param->device_maxmem_alloc = device_maxmem_alloc;
- if (data.quiet == 0)
- {
- log_info ("Hashes: %u hashes; %u unique digests, %u unique salts", hashes_cnt_orig, digests_cnt, salts_cnt);
+ // global_mem_size
- log_info ("Bitmaps: %u bits, %u entries, 0x%08x mask, %u bytes, %u/%u rotates", bitmap_bits, bitmap_nums, bitmap_mask, bitmap_size, bitmap_shift1, bitmap_shift2);
+ cl_ulong device_global_mem;
- if (attack_mode == ATTACK_MODE_STRAIGHT)
- {
- log_info ("Rules: %u", kernel_rules_cnt);
- }
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (device_global_mem), &device_global_mem, NULL);
- if (opti_type)
- {
- log_info ("Applicable Optimizers:");
+ device_param->device_global_mem = device_global_mem;
- for (uint i = 0; i < 32; i++)
- {
- const uint opti_bit = 1 << i;
+ // max_clock_frequency
- if (opti_type & opti_bit) log_info ("* %s", stroptitype (opti_bit));
- }
- }
+ cl_uint device_maxclock_frequency;
- /**
- * Watchdog and Temperature balance
- */
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (device_maxclock_frequency), &device_maxclock_frequency, NULL);
- if (gpu_temp_abort == 0)
- {
- log_info ("Watchdog: Temperature abort trigger disabled");
- }
- else
- {
- log_info ("Watchdog: Temperature abort trigger set to %uc", gpu_temp_abort);
- }
+ device_param->device_maxclock_frequency = device_maxclock_frequency;
- if (gpu_temp_retain == 0)
- {
- log_info ("Watchdog: Temperature retain trigger disabled");
- }
- else
- {
- log_info ("Watchdog: Temperature retain trigger set to %uc", gpu_temp_retain);
- }
- }
+ // skipped
- if (data.quiet == 0) log_info ("");
+ const u32 skipped1 = ((devices_filter & (1 << device_id)) == 0);
+ const u32 skipped2 = ((device_types_filter & (device_type)) == 0);
- /**
- * devices init
- */
+ device_param->skipped = (skipped1 || skipped2);
- int *temp_retain_fanspeed_value = (int *) mycalloc (devices_cnt, sizeof (int));
+ // driver_version
- ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (devices_cnt, sizeof (ADLOD6MemClockState));
+ char *driver_version = (char *) mymalloc (INFOSZ);
- int *od_power_control_status = (int *) mycalloc (devices_cnt, sizeof (int));
+ hc_clGetDeviceInfo (device_param->device, CL_DRIVER_VERSION, INFOSZ, driver_version, NULL);
- hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (devices_cnt, sizeof (hc_device_param_t));
+ device_param->driver_version = driver_version;
- data.devices_param = devices_param;
+ // device_name_chksum
- for (uint device_id = 0; device_id < devices_cnt; device_id++)
- {
- hc_device_param_t *device_param = &data.devices_param[device_id];
+ char *device_name_chksum = (char *) mymalloc (INFOSZ);
- cl_device_id device = devices[device_id];
-
- device_param->device = device;
+ #if __x86_64__
+ snprintf (device_name_chksum, INFOSZ - 1, "%u-%u-%u-%s-%s-%s-%u", 64, device_param->vendor_id, device_param->vector_width, device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
+ #else
+ snprintf (device_name_chksum, INFOSZ - 1, "%u-%u-%u-%s-%s-%s-%u", 32, device_param->vendor_id, device_param->vector_width, device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
+ #endif
- cl_device_type device_type = 0;
+ uint device_name_digest[4];
- hc_clGetDeviceInfo (device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
+ device_name_digest[0] = 0;
+ device_name_digest[1] = 0;
+ device_name_digest[2] = 0;
+ device_name_digest[3] = 0;
- device_param->device_type = device_type;
+ md5_64 ((uint *) device_name_chksum, device_name_digest);
- cl_uint max_compute_units = 0;
+ sprintf (device_name_chksum, "%08x", device_name_digest[0]);
- hc_clGetDeviceInfo (device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (max_compute_units), &max_compute_units, NULL);
+ device_param->device_name_chksum = device_name_chksum;
- device_param->device_processors = max_compute_units;
+ // device_processor_cores
- cl_ulong max_mem_alloc_size = 0;
+ if (device_type & CL_DEVICE_TYPE_CPU)
+ {
+ cl_uint device_processor_cores = 1;
- hc_clGetDeviceInfo (device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (max_mem_alloc_size), &max_mem_alloc_size, NULL);
+ device_param->device_processor_cores = device_processor_cores;
+ }
- device_param->device_maxmem_alloc = max_mem_alloc_size;
+ if (device_type & CL_DEVICE_TYPE_GPU)
+ {
+ if (vendor_id == VENDOR_ID_AMD)
+ {
+ cl_uint device_processor_cores = 0;
- cl_uint vendor_id = 0;
+ #define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043
- hc_clGetDeviceInfo (device, CL_DEVICE_VENDOR_ID, sizeof (vendor_id), &vendor_id, NULL);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof (device_processor_cores), &device_processor_cores, NULL);
- device_param->vendor_id = vendor_id;
+ device_param->device_processor_cores = device_processor_cores;
+ }
+ else if (vendor_id == VENDOR_ID_NV)
+ {
+ cl_uint kernel_exec_timeout = 0;
- char tmp[INFOSZ], t1[64];
+ #define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
- memset (tmp, 0, sizeof (tmp));
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, sizeof (kernel_exec_timeout), &kernel_exec_timeout, NULL);
- hc_clGetDeviceInfo (device, CL_DEVICE_NAME, sizeof (tmp), &tmp, NULL);
+ device_param->kernel_exec_timeout = kernel_exec_timeout;
- device_param->device_name = mystrdup (tmp);
+ cl_uint device_processor_cores = 0;
- memset (tmp, 0, sizeof (tmp));
+ #define CL_DEVICE_WARP_SIZE_NV 0x4003
- hc_clGetDeviceInfo (device, CL_DEVICE_VERSION, sizeof (tmp), &tmp, NULL);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_WARP_SIZE_NV, sizeof (device_processor_cores), &device_processor_cores, NULL);
- if (strstr (tmp, "pocl"))
- {
- // pocl returns the real AMD vendor_id id in CL_DEVICE_VENDOR_ID which causes many problems because of hms and missing amd_bfe () etc
- // we need to overwrite vendor_id to avoid this. maybe open pocl issue?
+ device_param->device_processor_cores = device_processor_cores;
- cl_uint vendor_id = 0xffff;
+ cl_uint sm_minor = 0;
+ cl_uint sm_major = 0;
- device_param->vendor_id = vendor_id;
- }
+ #define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
+ #define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
- memset (t1, 0, sizeof (t1));
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
+ hc_clGetDeviceInfo (device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
- sscanf (tmp, "%*16s %*16s %*16s (%[^)]16s)", t1);
+ device_param->sm_minor = sm_minor;
+ device_param->sm_major = sm_major;
+ }
+ else
+ {
+ cl_uint device_processor_cores = 1;
- device_param->device_version = mystrdup (t1);
+ device_param->device_processor_cores = device_processor_cores;
+ }
+ }
- memset (tmp, 0, sizeof (tmp));
+ // display results
- hc_clGetDeviceInfo (device, CL_DRIVER_VERSION, sizeof (tmp), &tmp, NULL);
+ if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
+ {
+ if (device_param->skipped == 0)
+ {
+ log_info ("Device #%u: %s, %lu/%lu MB allocatable, %dMhz, %uMCU",
+ device_id + 1,
+ device_name,
+ (unsigned int) (device_maxmem_alloc / 1024 / 1024),
+ (unsigned int) (device_global_mem / 1024 / 1024),
+ (unsigned int) (device_maxclock_frequency),
+ (unsigned int) device_processors);
+ }
+ else
+ {
+ log_info ("Device #%u: %s, skipped",
+ device_id + 1,
+ device_name);
+ }
+ }
- device_param->driver_version = mystrdup (tmp);
+ // common driver check
- // create some filename that is easier to read on cached folder
+ if (device_param->skipped == 0)
+ {
+ if (strstr (device_version, "pocl"))
+ {
+ if (force == 0)
+ {
+ log_info ("");
+ log_info ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
+ log_info ("You are STRONGLY encouraged not to use it");
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
- snprintf (tmp, sizeof (tmp) - 1, "%u-%s-%s-%s-%d", device_param->vendor_id, device_param->device_name, device_param->device_version, device_param->driver_version, COMPTIME);
+ return (-1);
+ }
+ }
- uint device_name_digest[4];
+ if (device_type & CL_DEVICE_TYPE_GPU)
+ {
+ if (vendor_id == VENDOR_ID_NV)
+ {
+ if (device_param->kernel_exec_timeout != 0)
+ {
+ if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
+ if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
+ }
+ }
+ else if (vendor_id == VENDOR_ID_AMD)
+ {
+ int catalyst_check = (force == 1) ? 0 : 1;
- device_name_digest[0] = 0;
- device_name_digest[1] = 0;
- device_name_digest[2] = 0;
- device_name_digest[3] = 0;
+ int catalyst_warn = 0;
- md5_64 ((uint *) tmp, device_name_digest);
+ int catalyst_broken = 0;
- sprintf (tmp, "%08x", device_name_digest[0]);
+ if (catalyst_check == 1)
+ {
+ catalyst_warn = 1;
- device_param->device_name_chksum = mystrdup (tmp);
+ // v14.9 and higher
+ if (atoi (device_param->driver_version) >= 1573)
+ {
+ catalyst_warn = 0;
+ }
- if (device_type & CL_DEVICE_TYPE_CPU)
- {
- cl_uint device_processor_cores = 1;
+ catalyst_check = 0;
+ }
- device_param->device_processor_cores = device_processor_cores;
- }
+ if (catalyst_broken == 1)
+ {
+ log_info ("");
+ log_info ("ATTENTION! The installed catalyst driver in your system is known to be broken!");
+ log_info ("It will pass over cracked hashes and does not report them as cracked");
+ log_info ("You are STRONGLY encouraged not to use it");
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
- if (device_type & CL_DEVICE_TYPE_GPU)
- {
- if (vendor_id == VENDOR_ID_AMD)
- {
- cl_uint device_processor_cores = 0;
+ return (-1);
+ }
- #define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043
+ if (catalyst_warn == 1)
+ {
+ log_info ("");
+ log_info ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
+ log_info ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
+ log_info ("See oclHashcat's homepage for official supported catalyst drivers");
+ #ifdef _WIN
+ log_info ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
+ #endif
+ log_info ("You can use --force to override this but do not post error reports if you do so");
+ log_info ("");
- hc_clGetDeviceInfo (device, CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof (device_processor_cores), &device_processor_cores, NULL);
+ return (-1);
+ }
+ }
+ }
- device_param->device_processor_cores = device_processor_cores;
+ devices_active++;
}
- else if (vendor_id == VENDOR_ID_NV)
- {
- cl_uint kernel_exec_timeout = 0;
- #define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
+ // next please
- hc_clGetDeviceInfo (device, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, sizeof (kernel_exec_timeout), &kernel_exec_timeout, NULL);
+ devices_cnt++;
+ }
+ }
- device_param->kernel_exec_timeout = kernel_exec_timeout;
+ if (devices_active == 0)
+ {
+ log_error ("ERROR: No devices found/left");
- cl_uint device_processor_cores = 0;
+ return (-1);
+ }
- #define CL_DEVICE_WARP_SIZE_NV 0x4003
+ data.devices_cnt = devices_cnt;
- hc_clGetDeviceInfo (device, CL_DEVICE_WARP_SIZE_NV, sizeof (device_processor_cores), &device_processor_cores, NULL);
+ data.devices_active = devices_active;
- device_param->device_processor_cores = device_processor_cores;
+ if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
+ {
+ log_info ("");
+ }
- cl_uint sm_minor = 0;
- cl_uint sm_major = 0;
+ /**
+ * OpenCL devices: allocate buffer for device specific information
+ */
- #define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
- #define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
+ #ifdef HAVE_HWMON
+ int *temp_retain_fanspeed_value = (int *) mycalloc (devices_cnt, sizeof (int));
- hc_clGetDeviceInfo (device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
- hc_clGetDeviceInfo (device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
+ #ifdef HAVE_ADL
+ ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (devices_cnt, sizeof (ADLOD6MemClockState));
- device_param->sm_minor = sm_minor;
- device_param->sm_major = sm_major;
- }
- else
- {
- cl_uint device_processor_cores = 1;
+ int *od_power_control_status = (int *) mycalloc (devices_cnt, sizeof (int));
+ #endif // ADL
+ #endif
- device_param->device_processor_cores = device_processor_cores;
- }
- }
+ /**
+ * enable custom signal handler(s)
+ */
- /**
- * common driver check
- */
+ if (benchmark == 0)
+ {
+ hc_signal (sigHandler_default);
+ }
+ else
+ {
+ hc_signal (sigHandler_benchmark);
+ }
- if (device_type & CL_DEVICE_TYPE_GPU)
+ /**
+ * User-defined GPU temp handling
+ */
+
+ #ifdef HAVE_HWMON
+ if (gpu_temp_disable == 1)
+ {
+ gpu_temp_abort = 0;
+ gpu_temp_retain = 0;
+ }
+
+ if ((gpu_temp_abort != 0) && (gpu_temp_retain != 0))
+ {
+ if (gpu_temp_abort < gpu_temp_retain)
{
- if (vendor_id == VENDOR_ID_NV)
- {
- if (device_param->kernel_exec_timeout != 0)
- {
- if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
- if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
- }
- }
- else if (vendor_id == VENDOR_ID_AMD)
- {
- int catalyst_check = (force == 1) ? 0 : 1;
+ log_error ("ERROR: invalid values for gpu-temp-abort. Parameter gpu-temp-abort is less than gpu-temp-retain.");
- int catalyst_warn = 0;
+ return (-1);
+ }
+ }
- int catalyst_broken = 0;
+ data.gpu_temp_disable = gpu_temp_disable;
+ data.gpu_temp_abort = gpu_temp_abort;
+ data.gpu_temp_retain = gpu_temp_retain;
+ #endif
- if (catalyst_check == 1)
- {
- catalyst_warn = 1;
+ /**
+ * inform the user
+ */
- // v14.9 and higher
- if ((atoi (device_param->device_version) >= 1573)
- && (atoi (device_param->driver_version) >= 1573))
- {
- catalyst_warn = 0;
- }
+ if (data.quiet == 0)
+ {
+ log_info ("Hashes: %u hashes; %u unique digests, %u unique salts", hashes_cnt_orig, digests_cnt, salts_cnt);
- catalyst_check = 0;
- }
+ log_info ("Bitmaps: %u bits, %u entries, 0x%08x mask, %u bytes, %u/%u rotates", bitmap_bits, bitmap_nums, bitmap_mask, bitmap_size, bitmap_shift1, bitmap_shift2);
- if (catalyst_broken == 1)
- {
- log_error ("");
- log_error ("ATTENTION! The installed catalyst driver in your system is known to be broken!");
- log_error ("It will pass over cracked hashes and does not report them as cracked");
- log_error ("You are STRONGLY encouraged not to use it");
- log_error ("You can use --force to override this but do not post error reports if you do so");
+ if (attack_mode == ATTACK_MODE_STRAIGHT)
+ {
+ log_info ("Rules: %u", kernel_rules_cnt);
+ }
- return (-1);
- }
+ if (opti_type)
+ {
+ log_info ("Applicable Optimizers:");
- if (catalyst_warn == 1)
- {
- log_error ("");
- log_error ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
- log_error ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
- log_error ("See oclHashcat's homepage for official supported catalyst drivers");
- #ifdef _WIN
- log_error ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
- #endif
- log_error ("You can use --force to override this but do not post error reports if you do so");
+ for (uint i = 0; i < 32; i++)
+ {
+ const uint opti_bit = 1u << i;
- return (-1);
- }
+ if (opti_type & opti_bit) log_info ("* %s", stroptitype (opti_bit));
}
}
+
+ /**
+ * Watchdog and Temperature balance
+ */
+
+ #ifdef HAVE_HWMON
+ if (gpu_temp_abort == 0)
+ {
+ log_info ("Watchdog: Temperature abort trigger disabled");
+ }
+ else
+ {
+ log_info ("Watchdog: Temperature abort trigger set to %uc", gpu_temp_abort);
+ }
+
+ if (gpu_temp_retain == 0)
+ {
+ log_info ("Watchdog: Temperature retain trigger disabled");
+ }
+ else
+ {
+ log_info ("Watchdog: Temperature retain trigger set to %uc", gpu_temp_retain);
+ }
+ #endif
}
+ if (data.quiet == 0) log_info ("");
+
/**
* HM devices: init
*/
- hm_attrs_t hm_adapters_nv[DEVICES_MAX];
- hm_attrs_t hm_adapters_amd[DEVICES_MAX];
+ #ifdef HAVE_HWMON
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
+ hm_attrs_t hm_adapters_nv[DEVICES_MAX] = { { { 0 }, 0, 0 } };
+ #endif
- memset (hm_adapters_nv, 0, sizeof (hm_adapters_nv));
- memset (hm_adapters_amd, 0, sizeof (hm_adapters_amd));
+ #ifdef HAVE_ADL
+ hm_attrs_t hm_adapters_amd[DEVICES_MAX] = { { { 0 }, 0, 0 } };
+ #endif
if (gpu_temp_disable == 0)
{
- #ifdef WIN
+ #if defined(WIN) && defined(HAVE_NVAPI)
if (NvAPI_Initialize () == NVAPI_OK)
{
HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX];
if (NvAPI_GPU_GetTachReading (hm_adapters_nv[i].adapter_index.nv, &speed) != NVAPI_NOT_SUPPORTED) hm_adapters_nv[i].fan_supported = 1;
}
}
- #endif
+ #endif // WIN && HAVE_NVAPI
- #ifdef LINUX
+ #if defined(LINUX) && defined(HAVE_NVML)
HM_LIB hm_dll_nv = hm_init (VENDOR_ID_NV);
data.hm_dll_nv = hm_dll_nv;
}
}
}
- #endif
+ #endif // LINUX && HAVE_NVML
+ #ifdef HAVE_ADL
HM_LIB hm_dll_amd = hm_init (VENDOR_ID_AMD);
data.hm_dll_amd = hm_dll_amd;
int num_adl_adapters = 0;
- uint32_t *valid_adl_device_list = hm_get_list_valid_adl_adapters (hm_adapters_num, &num_adl_adapters, lpAdapterInfo);
+ u32 *valid_adl_device_list = hm_get_list_valid_adl_adapters (hm_adapters_num, &num_adl_adapters, lpAdapterInfo);
if (num_adl_adapters > 0)
{
myfree (lpAdapterInfo);
}
}
+ #endif // HAVE_ADL
}
/**
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
- cl_uint device_id_on_platform = devices_plf[device_id];
+ if (device_param->skipped) continue;
+
+ const uint platform_devices_id = device_param->platform_devices_id;
+ #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
if (device_param->vendor_id == VENDOR_ID_NV)
{
- memcpy (&data.hm_device[device_id], &hm_adapters_nv[device_id_on_platform], sizeof (hm_attrs_t));
+ memcpy (&data.hm_device[device_id], &hm_adapters_nv[platform_devices_id], sizeof (hm_attrs_t));
}
+ #endif
+ #ifdef HAVE_ADL
if (device_param->vendor_id == VENDOR_ID_AMD)
{
- memcpy (&data.hm_device[device_id], &hm_adapters_amd[device_id_on_platform], sizeof (hm_attrs_t));
+ memcpy (&data.hm_device[device_id], &hm_adapters_amd[platform_devices_id], sizeof (hm_attrs_t));
}
+ #endif
}
}
* Driver / ADL bug?
*/
+ #ifdef HAVE_ADL
if (powertune_enable == 1)
{
hc_thread_mutex_lock (mux_adl);
for (uint device_id = 0; device_id < devices_cnt; device_id++)
{
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
if (data.hm_device[device_id].od_version == 6)
{
// set powertune value only
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
+ #endif // HAVE_HWMON
uint kernel_blocks_all = 0;
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
/**
* device properties
*/
* create command-queue
*/
- // not support with NV
+ // not supported with NV
// device_param->command_queue = hc_clCreateCommandQueueWithProperties (device_param->context, device_param->device, NULL);
device_param->command_queue = hc_clCreateCommandQueue (device_param->context, device_param->device, 0);
if (device_type & CL_DEVICE_TYPE_CPU)
{
- // CPU still need lots of workitems, don't know why...
- // for testing phase, lets start with this
-
- kernel_accel = 1;
+ if (benchmark_mode == 0)
+ {
+ if (kernel_accel > 16)
+ {
+ kernel_accel = 16;
+ }
+ }
+ else
+ {
+ if (kernel_accel > 64)
+ {
+ kernel_accel = 64;
+ }
+ }
}
uint kernel_power = device_processors * kernel_threads * kernel_accel;
uint size_bfs = KERNEL_BFS * sizeof (bf_t);
uint size_tm = 32 * sizeof (bs_word_t);
- uint64_t size_scryptV = 1;
+ u64 size_scryptV = 1;
if ((hash_mode == 8900) || (hash_mode == 9300))
{
// we don't have sm_* on vendors not NV but it doesn't matter
- sprintf (build_opts, "-I%s/ -DVENDOR_ID=%d -DCUDA_ARCH=%d", shared_dir, device_param->vendor_id, (device_param->sm_major * 100) + device_param->sm_minor);
+ sprintf (build_opts, "-I%s/ -DVENDOR_ID=%d -DCUDA_ARCH=%d -DVECT_SIZE=%u -DDEVICE_TYPE=%u", shared_dir, device_param->vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type);
/**
* main kernel
* kernel source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
generate_source_kernel_filename (attack_exec, attack_kern, kern_type, shared_dir, source_file);
* kernel cached filename
*/
- char cached_file[256];
-
- memset (cached_file, 0, sizeof (cached_file));
+ char cached_file[256] = { 0 };
generate_cached_kernel_filename (attack_exec, attack_kern, kern_type, profile_dir, device_name_chksum, cached_file);
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
- if (force_jit_compilation == 0)
+ if (force_jit_compilation == -1)
{
if (cached == 0)
{
clGetProgramInfo (device_param->program, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program, 1, &device_param->device, build_opts, NULL, NULL);
}
* kernel mp source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
generate_source_kernel_mp_filename (opti_type, opts_type, shared_dir, source_file);
* kernel mp cached filename
*/
- char cached_file[256];
-
- memset (cached_file, 0, sizeof (cached_file));
+ char cached_file[256] = { 0 };
generate_cached_kernel_mp_filename (opti_type, opts_type, profile_dir, device_name_chksum, cached_file);
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
if (cached == 0)
{
clGetProgramInfo (device_param->program_mp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program_mp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_mp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program_mp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program_mp, 1, &device_param->device, build_opts, NULL, NULL);
}
* kernel amp source filename
*/
- char source_file[256];
-
- memset (source_file, 0, sizeof (source_file));
+ char source_file[256] = { 0 };
generate_source_kernel_amp_filename (attack_kern, shared_dir, source_file);
* kernel amp cached filename
*/
- char cached_file[256];
-
- memset (cached_file, 0, sizeof (cached_file));
+ char cached_file[256] = { 0 };
generate_cached_kernel_amp_filename (attack_kern, profile_dir, device_name_chksum, cached_file);
size_t *kernel_lengths = (size_t *) mymalloc (sizeof (size_t));
- const unsigned char **kernel_sources = (const unsigned char **) mymalloc (sizeof (unsigned char *));
+ const u8 **kernel_sources = (const u8 **) mymalloc (sizeof (u8 *));
if (cached == 0)
{
clGetProgramInfo (device_param->program_amp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
- unsigned char *binary = (unsigned char *) mymalloc (binary_size);
+ u8 *binary = (u8 *) mymalloc (binary_size);
clGetProgramInfo (device_param->program_amp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_amp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const unsigned char **) kernel_sources, NULL);
+ device_param->program_amp = hc_clCreateProgramWithBinary (device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
hc_clBuildProgram (device_param->program_amp, 1, &device_param->device, build_opts, NULL, NULL);
}
* kernel name
*/
- char kernel_name[64];
-
- memset (kernel_name, 0, sizeof (kernel_name));
+ char kernel_name[64] = { 0 };
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
* Store initial fanspeed if gpu_temp_retain is enabled
*/
+ #if defined(HAVE_HWMON) && defined(HAVE_ADL)
int gpu_temp_retain_set = 0;
if (gpu_temp_disable == 0)
int engine_clock_profile_max = od_clock_mem_status[device_id].state.aLevels[1].iEngineClock;
int memory_clock_profile_max = od_clock_mem_status[device_id].state.aLevels[1].iMemoryClock;
- // warning if profile has to low max values
+ // warning if profile has too low max values
if ((engine_clock_max - engine_clock_profile_max) > warning_trigger_engine)
{
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_HWMON && HAVE_ADL
}
data.kernel_blocks_all = kernel_blocks_all;
* keep track of the progress
*/
- data.words_progress_done = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
- data.words_progress_rejected = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
- data.words_progress_restored = (uint64_t *) mycalloc (data.salts_cnt, sizeof (uint64_t));
+ data.words_progress_done = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
+ data.words_progress_rejected = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
+ data.words_progress_restored = (u64 *) mycalloc (data.salts_cnt, sizeof (u64));
/**
* open filehandles
data.quiet = 1;
- const uint64_t words1_cnt = count_words (wl_data, fp1, dictfile1, dictstat_base, &dictstat_nmemb);
+ const u64 words1_cnt = count_words (wl_data, fp1, dictfile1, dictstat_base, &dictstat_nmemb);
data.quiet = quiet;
data.quiet = 1;
- const uint64_t words2_cnt = count_words (wl_data, fp2, dictfile2, dictstat_base, &dictstat_nmemb);
+ const u64 words2_cnt = count_words (wl_data, fp2, dictfile2, dictstat_base, &dictstat_nmemb);
data.quiet = quiet;
if (weak_hash_threshold >= salts_cnt)
{
+ uint first_device_id = 0;
+
+ for (uint device_id = 0; device_id < devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ first_device_id = device_id;
+
+ break;
+ }
+
if (data.quiet == 0) log_info_nn ("Checking for weak hashes...");
for (uint salt_pos = 0; salt_pos < salts_cnt; salt_pos++)
{
- weak_hash_check (&data.devices_param[0], salt_pos, kernel_loops);
+ weak_hash_check (&data.devices_param[first_device_id], salt_pos, kernel_loops);
}
}
data.css_cnt = css_cnt;
data.css_buf = css_buf;
- uint uniq_tbls[SP_PW_MAX][CHARSIZ];
-
- memset (uniq_tbls, 0, sizeof (uniq_tbls));
+ uint uniq_tbls[SP_PW_MAX][CHARSIZ] = { { 0 } };
mp_css_to_uniq_tbl (css_cnt, css_buf, uniq_tbls);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->kernel_params_mp[0] = &device_param->d_combs;
device_param->kernel_params_mp[1] = &device_param->d_root_css_buf;
device_param->kernel_params_mp[2] = &device_param->d_markov_css_buf;
data.devices_status = STATUS_INIT;
- memset (data.words_progress_done, 0, data.salts_cnt * sizeof (uint64_t));
- memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (uint64_t));
- memset (data.words_progress_restored, 0, data.salts_cnt * sizeof (uint64_t));
+ memset (data.words_progress_done, 0, data.salts_cnt * sizeof (u64));
+ memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (u64));
+ memset (data.words_progress_restored, 0, data.salts_cnt * sizeof (u64));
memset (data.cpt_buf, 0, CPT_BUF * sizeof (cpt_t));
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->speed_pos = 0;
- memset (device_param->speed_cnt, 0, SPEED_CACHE * sizeof (uint64_t));
+ memset (device_param->speed_cnt, 0, SPEED_CACHE * sizeof (u64));
memset (device_param->speed_ms, 0, SPEED_CACHE * sizeof (float));
memset (device_param->speed_rec, 0, SPEED_CACHE * sizeof (hc_timer_t));
if (maskpos > 0 && dictpos == 0) free (masks[maskpos - 1]);
- uint uniq_tbls[SP_PW_MAX][CHARSIZ];
-
- memset (uniq_tbls, 0, sizeof (uniq_tbls));
+ uint uniq_tbls[SP_PW_MAX][CHARSIZ] = { { 0 } };
mp_css_to_uniq_tbl (css_cnt, css_buf, uniq_tbls);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
device_param->kernel_params_mp_l[0] = &device_param->d_pws_buf;
device_param->kernel_params_mp_l[1] = &device_param->d_root_css_buf;
device_param->kernel_params_mp_l[2] = &device_param->d_markov_css_buf;
}
}
- uint64_t words_base = data.words_cnt;
+ u64 words_base = data.words_cnt;
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
{
hc_device_param_t *device_param = &devices_param[device_id];
- device_param->device_id = device_id;
-
if (wordlist_mode == WL_MODE_STDIN)
{
hc_thread_create (c_threads[device_id], thread_calc_stdin, device_param);
{
hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->skipped) continue;
+
local_free (device_param->result);
local_free (device_param->pw_caches);
// reset default fan speed
+ #ifdef HAVE_HWMON
if (gpu_temp_disable == 0)
{
+ #ifdef HAVE_ADL
if (gpu_temp_retain != 0) // VENDOR_ID_AMD is implied here
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if (data.hm_device[i].fan_supported == 1)
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ if (data.hm_device[device_id].fan_supported == 1)
{
- int fanspeed = temp_retain_fanspeed_value[i];
+ int fanspeed = temp_retain_fanspeed_value[device_id];
if (fanspeed == -1) continue;
- int rc = hm_set_fanspeed_with_device_id_amd (i, fanspeed);
+ int rc = hm_set_fanspeed_with_device_id_amd (device_id, fanspeed);
- if (rc == -1) log_info ("WARNING: Failed to restore default fan speed for gpu number: %i:", i);
+ if (rc == -1) log_info ("WARNING: Failed to restore default fan speed for gpu number: %i:", device_id);
}
}
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
}
// reset power tuning
+ #ifdef HAVE_ADL
if (powertune_enable == 1) // VENDOR_ID_AMD is implied here
{
hc_thread_mutex_lock (mux_adl);
- for (uint i = 0; i < data.devices_cnt; i++)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- if (data.hm_device[i].od_version == 6)
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ if (data.hm_device[device_id].od_version == 6)
{
// check powertune capabilities first, if not available then skip device
int powertune_supported = 0;
- if ((hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[i].adapter_index.amd, &powertune_supported)) != ADL_OK)
+ if ((hc_ADL_Overdrive6_PowerControl_Caps (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, &powertune_supported)) != ADL_OK)
{
log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
{
// powercontrol settings
- if ((hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[i].adapter_index.amd, od_power_control_status[i])) != ADL_OK)
+ if ((hc_ADL_Overdrive_PowerControl_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, od_power_control_status[device_id])) != ADL_OK)
{
log_info ("ERROR: Failed to restore the ADL PowerControl values");
performance_state->iNumberOfPerformanceLevels = 2;
- performance_state->aLevels[0].iEngineClock = od_clock_mem_status[i].state.aLevels[0].iEngineClock;
- performance_state->aLevels[1].iEngineClock = od_clock_mem_status[i].state.aLevels[1].iEngineClock;
- performance_state->aLevels[0].iMemoryClock = od_clock_mem_status[i].state.aLevels[0].iMemoryClock;
- performance_state->aLevels[1].iMemoryClock = od_clock_mem_status[i].state.aLevels[1].iMemoryClock;
+ performance_state->aLevels[0].iEngineClock = od_clock_mem_status[device_id].state.aLevels[0].iEngineClock;
+ performance_state->aLevels[1].iEngineClock = od_clock_mem_status[device_id].state.aLevels[1].iEngineClock;
+ performance_state->aLevels[0].iMemoryClock = od_clock_mem_status[device_id].state.aLevels[0].iMemoryClock;
+ performance_state->aLevels[1].iMemoryClock = od_clock_mem_status[device_id].state.aLevels[1].iMemoryClock;
- if ((hc_ADL_Overdrive_State_Set (data.hm_dll_amd, data.hm_device[i].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
+ if ((hc_ADL_Overdrive_State_Set (data.hm_dll_amd, data.hm_device[device_id].adapter_index.amd, ADL_OD6_SETSTATE_PERFORMANCE, performance_state)) != ADL_OK)
{
log_info ("ERROR: Failed to restore ADL performance state");
hc_thread_mutex_unlock (mux_adl);
}
+ #endif // HAVE_ADL
if (gpu_temp_disable == 0)
{
- #ifdef LINUX
+ #if defined(LINUX) && defined(HAVE_NVML)
if (data.hm_dll_nv)
{
hc_NVML_nvmlShutdown (data.hm_dll_nv);
}
#endif
- #ifdef WIN
+ #if defined(WIN) && defined(HAVE_NVAPI)
NvAPI_Unload ();
#endif
+ #ifdef HAVE_ADL
if (data.hm_dll_amd)
{
hc_ADL_Main_Control_Destroy (data.hm_dll_amd);
hm_close (data.hm_dll_amd);
}
+ #endif
}
+ #endif // HAVE_HWMON
// free memory
local_free (bitmap_s2_c);
local_free (bitmap_s2_d);
+ #ifdef HAVE_HWMON
local_free (temp_retain_fanspeed_value);
+ #ifdef HAVE_ADL
local_free (od_clock_mem_status);
local_free (od_power_control_status);
+ #endif // ADL
+ #endif
global_free (devices_param);