-/**
+ /**
* Authors.....: Jens Steube <jens.steube@gmail.com>
* Gabriele Gristina <matrix@hashcat.net>
* magnum <john.magnum@hushmail.com>
#define MARKOV_DISABLE 0
#define MARKOV_CLASSIC 0
#define BENCHMARK 0
+#define STDOUT_FLAG 0
#define RESTORE 0
#define RESTORE_TIMER 60
#define RESTORE_DISABLE 0
#define SEPARATOR ':'
#define BITMAP_MIN 16
#define BITMAP_MAX 24
+#define NVIDIA_SPIN_DAMP 100
#define GPU_TEMP_DISABLE 0
#define GPU_TEMP_ABORT 90
-#define GPU_TEMP_RETAIN 0
+#define GPU_TEMP_RETAIN 65
#define WORKLOAD_PROFILE 2
#define KERNEL_ACCEL 0
#define KERNEL_LOOPS 0
#define NUM_DEFAULT_BENCHMARK_ALGORITHMS 143
+#define NVIDIA_100PERCENTCPU_WORKAROUND 100
+
#define global_free(attr) \
{ \
myfree ((void *) data.attr); \
" --outfile-autohex-disable | | Disable the use of $HEX[] in output plains |",
" --outfile-check-timer | Num | Sets seconds between outfile checks to X | --outfile-check=30",
" -p, --separator | Char | Separator char for hashlists and outfile | -p :",
+ " --stdout | | Do not crack a hash, instead print candidates only |",
" --show | | Show cracked passwords only |",
" --left | | Show un-cracked passwords only |",
" --username | | Enable ignoring of usernames in hashfile |",
" -w, --workload-profile | Num | Enable a specific workload profile, see pool below | -w 3",
" -n, --kernel-accel | Num | Manual workload tuning, set outerloop step size to X | -n 64",
" -u, --kernel-loops | Num | Manual workload tuning, set innerloop step size to X | -u 256",
+ " --nvidia-spin-damp | Num | Workaround NVidias CPU burning loop bug, in percent | --nvidia-spin-damp=50",
" --gpu-temp-disable | | Disable temperature and fanspeed reads and triggers |",
#ifdef HAVE_HWMON
" --gpu-temp-abort | Num | Abort if GPU temperature reaches X degrees celsius | --gpu-temp-abort=100",
" 22 | Juniper Netscreen/SSG (ScreenOS) | Operating-Systems",
" 501 | Juniper IVE | Operating-Systems",
" 5800 | Android PIN | Operating-Systems",
+ " 13800 | Windows 8+ phone PIN/Password | Operating-Systems",
" 8100 | Citrix Netscaler | Operating-Systems",
" 8500 | RACF | Operating-Systems",
" 7200 | GRUB 2 | Operating-Systems",
}
}
+// --stdout support: re-generate on the host the exact candidate passwords a
+// kernel launch would have tested, and print them via format_output ()
+// instead of hashing them (dispatched from choose_kernel () for hash_mode 2000).
+static void process_stdout (hc_device_param_t *device_param, const uint pws_cnt)
+{
+ char out_buf[HCBUFSIZ] = { 0 };
+
+ uint plain_buf[16] = { 0 };
+
+ u8 *plain_ptr = (u8 *) plain_buf;
+
+ uint plain_len = 0;
+
+ const uint il_cnt = device_param->kernel_params_buf32[27]; // ugly, i know
+
+ if (data.attack_mode == ATTACK_MODE_STRAIGHT) // wordlist + rules
+ {
+ pw_t pw;
+
+ for (uint gidvid = 0; gidvid < pws_cnt; gidvid++)
+ {
+ gidd_to_pw_t (device_param, gidvid, &pw);
+
+ const uint pos = device_param->innerloop_pos;
+
+ for (uint il_pos = 0; il_pos < il_cnt; il_pos++)
+ {
+ for (int i = 0; i < 8; i++) // copy first 8 u32 (32 bytes) of the base word
+ {
+ plain_buf[i] = pw.i[i];
+ }
+
+ plain_len = pw.pw_len;
+
+ plain_len = apply_rules (data.kernel_rules_buf[pos + il_pos].cmds, &plain_buf[0], &plain_buf[4], plain_len);
+
+ if (plain_len > data.pw_max) plain_len = data.pw_max;
+
+ format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ }
+ }
+ }
+ else if (data.attack_mode == ATTACK_MODE_COMBI) // combinator: join base word with combs_buf word
+ {
+ pw_t pw;
+
+ for (uint gidvid = 0; gidvid < pws_cnt; gidvid++)
+ {
+ gidd_to_pw_t (device_param, gidvid, &pw);
+
+ for (uint il_pos = 0; il_pos < il_cnt; il_pos++)
+ {
+ for (int i = 0; i < 8; i++)
+ {
+ plain_buf[i] = pw.i[i];
+ }
+
+ plain_len = pw.pw_len;
+
+ char *comb_buf = (char *) device_param->combs_buf[il_pos].i;
+ uint comb_len = device_param->combs_buf[il_pos].pw_len;
+
+ if (data.combs_mode == COMBINATOR_MODE_BASE_LEFT)
+ {
+ memcpy (plain_ptr + plain_len, comb_buf, comb_len); // base word left, append comb word
+ }
+ else
+ {
+ memmove (plain_ptr + comb_len, plain_ptr, plain_len); // base word right, prepend comb word
+
+ memcpy (plain_ptr, comb_buf, comb_len);
+ }
+
+ plain_len += comb_len;
+
+ if (data.pw_max != PW_DICTMAX1)
+ {
+ if (plain_len > data.pw_max) plain_len = data.pw_max;
+ }
+
+ format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ }
+ }
+ }
+ else if (data.attack_mode == ATTACK_MODE_BF) // brute-force: expand left/right mask halves
+ {
+ for (uint gidvid = 0; gidvid < pws_cnt; gidvid++)
+ {
+ for (uint il_pos = 0; il_pos < il_cnt; il_pos++)
+ {
+ u64 l_off = device_param->kernel_params_mp_l_buf64[3] + gidvid;
+ u64 r_off = device_param->kernel_params_mp_r_buf64[3] + il_pos;
+
+ uint l_start = device_param->kernel_params_mp_l_buf32[5];
+ uint r_start = device_param->kernel_params_mp_r_buf32[5];
+
+ uint l_stop = device_param->kernel_params_mp_l_buf32[4];
+ uint r_stop = device_param->kernel_params_mp_r_buf32[4];
+
+ sp_exec (l_off, (char *) plain_ptr + l_start, data.root_css_buf, data.markov_css_buf, l_start, l_start + l_stop);
+ sp_exec (r_off, (char *) plain_ptr + r_start, data.root_css_buf, data.markov_css_buf, r_start, r_start + r_stop);
+
+ plain_len = data.css_cnt;
+
+ format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ }
+ }
+ }
+ else if (data.attack_mode == ATTACK_MODE_HYBRID1) // wordlist + mask: mask expanded after the word
+ {
+ pw_t pw;
+
+ for (uint gidvid = 0; gidvid < pws_cnt; gidvid++)
+ {
+ gidd_to_pw_t (device_param, gidvid, &pw);
+
+ for (uint il_pos = 0; il_pos < il_cnt; il_pos++)
+ {
+ for (int i = 0; i < 8; i++)
+ {
+ plain_buf[i] = pw.i[i];
+ }
+
+ plain_len = pw.pw_len;
+
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
+
+ uint start = 0;
+ uint stop = device_param->kernel_params_mp_buf32[4];
+
+ sp_exec (off, (char *) plain_ptr + plain_len, data.root_css_buf, data.markov_css_buf, start, start + stop);
+
+ plain_len += start + stop;
+
+ format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ }
+ }
+ }
+ else if (data.attack_mode == ATTACK_MODE_HYBRID2) // mask + wordlist: word shifted right, mask expanded in front
+ {
+ pw_t pw;
+
+ for (uint gidvid = 0; gidvid < pws_cnt; gidvid++)
+ {
+ gidd_to_pw_t (device_param, gidvid, &pw);
+
+ for (uint il_pos = 0; il_pos < il_cnt; il_pos++)
+ {
+ for (int i = 0; i < 8; i++)
+ {
+ plain_buf[i] = pw.i[i];
+ }
+
+ plain_len = pw.pw_len;
+
+ u64 off = device_param->kernel_params_mp_buf64[3] + il_pos;
+
+ uint start = 0;
+ uint stop = device_param->kernel_params_mp_buf32[4];
+
+ memmove (plain_ptr + stop, plain_ptr, plain_len);
+
+ sp_exec (off, (char *) plain_ptr, data.root_css_buf, data.markov_css_buf, start, start + stop);
+
+ plain_len += start + stop;
+
+ format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ }
+ }
+ }
+}
+
static void save_hash ()
{
char *hashfile = data.hashfile;
if (data.hash_mode != 2500)
{
- char out_buf[HCBUFSIZ] = { 0 };
-
if (data.username == 1)
{
user_t *user = data.hash_info[idx]->user;
fputc (separator, fp);
}
+ char out_buf[HCBUFSIZ]; // scratch buffer
+
+ out_buf[0] = 0;
+
ascii_digest (out_buf, salt_pos, digest_pos);
fputs (out_buf, fp);
- log_out (fp, "");
+ fputc ('\n', fp);
}
else
{
unlink (old_hashfile);
}
-static float find_kernel_power_div (const u64 total_left, const uint kernel_power_all)
-{
- // function called only in case kernel_power_all > words_left
-
- float kernel_power_div = (float) (total_left) / kernel_power_all;
-
- kernel_power_div += kernel_power_div / 100;
-
- u32 kernel_power_new = (u32) (kernel_power_all * kernel_power_div);
-
- while (kernel_power_new < total_left)
- {
- kernel_power_div += kernel_power_div / 100;
-
- kernel_power_new = (u32) (kernel_power_all * kernel_power_div);
- }
-
- if (data.quiet == 0)
- {
- clear_prompt ();
-
- //log_info ("");
-
- log_info ("INFO: approaching final keyspace, workload adjusted");
- log_info ("");
-
- fprintf (stdout, "%s", PROMPT);
-
- fflush (stdout);
- }
-
- //if ((kernel_power_all * kernel_power_div) < 8) return 1;
-
- return kernel_power_div;
-}
-
-static void run_kernel (const uint kern_run, hc_device_param_t *device_param, const uint num, const uint event_update)
+static void run_kernel (const uint kern_run, hc_device_param_t *device_param, const uint num, const uint event_update, const uint iteration)
{
uint num_elements = num;
hc_clFlush (data.ocl, device_param->command_queue);
+ if (device_param->nvidia_spin_damp)
+ {
+ if (data.devices_status == STATUS_RUNNING)
+ {
+ if (iteration < EXPECTED_ITERATIONS)
+ {
+ switch (kern_run)
+ {
+ case KERN_RUN_1: if (device_param->exec_us_prev1[iteration]) usleep (device_param->exec_us_prev1[iteration] * device_param->nvidia_spin_damp); break;
+ case KERN_RUN_2: if (device_param->exec_us_prev2[iteration]) usleep (device_param->exec_us_prev2[iteration] * device_param->nvidia_spin_damp); break;
+ case KERN_RUN_3: if (device_param->exec_us_prev3[iteration]) usleep (device_param->exec_us_prev3[iteration] * device_param->nvidia_spin_damp); break;
+ }
+ }
+ }
+ }
+
hc_clWaitForEvents (data.ocl, 1, &event);
- if (event_update)
- {
- cl_ulong time_start;
- cl_ulong time_end;
+ cl_ulong time_start;
+ cl_ulong time_end;
- hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_START, sizeof (time_start), &time_start, NULL);
- hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_END, sizeof (time_end), &time_end, NULL);
+ hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_START, sizeof (time_start), &time_start, NULL);
+ hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_END, sizeof (time_end), &time_end, NULL);
- const double exec_time = (double) (time_end - time_start) / 1000000.0;
+ const double exec_us = (double) (time_end - time_start) / 1000;
+ if (data.devices_status == STATUS_RUNNING)
+ {
+ if (iteration < EXPECTED_ITERATIONS)
+ {
+ switch (kern_run)
+ {
+ case KERN_RUN_1: device_param->exec_us_prev1[iteration] = exec_us; break;
+ case KERN_RUN_2: device_param->exec_us_prev2[iteration] = exec_us; break;
+ case KERN_RUN_3: device_param->exec_us_prev3[iteration] = exec_us; break;
+ }
+ }
+ }
+
+ if (event_update)
+ {
uint exec_pos = device_param->exec_pos;
- device_param->exec_ms[exec_pos] = exec_time;
+ device_param->exec_ms[exec_pos] = exec_us / 1000;
exec_pos++;
*/
}
-static void choose_kernel (hc_device_param_t *device_param, const uint attack_exec, const uint attack_mode, const uint opts_type, const salt_t *salt_buf, const uint highest_pw_len, const uint pws_cnt)
+static void choose_kernel (hc_device_param_t *device_param, const uint attack_exec, const uint attack_mode, const uint opts_type, const salt_t *salt_buf, const uint highest_pw_len, const uint pws_cnt, const uint fast_iteration)
{
+ if (data.hash_mode == 2000)
+ {
+ process_stdout (device_param, pws_cnt);
+
+ return;
+ }
+
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (attack_mode == ATTACK_MODE_BF)
if (highest_pw_len < 16)
{
- run_kernel (KERN_RUN_1, device_param, pws_cnt, true);
+ run_kernel (KERN_RUN_1, device_param, pws_cnt, true, fast_iteration);
}
else if (highest_pw_len < 32)
{
- run_kernel (KERN_RUN_2, device_param, pws_cnt, true);
+ run_kernel (KERN_RUN_2, device_param, pws_cnt, true, fast_iteration);
}
else
{
- run_kernel (KERN_RUN_3, device_param, pws_cnt, true);
+ run_kernel (KERN_RUN_3, device_param, pws_cnt, true, fast_iteration);
}
}
else
{
run_kernel_amp (device_param, pws_cnt);
- run_kernel (KERN_RUN_1, device_param, pws_cnt, false);
+ run_kernel (KERN_RUN_1, device_param, pws_cnt, false, 0);
if (opts_type & OPTS_TYPE_HOOK12)
{
- run_kernel (KERN_RUN_12, device_param, pws_cnt, false);
+ run_kernel (KERN_RUN_12, device_param, pws_cnt, false, 0);
+
+ hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+
+ // do something with data
+
+ hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
}
uint iter = salt_buf->salt_iter;
uint loop_step = device_param->kernel_loops;
- for (uint loop_pos = 0; loop_pos < iter; loop_pos += loop_step)
+ for (uint loop_pos = 0, slow_iteration = 0; loop_pos < iter; loop_pos += loop_step, slow_iteration++)
{
uint loop_left = iter - loop_pos;
device_param->kernel_params_buf32[25] = loop_pos;
device_param->kernel_params_buf32[26] = loop_left;
- run_kernel (KERN_RUN_2, device_param, pws_cnt, true);
+ run_kernel (KERN_RUN_2, device_param, pws_cnt, true, slow_iteration);
if (data.devices_status == STATUS_CRACKED) break;
if (data.devices_status == STATUS_ABORTED) break;
if (opts_type & OPTS_TYPE_HOOK23)
{
- run_kernel (KERN_RUN_23, device_param, pws_cnt, false);
+ run_kernel (KERN_RUN_23, device_param, pws_cnt, false, 0);
hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
}
- run_kernel (KERN_RUN_3, device_param, pws_cnt, false);
+ run_kernel (KERN_RUN_3, device_param, pws_cnt, false, 0);
}
}
if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
- run_kernel (KERN_RUN_1, device_param, kernel_power_try, true);
+ run_kernel (KERN_RUN_1, device_param, kernel_power_try, true, 0);
}
else
{
- run_kernel (KERN_RUN_2, device_param, kernel_power_try, true);
+ run_kernel (KERN_RUN_2, device_param, kernel_power_try, true, 0);
}
const double exec_ms_prev = get_avg_exec_time (device_param, 1);
if ((kernel_loops_min == kernel_loops_max) && (kernel_accel_min == kernel_accel_max))
{
- try_run (device_param, kernel_accel, kernel_loops);
- try_run (device_param, kernel_accel, kernel_loops);
- try_run (device_param, kernel_accel, kernel_loops);
- try_run (device_param, kernel_accel, kernel_loops);
+ if (data.hash_mode != 2000)
+ {
+ try_run (device_param, kernel_accel, kernel_loops);
+ try_run (device_param, kernel_accel, kernel_loops);
+ try_run (device_param, kernel_accel, kernel_loops);
+ try_run (device_param, kernel_accel, kernel_loops);
+ }
device_param->kernel_accel = kernel_accel;
device_param->kernel_loops = kernel_loops;
memset (device_param->exec_ms, 0, EXEC_CACHE * sizeof (double));
+ memset (device_param->exec_us_prev1, 0, EXPECTED_ITERATIONS * sizeof (double));
+ memset (device_param->exec_us_prev2, 0, EXPECTED_ITERATIONS * sizeof (double));
+ memset (device_param->exec_us_prev3, 0, EXPECTED_ITERATIONS * sizeof (double));
+
// store
device_param->kernel_accel = kernel_accel;
if (data.devices_status == STATUS_QUIT) break;
if (data.devices_status == STATUS_BYPASS) break;
+ uint fast_iteration = 0;
+
uint innerloop_left = innerloop_cnt - innerloop_pos;
- if (innerloop_left > innerloop_step) innerloop_left = innerloop_step;
+ if (innerloop_left > innerloop_step)
+ {
+ innerloop_left = innerloop_step;
+
+ fast_iteration = 1;
+ }
device_param->innerloop_pos = innerloop_pos;
device_param->innerloop_left = innerloop_left;
hc_timer_set (&device_param->timer_speed);
}
- choose_kernel (device_param, data.attack_exec, data.attack_mode, data.opts_type, salt_buf, highest_pw_len, pws_cnt);
+ choose_kernel (device_param, data.attack_exec, data.attack_mode, data.opts_type, salt_buf, highest_pw_len, pws_cnt, fast_iteration);
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
* result
*/
- check_cracked (device_param, salt_pos);
+ if (data.benchmark == 0)
+ {
+ check_cracked (device_param, salt_pos);
+ }
/**
* progress
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
+ #ifdef WIN
+ hm_set_fanspeed_with_device_id_nvapi (device_id, fan_speed_new, 1);
+ #endif
+ #ifdef LINUX
+ hm_set_fanspeed_with_device_id_xnvctrl (device_id, fan_speed_new);
+ #endif
}
fan_speed_chgd[device_id] = 1;
//}
}
-static u32 get_power (const u32 kernel_power)
+// Record the reduced total workload (remaining words) for the final, partial
+// keyspace chunk in data.kernel_power_final, and notify the user once that
+// the workload was adjusted. Consumed by get_power ().
+static void set_kernel_power_final (const u64 kernel_power_final)
+{
+ if (data.quiet == 0)
+ {
+ clear_prompt ();
+
+ //log_info ("");
+
+ log_info ("INFO: approaching final keyspace, workload adjusted");
+ log_info ("");
+
+ fprintf (stdout, "%s", PROMPT); // restore the interactive prompt removed by clear_prompt ()
+
+ fflush (stdout);
+ }
+
+ data.kernel_power_final = kernel_power_final;
+}
+
+// Number of words this device should process in its next launch: normally the
+// device's full kernel_power; once the final-keyspace cap is set, the leftover
+// words are split across devices in proportion to their raw hardware power.
+static u32 get_power (hc_device_param_t *device_param)
{
- if (data.kernel_power_div)
+ const u64 kernel_power_final = data.kernel_power_final; // non-zero only after set_kernel_power_final () ran
+
+ if (kernel_power_final)
{
- return (float) kernel_power * data.kernel_power_div;
+ const double device_factor = (double) device_param->hardware_power / data.hardware_power_all;
+
+ const u64 words_left_device = CEIL ((double) kernel_power_final * device_factor); // this device's proportional share, rounded up
+
+ // work should be at least the hardware power available without any accelerator
+
+ const u64 work = MAX (words_left_device, device_param->hardware_power);
+
+ return work;
}
- return kernel_power;
+ return device_param->kernel_power;
}
static uint get_work (hc_device_param_t *device_param, const u64 max)
device_param->words_off = words_cur;
+ const u64 kernel_power_all = data.kernel_power_all;
+
const u64 words_left = words_base - words_cur;
- if (data.kernel_power_all > words_left)
+ if (words_left < kernel_power_all)
{
- if (data.kernel_power_div == 0)
+ if (data.kernel_power_final == 0)
{
- data.kernel_power_div = find_kernel_power_div (words_left, data.kernel_power_all);
+ set_kernel_power_final (words_left);
}
}
- const u32 kernel_power = get_power (device_param->kernel_power);
+ const u32 kernel_power = get_power (device_param);
uint work = MIN (words_left, kernel_power);
uint words_cur = 0;
- while (words_cur < get_power (device_param->kernel_power))
+ while (words_cur < device_param->kernel_power)
{
char *line_buf = fgets (buf, HCBUFSIZ - 1, stdin);
continue;
}
+ // hmm that's always the case, or?
+
if (attack_kern == ATTACK_KERN_STRAIGHT)
{
if ((line_len < data.pw_min) || (line_len > data.pw_max))
continue;
}
}
- else if (attack_kern == ATTACK_KERN_COMBI)
- {
- // do not check if minimum restriction is satisfied (line_len >= data.pw_min) here
- // since we still need to combine the plains
-
- if (line_len > data.pw_max)
- {
- hc_thread_mutex_lock (mux_counter);
-
- for (uint salt_pos = 0; salt_pos < data.salts_cnt; salt_pos++)
- {
- data.words_progress_rejected[salt_pos] += data.combs_cnt;
- }
-
- hc_thread_mutex_unlock (mux_counter);
-
- continue;
- }
- }
pw_add (device_param, (u8 *) line_buf, line_len);
if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
- run_kernel (KERN_RUN_1, device_param, 1, false);
+ run_kernel (KERN_RUN_1, device_param, 1, false, 0);
}
else
{
- run_kernel (KERN_RUN_1, device_param, 1, false);
+ run_kernel (KERN_RUN_1, device_param, 1, false, 0);
uint loop_step = 16;
device_param->kernel_params_buf32[25] = loop_pos;
device_param->kernel_params_buf32[26] = loop_left;
- run_kernel (KERN_RUN_2, device_param, 1, false);
+ run_kernel (KERN_RUN_2, device_param, 1, false, 0);
}
- run_kernel (KERN_RUN_3, device_param, 1, false);
+ run_kernel (KERN_RUN_3, device_param, 1, false, 0);
}
/**
for (uint i = 0; i < digests_cnt; i++)
{
- if (data.digests_shown[i] == 1) continue; // can happen with potfile
-
uint *digest_ptr = (uint *) digests_buf_ptr;
digests_buf_ptr += dgst_size;
* main
*/
-#ifdef _WIN
+#ifdef WIN
void SetConsoleWindowSize (const int x)
{
HANDLE h = GetStdHandle (STD_OUTPUT_HANDLE);
int main (int argc, char **argv)
{
- #ifdef _WIN
+ #ifdef WIN
SetConsoleWindowSize (132);
#endif
uint version = VERSION;
uint quiet = QUIET;
uint benchmark = BENCHMARK;
+ uint stdout_flag = STDOUT_FLAG;
uint show = SHOW;
uint left = LEFT;
uint username = USERNAME;
uint workload_profile = WORKLOAD_PROFILE;
uint kernel_accel = KERNEL_ACCEL;
uint kernel_loops = KERNEL_LOOPS;
+ uint nvidia_spin_damp = NVIDIA_SPIN_DAMP;
uint gpu_temp_disable = GPU_TEMP_DISABLE;
#ifdef HAVE_HWMON
uint gpu_temp_abort = GPU_TEMP_ABORT;
#define IDX_FORCE 0xff08
#define IDX_RUNTIME 0xff09
#define IDX_BENCHMARK 'b'
+ #define IDX_STDOUT_FLAG 0xff77
#define IDX_HASH_MODE 'm'
#define IDX_ATTACK_MODE 'a'
#define IDX_RP_FILE 'r'
#define IDX_WORKLOAD_PROFILE 'w'
#define IDX_KERNEL_ACCEL 'n'
#define IDX_KERNEL_LOOPS 'u'
+ #define IDX_NVIDIA_SPIN_DAMP 0xff79
#define IDX_GPU_TEMP_DISABLE 0xff29
#define IDX_GPU_TEMP_ABORT 0xff30
#define IDX_GPU_TEMP_RETAIN 0xff31
{"outfile-check-dir", required_argument, 0, IDX_OUTFILE_CHECK_DIR},
{"force", no_argument, 0, IDX_FORCE},
{"benchmark", no_argument, 0, IDX_BENCHMARK},
+ {"stdout", no_argument, 0, IDX_STDOUT_FLAG},
{"restore", no_argument, 0, IDX_RESTORE},
{"restore-disable", no_argument, 0, IDX_RESTORE_DISABLE},
{"status", no_argument, 0, IDX_STATUS},
{"workload-profile", required_argument, 0, IDX_WORKLOAD_PROFILE},
{"kernel-accel", required_argument, 0, IDX_KERNEL_ACCEL},
{"kernel-loops", required_argument, 0, IDX_KERNEL_LOOPS},
+ {"nvidia-spin-damp", required_argument, 0, IDX_NVIDIA_SPIN_DAMP},
{"gpu-temp-disable", no_argument, 0, IDX_GPU_TEMP_DISABLE},
#ifdef HAVE_HWMON
{"gpu-temp-abort", required_argument, 0, IDX_GPU_TEMP_ABORT},
uint runtime_chgd = 0;
uint kernel_loops_chgd = 0;
uint kernel_accel_chgd = 0;
+ uint nvidia_spin_damp_chgd = 0;
uint attack_mode_chgd = 0;
uint outfile_format_chgd = 0;
uint rp_gen_seed_chgd = 0;
case IDX_LIMIT: limit = atoll (optarg); break;
case IDX_KEYSPACE: keyspace = 1; break;
case IDX_BENCHMARK: benchmark = 1; break;
+ case IDX_STDOUT_FLAG: stdout_flag = 1; break;
case IDX_RESTORE: break;
case IDX_RESTORE_DISABLE: restore_disable = 1; break;
case IDX_STATUS: status = 1; break;
kernel_accel_chgd = 1; break;
case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
kernel_loops_chgd = 1; break;
+ case IDX_NVIDIA_SPIN_DAMP: nvidia_spin_damp = atoi (optarg);
+ nvidia_spin_damp_chgd = 1; break;
case IDX_GPU_TEMP_DISABLE: gpu_temp_disable = 1; break;
#ifdef HAVE_HWMON
case IDX_GPU_TEMP_ABORT: gpu_temp_abort = atoi (optarg); break;
{
log_info ("%s (%s) starting in benchmark-mode...", PROGNAME, VERSION_TAG);
log_info ("");
+ log_info ("Note: Reported benchmark cracking speed = real cracking speed");
+ log_info ("To verify, run hashcat like this: only_one_hash.txt -a 3 -w 3 ?b?b?b?b?b?b?b");
+ log_info ("");
}
else
{
log_info ("%s (%s) starting in restore-mode...", PROGNAME, VERSION_TAG);
log_info ("");
}
+ else if (stdout_flag == 1)
+ {
+ // do nothing
+ }
else
{
log_info ("%s (%s) starting...", PROGNAME, VERSION_TAG);
return (-1);
}
- if (hash_mode_chgd && hash_mode > 13799) // just added to remove compiler warnings for hash_mode_chgd
+ if (hash_mode_chgd && hash_mode > 13800) // just added to remove compiler warnings for hash_mode_chgd
{
log_error ("ERROR: Invalid hash-type specified");
case ATTACK_MODE_HYBRID2: attack_kern = ATTACK_KERN_COMBI; break;
}
- if (benchmark == 0)
+ if (benchmark == 1)
+ {
+ if (myargv[optind] != 0)
+ {
+ log_error ("ERROR: Invalid argument for benchmark mode specified");
+
+ return (-1);
+ }
+
+ if (attack_mode_chgd == 1)
+ {
+ if (attack_mode != ATTACK_MODE_BF)
+ {
+ log_error ("ERROR: Only attack-mode 3 allowed in benchmark mode");
+
+ return (-1);
+ }
+ }
+ }
+ else
{
+ if (stdout_flag == 1) // no hash here
+ {
+ optind--;
+ }
+
if (keyspace == 1)
{
int num_additional_params = 1;
return (-1);
}
}
- else
- {
- if (myargv[optind] != 0)
- {
- log_error ("ERROR: Invalid argument for benchmark mode specified");
-
- return (-1);
- }
-
- if (attack_mode_chgd == 1)
- {
- if (attack_mode != ATTACK_MODE_BF)
- {
- log_error ("ERROR: Only attack-mode 3 allowed in benchmark mode");
-
- return (-1);
- }
- }
- }
if (skip != 0 && limit != 0)
{
quiet = 1;
}
+ if (stdout_flag == 1)
+ {
+ status_timer = 0;
+ restore_timer = 0;
+ restore_disable = 1;
+ restore = 0;
+ potfile_disable = 1;
+ weak_hash_threshold = 0;
+ gpu_temp_disable = 1;
+ hash_mode = 2000;
+ quiet = 1;
+ outfile_format = OUTFILE_FMT_PLAIN;
+ kernel_accel = 1024;
+ kernel_loops = 1024;
+ force = 1;
+ outfile_check_timer = 0;
+ }
+
if (remove_timer_chgd == 1)
{
if (remove == 0)
weak_hash_threshold = 0;
}
+ if (nvidia_spin_damp > 100)
+ {
+ log_error ("ERROR: setting --nvidia-spin-damp must be between 0 and 100 (inclusive)");
+
+ return (-1);
+ }
+
+
/**
* induction directory
*/
logfile_top_uint (attack_mode);
logfile_top_uint (attack_kern);
logfile_top_uint (benchmark);
+ logfile_top_uint (stdout_flag);
logfile_top_uint (bitmap_min);
logfile_top_uint (bitmap_max);
logfile_top_uint (debug_mode);
logfile_top_uint (force);
logfile_top_uint (kernel_accel);
logfile_top_uint (kernel_loops);
+ logfile_top_uint (nvidia_spin_damp);
logfile_top_uint (gpu_temp_disable);
#ifdef HAVE_HWMON
logfile_top_uint (gpu_temp_abort);
restore_disable = 1;
potfile_disable = 1;
weak_hash_threshold = 0;
+ nvidia_spin_damp = 0;
gpu_temp_disable = 1;
+ outfile_check_timer = 0;
#ifdef HAVE_HWMON
- powertune_enable = 1;
+ if (powertune_enable == 1)
+ {
+ gpu_temp_disable = 0;
+ }
#endif
- data.status_timer = status_timer;
- data.restore_timer = restore_timer;
- data.restore_disable = restore_disable;
+ data.status_timer = status_timer;
+ data.restore_timer = restore_timer;
+ data.restore_disable = restore_disable;
+ data.outfile_check_timer = outfile_check_timer;
/**
* force attack mode to be bruteforce
dgst_pos3 = 3;
break;
+ case 2000: hash_type = HASH_TYPE_STDOUT;
+ salt_type = SALT_TYPE_NONE;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_LE;
+ kern_type = 0;
+ dgst_size = DGST_SIZE_4_4;
+ parse_func = NULL;
+ sort_by_digest = NULL;
+ opti_type = 0;
+ dgst_pos0 = 0;
+ dgst_pos1 = 0;
+ dgst_pos2 = 0;
+ dgst_pos3 = 0;
+ break;
+
case 2100: hash_type = HASH_TYPE_DCC2;
salt_type = SALT_TYPE_EMBEDDED;
attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
dgst_pos3 = 3;
break;
+ case 13800: hash_type = HASH_TYPE_SHA256;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_BE
+ | OPTS_TYPE_PT_UNICODE;
+ kern_type = KERN_TYPE_WIN8PHONE;
+ dgst_size = DGST_SIZE_4_8;
+ parse_func = win8phone_parse_hash;
+ sort_by_digest = sort_by_digest_4_8;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_PRECOMPUTE_INIT
+ | OPTI_TYPE_EARLY_SKIP
+ | OPTI_TYPE_NOT_ITERATED
+ | OPTI_TYPE_RAW_HASH;
+ dgst_pos0 = 3;
+ dgst_pos1 = 7;
+ dgst_pos2 = 2;
+ dgst_pos3 = 6;
+ break;
default: usage_mini_print (PROGNAME); return (-1);
}
case 13761: esalt_size = sizeof (tc_t); break;
case 13762: esalt_size = sizeof (tc_t); break;
case 13763: esalt_size = sizeof (tc_t); break;
+ case 13800: esalt_size = sizeof (win8phone_t); break;
}
data.esalt_size = esalt_size;
break;
case 7400: if (pw_max > 16) pw_max = 16;
break;
- case 7500: if (pw_max > 8) pw_max = 8;
+ case 7700: if (pw_max > 8) pw_max = 8;
break;
case 7900: if (pw_max > 48) pw_max = 48;
break;
uint hashes_avail = 0;
- if (benchmark == 0)
+ if ((benchmark == 0) && (stdout_flag == 0))
{
struct stat f;
{
// useless to read hash file for keyspace, cheat a little bit w/ optind
}
+ else if (stdout_flag == 1)
+ {
+ // useless to read hash file for stdout, cheat a little bit w/ optind
+ }
else if (hashes_avail == 0)
{
}
return (0);
}
- if (keyspace == 0)
+ if ((keyspace == 0) && (stdout_flag == 0))
{
if (hashes_cnt == 0)
{
* OpenCL devices: simply push all devices from all platforms into the same device array
*/
- int need_adl = 0;
- int need_nvapi = 0;
- int need_nvml = 0;
+ int need_adl = 0;
+ int need_nvapi = 0;
+ int need_nvml = 0;
+ int need_xnvctrl = 0;
hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t));
{
need_nvml = 1;
- #ifdef _WIN
+ #ifdef LINUX
+ need_xnvctrl = 1;
+ #endif
+
+ #ifdef WIN
need_nvapi = 1;
#endif
}
device_param->sm_minor = sm_minor;
device_param->sm_major = sm_major;
+
+ // CPU burning loop damper
+ // Value is given as number between 0-100
+ // By default 100%
+
+ device_param->nvidia_spin_damp = (double) nvidia_spin_damp;
+
+ if (nvidia_spin_damp_chgd == 0)
+ {
+ if (data.attack_mode == ATTACK_MODE_STRAIGHT)
+ {
+ /**
+ * the workaround is not a friend of rule based attacks
+ * the words from the wordlist combined with fast and slow rules cause
+ * fluctuations which cause inaccurate wait time estimations
+ * using a reduced damping percentage almost compensates this
+ */
+
+ device_param->nvidia_spin_damp = 64;
+ }
+ }
+
+ device_param->nvidia_spin_damp /= 100;
}
else
{
*/
#ifdef HAVE_HWMON
- hm_attrs_t hm_adapters_adl[DEVICES_MAX] = { { 0 } };
- hm_attrs_t hm_adapters_nvapi[DEVICES_MAX] = { { 0 } };
- hm_attrs_t hm_adapters_nvml[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_adl[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_nvapi[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_nvml[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_xnvctrl[DEVICES_MAX] = { { 0 } };
if (gpu_temp_disable == 0)
{
- ADL_PTR *adl = (ADL_PTR *) mymalloc (sizeof (ADL_PTR));
- NVAPI_PTR *nvapi = (NVAPI_PTR *) mymalloc (sizeof (NVAPI_PTR));
- NVML_PTR *nvml = (NVML_PTR *) mymalloc (sizeof (NVML_PTR));
+ ADL_PTR *adl = (ADL_PTR *) mymalloc (sizeof (ADL_PTR));
+ NVAPI_PTR *nvapi = (NVAPI_PTR *) mymalloc (sizeof (NVAPI_PTR));
+ NVML_PTR *nvml = (NVML_PTR *) mymalloc (sizeof (NVML_PTR));
+ XNVCTRL_PTR *xnvctrl = (XNVCTRL_PTR *) mymalloc (sizeof (XNVCTRL_PTR));
- data.hm_adl = NULL;
- data.hm_nvapi = NULL;
- data.hm_nvml = NULL;
+ data.hm_adl = NULL;
+ data.hm_nvapi = NULL;
+ data.hm_nvml = NULL;
+ data.hm_xnvctrl = NULL;
if ((need_nvml == 1) && (nvml_init (nvml) == 0))
{
}
}
+ if ((need_xnvctrl == 1) && (xnvctrl_init (xnvctrl) == 0))
+ {
+ data.hm_xnvctrl = xnvctrl;
+ }
+
+ if (data.hm_xnvctrl)
+ {
+ if (hm_XNVCTRL_XOpenDisplay (data.hm_xnvctrl) == 0)
+ {
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+
+ hm_adapters_xnvctrl[device_id].xnvctrl = device_id;
+
+ int speed = 0;
+
+ if (get_fan_speed_current (data.hm_xnvctrl, device_id, &speed) == 0) hm_adapters_xnvctrl[device_id].fan_get_supported = 1;
+ }
+ }
+ }
+
if ((need_adl == 1) && (adl_init (adl) == 0))
{
data.hm_adl = adl;
}
}
- if (data.hm_adl == NULL && data.hm_nvml == NULL)
+ if (data.hm_adl == NULL && data.hm_nvml == NULL && data.hm_xnvctrl == NULL)
{
gpu_temp_disable = 1;
}
*/
#ifdef HAVE_HWMON
- if (gpu_temp_disable == 0 && data.hm_adl == NULL && data.hm_nvml == NULL)
+ if (gpu_temp_disable == 0 && data.hm_adl == NULL && data.hm_nvml == NULL && data.hm_xnvctrl == NULL)
{
log_info ("Watchdog: Hardware Monitoring Interface not found on your system");
}
data.hm_device[device_id].adl = hm_adapters_adl[platform_devices_id].adl;
data.hm_device[device_id].nvapi = 0;
data.hm_device[device_id].nvml = 0;
+ data.hm_device[device_id].xnvctrl = 0;
data.hm_device[device_id].od_version = hm_adapters_adl[platform_devices_id].od_version;
data.hm_device[device_id].fan_get_supported = hm_adapters_adl[platform_devices_id].fan_get_supported;
- data.hm_device[device_id].fan_set_supported = hm_adapters_adl[platform_devices_id].fan_set_supported;
+ data.hm_device[device_id].fan_set_supported = 0;
}
if (device_param->device_vendor_id == VENDOR_ID_NV)
data.hm_device[device_id].adl = 0;
data.hm_device[device_id].nvapi = hm_adapters_nvapi[platform_devices_id].nvapi;
data.hm_device[device_id].nvml = hm_adapters_nvml[platform_devices_id].nvml;
+ data.hm_device[device_id].xnvctrl = hm_adapters_xnvctrl[platform_devices_id].xnvctrl;
data.hm_device[device_id].od_version = 0;
data.hm_device[device_id].fan_get_supported = hm_adapters_nvml[platform_devices_id].fan_get_supported;
data.hm_device[device_id].fan_set_supported = 0;
device_param->kernel_threads = kernel_threads;
+ device_param->hardware_power = device_processors * kernel_threads;
+
/**
* create input buffers on device : calculate size of fixed memory buffers
*/
* some algorithms need a fixed kernel-loops count
*/
- if (hash_mode == 1500)
+ if (hash_mode == 1500 && attack_mode == ATTACK_MODE_BF)
{
const u32 kernel_loops_fixed = 1024;
device_param->kernel_loops_max = kernel_loops_fixed;
}
- if (hash_mode == 3000)
+ if (hash_mode == 3000 && attack_mode == ATTACK_MODE_BF)
{
const u32 kernel_loops_fixed = 1024;
if ((opts_type & OPTS_TYPE_HOOK12) || (opts_type & OPTS_TYPE_HOOK23))
{
- // none yet
+ switch (hash_mode)
+ {
+ }
}
// now check if all device-memory sizes which depend on the kernel_accel_max amplifier are within its boundaries
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
+ #ifdef LINUX
+ rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_TRUE);
+ #endif
+ #ifdef WIN
+ rc = hm_set_fanspeed_with_device_id_nvapi (device_id, fanspeed, 1);
+ #endif
}
if (rc == 0)
if (data.devices_status != STATUS_CRACKED) data.devices_status = STATUS_STARTING;
- hc_thread_t i_thread = 0;
+ uint i_threads_cnt = 0;
+
+ hc_thread_t *i_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
- hc_thread_create (i_thread, thread_keypress, &benchmark);
+ if (stdout_flag == 0)
+ {
+ hc_thread_create (i_threads[i_threads_cnt], thread_keypress, &benchmark);
+
+ i_threads_cnt++;
+ }
}
if (wordlist_mode == WL_MODE_STDIN) data.status = 1;
hc_thread_t *ni_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
- hc_thread_create (ni_threads[ni_threads_cnt], thread_monitor, NULL);
+ if (stdout_flag == 0)
+ {
+ hc_thread_create (ni_threads[ni_threads_cnt], thread_monitor, NULL);
- ni_threads_cnt++;
+ ni_threads_cnt++;
+ }
/**
* Outfile remove
data.ms_paused = 0;
- data.kernel_power_div = 0;
+ data.kernel_power_final = 0;
data.words_cur = rd->words_cur;
* create autotune threads
*/
- data.devices_status = STATUS_AUTOTUNE;
-
hc_thread_t *c_threads = (hc_thread_t *) mycalloc (data.devices_cnt, sizeof (hc_thread_t));
+ data.devices_status = STATUS_AUTOTUNE;
+
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &devices_param[device_id];
* Inform user about possible slow speeds
*/
+ uint hardware_power_all = 0;
+
uint kernel_power_all = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
hc_device_param_t *device_param = &devices_param[device_id];
+ hardware_power_all += device_param->hardware_power;
+
kernel_power_all += device_param->kernel_power;
}
+ data.hardware_power_all = hardware_power_all; // hardware_power_all is the same as kernel_power_all but without the influence of kernel_accel on the devices
+
data.kernel_power_all = kernel_power_all;
if ((wordlist_mode == WL_MODE_FILE) || (wordlist_mode == WL_MODE_MASK))
// wait for interactive threads
- if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
+ for (uint thread_idx = 0; thread_idx < i_threads_cnt; thread_idx++)
{
- hc_thread_wait (1, &i_thread);
+ hc_thread_wait (1, &i_threads[thread_idx]);
}
+ local_free (i_threads);
+
// we dont need restore file anymore
if (data.restore_disable == 0)
{
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
+ #ifdef LINUX
+ rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_FALSE);
+ #endif
+ #ifdef WIN
+ rc = hm_set_fanspeed_with_device_id_nvapi (device_id, fanspeed, fanpolicy);
+ #endif
}
if (rc == -1) log_info ("WARNING: Failed to restore default fan speed and policy for device #%", device_id + 1);
data.hm_nvml = NULL;
}
+ if (data.hm_nvapi)
+ {
+ hm_NvAPI_Unload (data.hm_nvapi);
+
+ nvapi_close (data.hm_nvapi);
+
+ data.hm_nvapi = NULL;
+ }
+
+ if (data.hm_xnvctrl)
+ {
+ hm_XNVCTRL_XCloseDisplay (data.hm_xnvctrl);
+
+ xnvctrl_close (data.hm_xnvctrl);
+
+ data.hm_xnvctrl = NULL;
+ }
+
if (data.hm_adl)
{
hm_ADL_Main_Control_Destroy (data.hm_adl);