- /**
+/**
* Authors.....: Jens Steube <jens.steube@gmail.com>
* Gabriele Gristina <matrix@hashcat.net>
* magnum <john.magnum@hushmail.com>
* License.....: MIT
*/
-#ifdef OSX
+#ifdef DARWIN
#include <stdio.h>
#endif
* flush
*/
- #ifdef _WIN
- fputc ('\r', out);
- fputc ('\n', out);
- #endif
-
- #ifdef _POSIX
- fputc ('\n', out);
- #endif
-
+ fputs (EOL, out);
fflush (out);
}
if (data.devices_status == STATUS_CRACKED) break;
if (data.devices_status == STATUS_ABORTED) break;
if (data.devices_status == STATUS_QUIT) break;
+ if (data.devices_status == STATUS_BYPASS) break;
/**
* speed
if (data.devices_status == STATUS_CRACKED) break;
if (data.devices_status == STATUS_ABORTED) break;
if (data.devices_status == STATUS_QUIT) break;
+ if (data.devices_status == STATUS_BYPASS) break;
/**
* result
return (p);
}
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while (data.shutdown_inner == 0)
{
hc_sleep (sleep_time);
uint check_left = outfile_check_timer; // or 1 if we want to check it at startup
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while (data.shutdown_inner == 0)
{
hc_sleep (1);
const uint attack_kern = data.attack_kern;
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
hc_thread_mutex_lock (mux_dispatcher);
if (attack_mode == ATTACK_MODE_BF)
{
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
const uint work = get_work (device_param, -1);
u64 words_cur = 0;
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
u64 words_off = 0;
u64 words_fin = 0;
hc_thread_t *outer_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
+ data.shutdown_outer = 0;
+
if (keyspace == 0 && benchmark == 0 && stdout_flag == 0)
{
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
outer_threads_cnt++;
}
-
- hc_thread_create (outer_threads[outer_threads_cnt], thread_monitor, NULL);
-
- outer_threads_cnt++;
}
/**
device_param->device_name_chksum = device_name_chksum;
- // device_processor_cores
+ // vendor specific
if (device_param->device_type & CL_DEVICE_TYPE_GPU)
{
}
}
- // device_processor_cores
-
- if (device_type & CL_DEVICE_TYPE_CPU)
- {
- cl_uint device_processor_cores = 1;
-
- device_param->device_processor_cores = device_processor_cores;
- }
-
if (device_type & CL_DEVICE_TYPE_GPU)
{
- if (device_vendor_id == VENDOR_ID_AMD)
- {
- cl_uint device_processor_cores = 0;
-
- #define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043
-
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof (device_processor_cores), &device_processor_cores, NULL);
-
- device_param->device_processor_cores = device_processor_cores;
- }
- else if (device_vendor_id == VENDOR_ID_NV)
+ if (device_vendor_id == VENDOR_ID_NV)
{
cl_uint kernel_exec_timeout = 0;
device_param->kernel_exec_timeout = kernel_exec_timeout;
- cl_uint device_processor_cores = 0;
-
- #define CL_DEVICE_WARP_SIZE_NV 0x4003
-
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_WARP_SIZE_NV, sizeof (device_processor_cores), &device_processor_cores, NULL);
-
- device_param->device_processor_cores = device_processor_cores;
-
cl_uint sm_minor = 0;
cl_uint sm_major = 0;
device_param->nvidia_spin_damp /= 100;
}
- else
- {
- cl_uint device_processor_cores = 1;
-
- device_param->device_processor_cores = device_processor_cores;
- }
}
// display results
const char *device_name_chksum = device_param->device_name_chksum;
const u32 device_processors = device_param->device_processors;
- const u32 device_processor_cores = device_param->device_processor_cores;
/**
* create context for each device
if ((hash_mode == 8900) || (hash_mode == 9300))
{
+ // we need to check that all hashes have the same scrypt settings
+
+ const u32 scrypt_N = data.salts_buf[0].scrypt_N;
+ const u32 scrypt_r = data.salts_buf[0].scrypt_r;
+ const u32 scrypt_p = data.salts_buf[0].scrypt_p;
+
+ for (uint i = 1; i < salts_cnt; i++)
+ {
+ if ((data.salts_buf[i].scrypt_N != scrypt_N)
+ || (data.salts_buf[i].scrypt_r != scrypt_r)
+ || (data.salts_buf[i].scrypt_p != scrypt_p))
+ {
+ log_error ("ERROR: Mixed scrypt settings not supported");
+
+ return -1;
+ }
+ }
+
uint tmto_start = 0;
uint tmto_stop = 10;
}
}
- for (uint tmto = tmto_start; tmto < tmto_stop; tmto++)
- {
- // TODO: in theory the following calculation needs to be done per salt, not global
- // we assume all hashes have the same scrypt settings
+ data.scrypt_tmp_size = (128 * scrypt_r);
+
+ device_param->kernel_accel_min = 1;
+ device_param->kernel_accel_max = 8;
- size_scryptV = (128 * data.salts_buf[0].scrypt_r) * data.salts_buf[0].scrypt_N;
+ uint tmto;
+
+ for (tmto = tmto_start; tmto < tmto_stop; tmto++)
+ {
+ size_scryptV = (128 * scrypt_r) * scrypt_N;
size_scryptV /= 1 << tmto;
- size_scryptV *= device_processors * device_processor_cores;
+ size_scryptV *= device_param->device_processors * device_param->kernel_threads * device_param->kernel_accel_max;
if (size_scryptV > device_param->device_maxmem_alloc)
{
for (uint salts_pos = 0; salts_pos < data.salts_cnt; salts_pos++)
{
- data.salts_buf[salts_pos].scrypt_tmto = tmto;
- data.salts_buf[salts_pos].scrypt_phy = device_processors * device_processor_cores;
+ data.scrypt_tmto_final = tmto;
}
break;
}
- if (data.salts_buf[0].scrypt_phy == 0)
+ if (tmto == tmto_stop)
{
log_error ("ERROR: Can't allocate enough device memory");
return -1;
}
- if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u, mem: %u\n", data.salts_buf[0].scrypt_tmto, size_scryptV);
+ if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u, mem: %u\n", data.scrypt_tmto_final, size_scryptV);
}
/**
case 7900: size_tmps = kernel_power_max * sizeof (drupal7_tmp_t); break;
case 8200: size_tmps = kernel_power_max * sizeof (pbkdf2_sha512_tmp_t); break;
case 8800: size_tmps = kernel_power_max * sizeof (androidfde_tmp_t); break;
- case 8900: size_tmps = kernel_power_max * sizeof (scrypt_tmp_t); break;
+ case 8900: size_tmps = kernel_power_max * data.scrypt_tmp_size; break;
case 9000: size_tmps = kernel_power_max * sizeof (pwsafe2_tmp_t); break;
case 9100: size_tmps = kernel_power_max * sizeof (lotus8_tmp_t); break;
case 9200: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
- case 9300: size_tmps = kernel_power_max * sizeof (scrypt_tmp_t); break;
+ case 9300: size_tmps = kernel_power_max * data.scrypt_tmp_size; break;
case 9400: size_tmps = kernel_power_max * sizeof (office2007_tmp_t); break;
case 9500: size_tmps = kernel_power_max * sizeof (office2010_tmp_t); break;
case 9600: size_tmps = kernel_power_max * sizeof (office2013_tmp_t); break;
snprintf (build_opts, sizeof (build_opts) - 1, "-I \"%s\"", cpath_real);
- myfree (cpath_real);
-
#else
snprintf (cpath, sizeof (cpath) - 1, "%s/OpenCL/", shared_dir);
snprintf (build_opts, sizeof (build_opts) - 1, "-I %s", cpath_real);
- myfree (cpath_real);
-
#endif
+ // include check
+ // this test needs to be done manually because of the OSX OpenCL runtime:
+ // if there's a permission problem, it does not report back and errors out silently
+
+ #define files_cnt 15
+
+ const char *files_names[files_cnt] =
+ {
+ "inc_cipher_aes256.cl",
+ "inc_cipher_serpent256.cl",
+ "inc_cipher_twofish256.cl",
+ "inc_common.cl",
+ "inc_comp_multi_bs.cl",
+ "inc_comp_multi.cl",
+ "inc_comp_single_bs.cl",
+ "inc_comp_single.cl",
+ "inc_hash_constants.h",
+ "inc_hash_functions.cl",
+ "inc_rp.cl",
+ "inc_rp.h",
+ "inc_simd.cl",
+ "inc_types.cl",
+ "inc_vendor.cl",
+ };
+
+ for (int i = 0; i < files_cnt; i++)
+ {
+ char path[1024] = { 0 };
+
+ snprintf (path, sizeof (path) - 1, "%s/%s", cpath_real, files_names[i]);
+
+ FILE *fd = fopen (path, "r");
+
+ if (fd == NULL)
+ {
+ log_error ("ERROR: %s: fopen(): %s", path, strerror (errno));
+
+ return -1;
+ }
+
+ char buf[1];
+
+ size_t n = fread (buf, 1, 1, fd);
+
+ if (n != 1)
+ {
+ log_error ("ERROR: %s: fread(): %s", path, strerror (errno));
+
+ return -1;
+ }
+
+ fclose (fd);
+ }
+
+ myfree (cpath_real);
+
// we don't have sm_* on vendors not NV but it doesn't matter
char build_opts_new[1024] = { 0 };
- snprintf (build_opts_new, sizeof (build_opts_new) - 1, "%s -D VENDOR_ID=%u -D CUDA_ARCH=%d -D VECT_SIZE=%u -D DEVICE_TYPE=%u -D KERN_TYPE=%u -D _unroll -cl-std=CL1.1", build_opts, device_param->device_vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type, kern_type);
+ snprintf (build_opts_new, sizeof (build_opts_new) - 1, "%s -D VENDOR_ID=%u -D CUDA_ARCH=%d -D VECT_SIZE=%u -D DEVICE_TYPE=%u -D DGST_R0=%u -D DGST_R1=%u -D DGST_R2=%u -D DGST_R3=%u -D DGST_ELEM=%u -D KERN_TYPE=%u -D _unroll -cl-std=CL1.1", build_opts, device_param->device_vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type, data.dgst_pos0, data.dgst_pos1, data.dgst_pos2, data.dgst_pos3, data.dgst_size / 4, kern_type);
strncpy (build_opts, build_opts_new, sizeof (build_opts));
if (force_jit_compilation == 1500)
{
- snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DDESCRYPT_SALT=%d", build_opts, data.salts_buf[0].salt_buf[0]);
+ snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DDESCRYPT_SALT=%u", build_opts, data.salts_buf[0].salt_buf[0]);
}
else if (force_jit_compilation == 8900)
{
- snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DSCRYPT_N=%d -DSCRYPT_R=%d -DSCRYPT_P=%d -DSCRYPT_TMTO=%d", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, 1 << data.salts_buf[0].scrypt_tmto);
+ snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%u -DSCRYPT_TMP_ELEM=%u", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, 1 << data.scrypt_tmto_final, data.scrypt_tmp_size / 16);
}
else
{
* status and monitor threads
*/
- if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
data.devices_status = STATUS_STARTING;
}
hc_thread_t *inner_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
+ data.shutdown_inner = 0;
+
/**
* Outfile remove
*/
if (keyspace == 0 && benchmark == 0 && stdout_flag == 0)
{
+ hc_thread_create (inner_threads[inner_threads_cnt], thread_monitor, NULL);
+
+ inner_threads_cnt++;
+
if (outfile_check_timer != 0)
{
if (data.outfile_check_directory != NULL)
logfile_sub_msg ("START");
- if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
data.devices_status = STATUS_INIT;
}
hc_thread_t *c_threads = (hc_thread_t *) mycalloc (data.devices_cnt, sizeof (hc_thread_t));
- if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
data.devices_status = STATUS_AUTOTUNE;
}
* create cracker threads
*/
- if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
data.devices_status = STATUS_RUNNING;
}
local_free (c_threads);
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_EXHAUSTED;
+ }
+
logfile_sub_var_uint ("status-after-work", data.devices_status);
data.restore = 0;
data.devices_status = STATUS_RUNNING;
}
+ // and overwrite benchmark aborts as well
+
+ if (data.benchmark == 1)
+ {
+ if (data.devices_status == STATUS_ABORTED)
+ {
+ data.devices_status = STATUS_RUNNING;
+ }
+ }
+
// finalize task
if (data.devices_status == STATUS_CRACKED) break;
}
}
- // wait for non-interactive threads
+ // wait for inner threads
- if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
- {
- data.devices_status = STATUS_EXHAUSTED;
- }
+ data.shutdown_inner = 1;
for (uint thread_idx = 0; thread_idx < inner_threads_cnt; thread_idx++)
{
#ifdef HAVE_HWMON
if (gpu_temp_disable == 0)
{
- if (gpu_temp_retain != 0) // VENDOR_ID_AMD is implied here
+ if (gpu_temp_retain != 0)
{
hc_thread_mutex_lock (mux_adl);
// reset power tuning
- if (powertune_enable == 1) // VENDOR_ID_AMD is implied here
+ if (powertune_enable == 1)
{
hc_thread_mutex_lock (mux_adl);
if (data.devices_status == STATUS_QUIT) break;
}
- // wait for interactive threads
+ // wait for outer threads
+
+ data.shutdown_outer = 1;
for (uint thread_idx = 0; thread_idx < outer_threads_cnt; thread_idx++)
{