- /**
+/**
* Authors.....: Jens Steube <jens.steube@gmail.com>
* Gabriele Gristina <matrix@hashcat.net>
* magnum <john.magnum@hushmail.com>
* License.....: MIT
*/
-#ifdef OSX
+#ifdef __APPLE__
+#include <stdio.h>
+#endif
+
+#ifdef __FreeBSD__
#include <stdio.h>
#endif
#define SEPARATOR ':'
#define BITMAP_MIN 16
#define BITMAP_MAX 24
+#define NVIDIA_SPIN_DAMP 100
#define GPU_TEMP_DISABLE 0
#define GPU_TEMP_ABORT 90
-#define GPU_TEMP_RETAIN 0
+#define GPU_TEMP_RETAIN 75
#define WORKLOAD_PROFILE 2
#define KERNEL_ACCEL 0
#define KERNEL_LOOPS 0
#define KERNEL_COMBS 1024
#define KERNEL_BFS 1024
#define KERNEL_THREADS_MAX 256
-#define KERNEL_THREADS_MAX_CPU 16
+#define KERNEL_THREADS_MAX_CPU 1
#define POWERTUNE_ENABLE 0
#define LOGFILE_DISABLE 0
#define SCRYPT_TMTO 0
"- [ Options ] -",
"",
" Options Short / Long | Type | Description | Example",
- "===============================|======|======================================================|=======================",
+ "===============================+======+======================================================+=======================",
" -m, --hash-type | Num | Hash-type, see references below | -m 1000",
" -a, --attack-mode | Num | Attack-mode, see references below | -a 3",
" -V, --version | | Print version |",
" --outfile-check-timer | Num | Sets seconds between outfile checks to X | --outfile-check=30",
" -p, --separator | Char | Separator char for hashlists and outfile | -p :",
" --stdout | | Do not crack a hash, instead print candidates only |",
- " --show | | Show cracked passwords only |",
- " --left | | Show un-cracked passwords only |",
+ " --show | | Compare hashlist with potfile; Show cracked hashes |",
+ " --left | | Compare hashlist with potfile; Show uncracked hashes |",
" --username | | Enable ignoring of usernames in hashfile |",
" --remove | | Enable remove of hash once it is cracked |",
" --remove-timer | Num | Update input hash file each X seconds | --remove-timer=30",
" --cpu-affinity | Str | Locks to CPU devices, separate with comma | --cpu-affinity=1,2,3",
" --opencl-platforms | Str | OpenCL platforms to use, separate with comma | --opencl-platforms=2",
" -d, --opencl-devices | Str | OpenCL devices to use, separate with comma | -d 1",
- " --opencl-device-types | Str | OpenCL device-types to use, separate with comma | --opencl-device-type=1",
+ " -D, --opencl-device-types | Str | OpenCL device-types to use, separate with comma | -D 1",
" --opencl-vector-width | Num | Manual override OpenCL vector-width to X | --opencl-vector=4",
" -w, --workload-profile | Num | Enable a specific workload profile, see pool below | -w 3",
" -n, --kernel-accel | Num | Manual workload tuning, set outerloop step size to X | -n 64",
" -u, --kernel-loops | Num | Manual workload tuning, set innerloop step size to X | -u 256",
+ " --nvidia-spin-damp | Num | Workaround NVidias CPU burning loop bug, in percent | --nvidia-spin-damp=50",
" --gpu-temp-disable | | Disable temperature and fanspeed reads and triggers |",
#ifdef HAVE_HWMON
" --gpu-temp-abort | Num | Abort if GPU temperature reaches X degrees celsius | --gpu-temp-abort=100",
" -s, --skip | Num | Skip X words from the start | -s 1000000",
" -l, --limit | Num | Limit X words from the start + skipped words | -l 1000000",
" --keyspace | | Show keyspace base:mod values and quit |",
- " -j, --rule-left | Rule | Single Rule applied to each word from left wordlist | -j 'c'",
- " -k, --rule-right | Rule | Single Rule applied to each word from right wordlist | -k '^-'",
- " -r, --rules-file | File | Multiple Rules applied to each word from wordlists | -r rules/best64.rule",
+ " -j, --rule-left | Rule | Single rule applied to each word from left wordlist | -j 'c'",
+ " -k, --rule-right | Rule | Single rule applied to each word from right wordlist | -k '^-'",
+ " -r, --rules-file | File | Multiple rules applied to each word from wordlists | -r rules/best64.rule",
" -g, --generate-rules | Num | Generate X random rules | -g 10000",
" --generate-rules-func-min | Num | Force min X funcs per rule |",
" --generate-rules-func-max | Num | Force max X funcs per rule |",
" 40 | md5($salt.unicode($pass)) | Raw Hash, Salted and / or Iterated",
" 3800 | md5($salt.$pass.$salt) | Raw Hash, Salted and / or Iterated",
" 3710 | md5($salt.md5($pass)) | Raw Hash, Salted and / or Iterated",
- " 2600 | md5(md5($pass) | Raw Hash, Salted and / or Iterated",
+ " 2600 | md5(md5($pass)) | Raw Hash, Salted and / or Iterated",
" 4300 | md5(strtoupper(md5($pass))) | Raw Hash, Salted and / or Iterated",
" 4400 | md5(sha1($pass)) | Raw Hash, Salted and / or Iterated",
" 110 | sha1($pass.$salt) | Raw Hash, Salted and / or Iterated",
" 120 | sha1($salt.$pass) | Raw Hash, Salted and / or Iterated",
" 130 | sha1(unicode($pass).$salt) | Raw Hash, Salted and / or Iterated",
" 140 | sha1($salt.unicode($pass)) | Raw Hash, Salted and / or Iterated",
- " 4500 | sha1(sha1($pass) | Raw Hash, Salted and / or Iterated",
+ " 4500 | sha1(sha1($pass)) | Raw Hash, Salted and / or Iterated",
" 4700 | sha1(md5($pass)) | Raw Hash, Salted and / or Iterated",
" 4900 | sha1($salt.$pass.$salt) | Raw Hash, Salted and / or Iterated",
" 1410 | sha256($pass.$salt) | Raw Hash, Salted and / or Iterated",
" 3 | High | 96 ms | High | Unresponsive",
" 4 | Nightmare | 480 ms | Insane | Headless",
"",
- "If you have no idea what just happened then visit the following pages:",
+ "- [ Basic Examples ] -",
+ "",
+ " Attack- | Hash- |",
+ " Mode | Type | Example command",
+ " ==================+=======+==================================================================",
+ " Wordlist | $P$ | %s -a 0 -m 400 example400.hash example.dict",
+ " Wordlist + Rules | MD5 | %s -a 0 -m 0 example0.hash example.dict -r rules/best64.rule",
+ " Brute-Force | MD5 | %s -a 3 -m 0 example0.hash ?a?a?a?a?a?a",
+ " Combinator | MD5 | %s -a 1 -m 0 example0.hash example.dict example.dict",
+ "",
+ "If you still have no idea what just happened try following pages:",
"",
"* https://hashcat.net/wiki/#howtos_videos_papers_articles_etc_in_the_wild",
"* https://hashcat.net/wiki/#frequently_asked_questions",
- "",
NULL
};
* flush
*/
- #ifdef _WIN
- fputc ('\r', out);
- fputc ('\n', out);
- #endif
-
- #ifdef _POSIX
- fputc ('\n', out);
- #endif
-
+ fputs (EOL, out);
fflush (out);
}
{
if (data.devices_status == STATUS_INIT) return;
if (data.devices_status == STATUS_STARTING) return;
- if (data.devices_status == STATUS_BYPASS) return;
+
+ // in this case some required buffers are free'd, ascii_digest() would run into segfault
+ if (data.shutdown_inner == 1) return;
if (data.machine_readable == 1)
{
}
else
{
- char display_etc[32] = { 0 };
+ char display_etc[32] = { 0 };
+ char display_runtime[32] = { 0 };
struct tm tm_etc;
+ struct tm tm_runtime;
struct tm *tmp = NULL;
#ifdef WIN
-
tmp = _gmtime64 (&sec_etc);
-
#else
-
tmp = gmtime (&sec_etc);
-
#endif
if (tmp != NULL)
{
- memset (&tm_etc, 0, sizeof (tm_etc));
-
memcpy (&tm_etc, tmp, sizeof (tm_etc));
format_timer_display (&tm_etc, display_etc, sizeof (display_etc));
if (etc[etc_len - 1] == '\n') etc[etc_len - 1] = 0;
if (etc[etc_len - 2] == '\r') etc[etc_len - 2] = 0;
- log_info ("Time.Estimated.: %s (%s)", etc, display_etc);
+ if (data.runtime)
+ {
+ time_t runtime_cur;
+
+ time (&runtime_cur);
+
+ #ifdef WIN
+
+ __time64_t runtime_left = data.proc_start + data.runtime - runtime_cur;
+
+ tmp = _gmtime64 (&runtime_left);
+
+ #else
+
+ time_t runtime_left = data.proc_start + data.runtime - runtime_cur;
+
+ tmp = gmtime (&runtime_left);
+
+ #endif
+
+ if ((tmp != NULL) && (runtime_left > 0) && (runtime_left < sec_etc))
+ {
+ memcpy (&tm_runtime, tmp, sizeof (tm_runtime));
+
+ format_timer_display (&tm_runtime, display_runtime, sizeof (display_runtime));
+
+ log_info ("Time.Estimated.: %s (%s), but limited (%s)", etc, display_etc, display_runtime);
+ }
+ else
+ {
+ log_info ("Time.Estimated.: %s (%s), but limit exceeded", etc, display_etc);
+ }
+ }
+ else
+ {
+ log_info ("Time.Estimated.: %s (%s)", etc, display_etc);
+ }
}
}
}
{
if (data.devices_status == STATUS_INIT) return;
if (data.devices_status == STATUS_STARTING) return;
- if (data.devices_status == STATUS_BYPASS) return;
+
+ if (data.shutdown_inner == 1) return;
if (data.machine_readable == 1)
{
format_speed_display (hashes_dev_ms[device_id] * 1000, display_dev_cur, sizeof (display_dev_cur));
- log_info ("Speed.Dev.#%d.: %9sH/s (%0.2fms)", device_id + 1, display_dev_cur, exec_all_ms[device_id]);
+ if (data.devices_active >= 10)
+ {
+ log_info ("Speed.Dev.#%d: %9sH/s (%0.2fms)", device_id + 1, display_dev_cur, exec_all_ms[device_id]);
+ }
+ else
+ {
+ log_info ("Speed.Dev.#%d.: %9sH/s (%0.2fms)", device_id + 1, display_dev_cur, exec_all_ms[device_id]);
+ }
}
char display_all_cur[16] = { 0 };
snprintf (cached_file, 255, "%s/kernels/amp_a%d.%s.kernel", profile_dir, attack_kern, device_name_chksum);
}
/**
 * Return a pointer to the basename component of a path.
 *
 * Looks for the last '/' first, then the last '\\'; when neither
 * separator occurs the whole input is already a bare filename.
 * No allocation: the result points into the caller's buffer.
 */
static char *filename_from_filepath (char *filepath)
{
  char *sep = strrchr (filepath, '/');

  if (sep == NULL) sep = strrchr (filepath, '\\');

  return (sep != NULL) ? sep + 1 : filepath;
}
+
static uint convert_from_hex (char *line_buf, const uint line_len)
{
if (line_len & 1) return (line_len); // not in hex
fflush (stdout);
}
-static void gidd_to_pw_t (hc_device_param_t *device_param, const u64 gidd, pw_t *pw)
+static int gidd_to_pw_t (hc_device_param_t *device_param, const u64 gidd, pw_t *pw)
{
- hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, gidd * sizeof (pw_t), sizeof (pw_t), pw, 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, gidd * sizeof (pw_t), sizeof (pw_t), pw, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueReadBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ return 0;
}
static void check_hash (hc_device_param_t *device_param, plain_t *plain)
}
}
-static void check_cracked (hc_device_param_t *device_param, const uint salt_pos)
+static int check_cracked (hc_device_param_t *device_param, const uint salt_pos)
{
salt_t *salt_buf = &data.salts_buf[salt_pos];
u32 num_cracked;
- hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_result, CL_TRUE, 0, sizeof (u32), &num_cracked, 0, NULL, NULL);
+ cl_int CL_err;
+
+ CL_err = hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_result, CL_TRUE, 0, sizeof (u32), &num_cracked, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueReadBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (num_cracked)
{
plain_t *cracked = (plain_t *) mycalloc (num_cracked, sizeof (plain_t));
- hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_plain_bufs, CL_TRUE, 0, num_cracked * sizeof (plain_t), cracked, 0, NULL, NULL);
+ CL_err = hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_plain_bufs, CL_TRUE, 0, num_cracked * sizeof (plain_t), cracked, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueReadBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
uint cpt_cracked = 0;
+ hc_thread_mutex_lock (mux_display);
+
for (uint i = 0; i < num_cracked; i++)
{
const uint hash_pos = cracked[i].hash_pos;
if (data.digests_shown[hash_pos] == 1) continue;
- hc_thread_mutex_lock (mux_display);
-
if ((data.opts_type & OPTS_TYPE_PT_NEVERCRACK) == 0)
{
data.digests_shown[hash_pos] = 1;
if (data.salts_done == data.salts_cnt) data.devices_status = STATUS_CRACKED;
- hc_thread_mutex_unlock (mux_display);
-
check_hash (device_param, &cracked[i]);
}
+ hc_thread_mutex_unlock (mux_display);
+
myfree (cracked);
if (cpt_cracked > 0)
memset (data.digests_shown_tmp, 0, salt_buf->digests_cnt * sizeof (uint));
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_digests_shown, CL_TRUE, salt_buf->digests_offset * sizeof (uint), salt_buf->digests_cnt * sizeof (uint), &data.digests_shown_tmp[salt_buf->digests_offset], 0, NULL, NULL);
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_digests_shown, CL_TRUE, salt_buf->digests_offset * sizeof (uint), salt_buf->digests_cnt * sizeof (uint), &data.digests_shown_tmp[salt_buf->digests_offset], 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
num_cracked = 0;
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_result, CL_TRUE, 0, sizeof (u32), &num_cracked, 0, NULL, NULL);
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_result, CL_TRUE, 0, sizeof (u32), &num_cracked, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+// stolen from princeprocessor ;)
+
+typedef struct
+{
+ FILE *fp;
+
+ char buf[BUFSIZ];
+ int len;
+
+} out_t;
+
+static void out_flush (out_t *out)
+{
+ fwrite (out->buf, 1, out->len, out->fp);
+
+ out->len = 0;
+}
+
+static void out_push (out_t *out, const u8 *pw_buf, const int pw_len)
+{
+ char *ptr = out->buf + out->len;
+
+ memcpy (ptr, pw_buf, pw_len);
+
+ ptr[pw_len] = '\n';
+
+ out->len += pw_len + 1;
+
+ if (out->len >= BUFSIZ - 100)
+ {
+ out_flush (out);
}
}
static void process_stdout (hc_device_param_t *device_param, const uint pws_cnt)
{
- char out_buf[HCBUFSIZ] = { 0 };
+ out_t out;
+
+ out.fp = stdout;
+ out.len = 0;
uint plain_buf[16] = { 0 };
uint plain_len = 0;
- const uint il_cnt = device_param->kernel_params_buf32[27]; // ugly, i know
+ const uint il_cnt = device_param->kernel_params_buf32[30]; // ugly, i know
if (data.attack_mode == ATTACK_MODE_STRAIGHT)
{
if (plain_len > data.pw_max) plain_len = data.pw_max;
- format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ out_push (&out, plain_ptr, plain_len);
}
}
}
if (plain_len > data.pw_max) plain_len = data.pw_max;
}
- format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ out_push (&out, plain_ptr, plain_len);
}
}
}
plain_len = data.css_cnt;
- format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ out_push (&out, plain_ptr, plain_len);
}
}
}
plain_len += start + stop;
- format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ out_push (&out, plain_ptr, plain_len);
}
}
}
plain_len += start + stop;
- format_output (stdout, out_buf, plain_ptr, plain_len, 0, NULL, 0);
+ out_push (&out, plain_ptr, plain_len);
}
}
}
+
+ out_flush (&out);
}
static void save_hash ()
unlink (old_hashfile);
}
-static void run_kernel (const uint kern_run, hc_device_param_t *device_param, const uint num, const uint event_update, const uint iteration)
+static int run_kernel (const uint kern_run, hc_device_param_t *device_param, const uint num, const uint event_update, const uint iteration)
{
+ cl_int CL_err = CL_SUCCESS;
+
uint num_elements = num;
- device_param->kernel_params_buf32[30] = data.combs_mode;
- device_param->kernel_params_buf32[31] = num;
+ device_param->kernel_params_buf32[33] = data.combs_mode;
+ device_param->kernel_params_buf32[34] = num;
uint kernel_threads = device_param->kernel_threads;
case KERN_RUN_3: kernel = device_param->kernel3; break;
}
- hc_clSetKernelArg (data.ocl, kernel, 21, sizeof (cl_uint), device_param->kernel_params[21]);
- hc_clSetKernelArg (data.ocl, kernel, 22, sizeof (cl_uint), device_param->kernel_params[22]);
- hc_clSetKernelArg (data.ocl, kernel, 23, sizeof (cl_uint), device_param->kernel_params[23]);
- hc_clSetKernelArg (data.ocl, kernel, 24, sizeof (cl_uint), device_param->kernel_params[24]);
- hc_clSetKernelArg (data.ocl, kernel, 25, sizeof (cl_uint), device_param->kernel_params[25]);
- hc_clSetKernelArg (data.ocl, kernel, 26, sizeof (cl_uint), device_param->kernel_params[26]);
- hc_clSetKernelArg (data.ocl, kernel, 27, sizeof (cl_uint), device_param->kernel_params[27]);
- hc_clSetKernelArg (data.ocl, kernel, 28, sizeof (cl_uint), device_param->kernel_params[28]);
- hc_clSetKernelArg (data.ocl, kernel, 29, sizeof (cl_uint), device_param->kernel_params[29]);
- hc_clSetKernelArg (data.ocl, kernel, 30, sizeof (cl_uint), device_param->kernel_params[30]);
- hc_clSetKernelArg (data.ocl, kernel, 31, sizeof (cl_uint), device_param->kernel_params[31]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 24, sizeof (cl_uint), device_param->kernel_params[24]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 25, sizeof (cl_uint), device_param->kernel_params[25]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 26, sizeof (cl_uint), device_param->kernel_params[26]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 27, sizeof (cl_uint), device_param->kernel_params[27]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 28, sizeof (cl_uint), device_param->kernel_params[28]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 29, sizeof (cl_uint), device_param->kernel_params[29]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 30, sizeof (cl_uint), device_param->kernel_params[30]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 31, sizeof (cl_uint), device_param->kernel_params[31]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 32, sizeof (cl_uint), device_param->kernel_params[32]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 33, sizeof (cl_uint), device_param->kernel_params[33]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 34, sizeof (cl_uint), device_param->kernel_params[34]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
cl_event event;
const size_t global_work_size[3] = { num_elements, 32, 1 };
const size_t local_work_size[3] = { kernel_threads / 32, 32, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 2, NULL, global_work_size, local_work_size, 0, NULL, &event);
+ CL_err = hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 2, NULL, global_work_size, local_work_size, 0, NULL, &event);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueNDRangeKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else
{
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, &event);
+ CL_err = hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, &event);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueNDRangeKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
- hc_clFlush (data.ocl, device_param->command_queue);
+ CL_err = hc_clFlush (data.ocl, device_param->command_queue);
- if (data.devices_status == STATUS_RUNNING)
+ if (CL_err != CL_SUCCESS)
{
- if (iteration < EXPECTED_ITERATIONS)
+ log_error ("ERROR: clFlush(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ if (device_param->nvidia_spin_damp)
+ {
+ if (data.devices_status == STATUS_RUNNING)
{
- switch (kern_run)
+ if (iteration < EXPECTED_ITERATIONS)
{
- case KERN_RUN_1: if (device_param->exec_us_prev1[iteration]) usleep (device_param->exec_us_prev1[iteration]); break;
- case KERN_RUN_2: if (device_param->exec_us_prev2[iteration]) usleep (device_param->exec_us_prev2[iteration]); break;
- case KERN_RUN_3: if (device_param->exec_us_prev3[iteration]) usleep (device_param->exec_us_prev3[iteration]); break;
+ switch (kern_run)
+ {
+ case KERN_RUN_1: if (device_param->exec_us_prev1[iteration]) usleep (device_param->exec_us_prev1[iteration] * device_param->nvidia_spin_damp); break;
+ case KERN_RUN_2: if (device_param->exec_us_prev2[iteration]) usleep (device_param->exec_us_prev2[iteration] * device_param->nvidia_spin_damp); break;
+ case KERN_RUN_3: if (device_param->exec_us_prev3[iteration]) usleep (device_param->exec_us_prev3[iteration] * device_param->nvidia_spin_damp); break;
+ }
}
}
}
- hc_clWaitForEvents (data.ocl, 1, &event);
+ CL_err = hc_clWaitForEvents (data.ocl, 1, &event);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clWaitForEvents(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
cl_ulong time_start;
cl_ulong time_end;
- hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_START, sizeof (time_start), &time_start, NULL);
- hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_END, sizeof (time_end), &time_end, NULL);
+ CL_err |= hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_START, sizeof (time_start), &time_start, NULL);
+ CL_err |= hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_END, sizeof (time_end), &time_end, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetEventProfilingInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
const double exec_us = (double) (time_end - time_start) / 1000;
device_param->exec_pos = exec_pos;
}
- hc_clReleaseEvent (data.ocl, event);
+ CL_err = hc_clReleaseEvent (data.ocl, event);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clReleaseEvent(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clFinish (data.ocl, device_param->command_queue);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFinish(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
- hc_clFinish (data.ocl, device_param->command_queue);
+ return 0;
}
-static void run_kernel_mp (const uint kern_run, hc_device_param_t *device_param, const uint num)
+static int run_kernel_mp (const uint kern_run, hc_device_param_t *device_param, const uint num)
{
+ cl_int CL_err = CL_SUCCESS;
+
uint num_elements = num;
switch (kern_run)
switch (kern_run)
{
- case KERN_RUN_MP: hc_clSetKernelArg (data.ocl, kernel, 3, sizeof (cl_ulong), device_param->kernel_params_mp[3]);
- hc_clSetKernelArg (data.ocl, kernel, 4, sizeof (cl_uint), device_param->kernel_params_mp[4]);
- hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_mp[5]);
- hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_mp[6]);
- hc_clSetKernelArg (data.ocl, kernel, 7, sizeof (cl_uint), device_param->kernel_params_mp[7]);
- hc_clSetKernelArg (data.ocl, kernel, 8, sizeof (cl_uint), device_param->kernel_params_mp[8]);
+ case KERN_RUN_MP: CL_err |= hc_clSetKernelArg (data.ocl, kernel, 3, sizeof (cl_ulong), device_param->kernel_params_mp[3]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 4, sizeof (cl_uint), device_param->kernel_params_mp[4]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_mp[5]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_mp[6]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 7, sizeof (cl_uint), device_param->kernel_params_mp[7]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 8, sizeof (cl_uint), device_param->kernel_params_mp[8]);
break;
- case KERN_RUN_MP_R: hc_clSetKernelArg (data.ocl, kernel, 3, sizeof (cl_ulong), device_param->kernel_params_mp_r[3]);
- hc_clSetKernelArg (data.ocl, kernel, 4, sizeof (cl_uint), device_param->kernel_params_mp_r[4]);
- hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_mp_r[5]);
- hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_mp_r[6]);
- hc_clSetKernelArg (data.ocl, kernel, 7, sizeof (cl_uint), device_param->kernel_params_mp_r[7]);
- hc_clSetKernelArg (data.ocl, kernel, 8, sizeof (cl_uint), device_param->kernel_params_mp_r[8]);
+ case KERN_RUN_MP_R: CL_err |= hc_clSetKernelArg (data.ocl, kernel, 3, sizeof (cl_ulong), device_param->kernel_params_mp_r[3]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 4, sizeof (cl_uint), device_param->kernel_params_mp_r[4]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_mp_r[5]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_mp_r[6]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 7, sizeof (cl_uint), device_param->kernel_params_mp_r[7]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 8, sizeof (cl_uint), device_param->kernel_params_mp_r[8]);
break;
- case KERN_RUN_MP_L: hc_clSetKernelArg (data.ocl, kernel, 3, sizeof (cl_ulong), device_param->kernel_params_mp_l[3]);
- hc_clSetKernelArg (data.ocl, kernel, 4, sizeof (cl_uint), device_param->kernel_params_mp_l[4]);
- hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_mp_l[5]);
- hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_mp_l[6]);
- hc_clSetKernelArg (data.ocl, kernel, 7, sizeof (cl_uint), device_param->kernel_params_mp_l[7]);
- hc_clSetKernelArg (data.ocl, kernel, 8, sizeof (cl_uint), device_param->kernel_params_mp_l[8]);
- hc_clSetKernelArg (data.ocl, kernel, 9, sizeof (cl_uint), device_param->kernel_params_mp_l[9]);
+ case KERN_RUN_MP_L: CL_err |= hc_clSetKernelArg (data.ocl, kernel, 3, sizeof (cl_ulong), device_param->kernel_params_mp_l[3]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 4, sizeof (cl_uint), device_param->kernel_params_mp_l[4]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_mp_l[5]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_mp_l[6]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 7, sizeof (cl_uint), device_param->kernel_params_mp_l[7]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 8, sizeof (cl_uint), device_param->kernel_params_mp_l[8]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 9, sizeof (cl_uint), device_param->kernel_params_mp_l[9]);
break;
}
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+ CL_err = hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueNDRangeKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clFlush (data.ocl, device_param->command_queue);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFlush(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clFinish (data.ocl, device_param->command_queue);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFinish(): %s\n", val2cstr_cl (CL_err));
- hc_clFlush (data.ocl, device_param->command_queue);
+ return -1;
+ }
- hc_clFinish (data.ocl, device_param->command_queue);
+ return 0;
}
-static void run_kernel_tm (hc_device_param_t *device_param)
+static int run_kernel_tm (hc_device_param_t *device_param)
{
+ cl_int CL_err = CL_SUCCESS;
+
const uint num_elements = 1024; // fixed
uint kernel_threads = 32;
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+ CL_err = hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueNDRangeKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clFlush (data.ocl, device_param->command_queue);
- hc_clFlush (data.ocl, device_param->command_queue);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFlush(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clFinish (data.ocl, device_param->command_queue);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFinish(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
- hc_clFinish (data.ocl, device_param->command_queue);
+ return 0;
}
-static void run_kernel_amp (hc_device_param_t *device_param, const uint num)
+static int run_kernel_amp (hc_device_param_t *device_param, const uint num)
{
+ cl_int CL_err = CL_SUCCESS;
+
uint num_elements = num;
device_param->kernel_params_amp_buf32[5] = data.combs_mode;
cl_kernel kernel = device_param->kernel_amp;
- hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_amp[5]);
- hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_amp[6]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_amp[5]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_amp[6]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+ CL_err = hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueNDRangeKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clFlush (data.ocl, device_param->command_queue);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFlush(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clFinish (data.ocl, device_param->command_queue);
- hc_clFlush (data.ocl, device_param->command_queue);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFinish(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
- hc_clFinish (data.ocl, device_param->command_queue);
+ return 0;
}
-static void run_kernel_memset (hc_device_param_t *device_param, cl_mem buf, const uint value, const uint num)
+static int run_kernel_memset (hc_device_param_t *device_param, cl_mem buf, const uint value, const uint num)
{
+ cl_int CL_err = CL_SUCCESS;
+
const u32 num16d = num / 16;
const u32 num16m = num % 16;
cl_kernel kernel = device_param->kernel_memset;
- hc_clSetKernelArg (data.ocl, kernel, 0, sizeof (cl_mem), (void *) &buf);
- hc_clSetKernelArg (data.ocl, kernel, 1, sizeof (cl_uint), device_param->kernel_params_memset[1]);
- hc_clSetKernelArg (data.ocl, kernel, 2, sizeof (cl_uint), device_param->kernel_params_memset[2]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 0, sizeof (cl_mem), (void *) &buf);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 1, sizeof (cl_uint), device_param->kernel_params_memset[1]);
+ CL_err |= hc_clSetKernelArg (data.ocl, kernel, 2, sizeof (cl_uint), device_param->kernel_params_memset[2]);
- const size_t global_work_size[3] = { num_elements, 1, 1 };
- const size_t local_work_size[3] = { kernel_threads, 1, 1 };
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+ return -1;
+ }
- hc_clFlush (data.ocl, device_param->command_queue);
+ const size_t global_work_size[3] = { num_elements, 1, 1 };
+ const size_t local_work_size[3] = { kernel_threads, 1, 1 };
- hc_clFinish (data.ocl, device_param->command_queue);
- }
+ CL_err = hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
- if (num16m)
- {
- u32 tmp[4];
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueNDRangeKernel(): %s\n", val2cstr_cl (CL_err));
- tmp[0] = value;
- tmp[1] = value;
- tmp[2] = value;
- tmp[3] = value;
+ return -1;
+ }
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, buf, CL_TRUE, num16d * 16, num16m, tmp, 0, NULL, NULL);
- }
-}
+ CL_err = hc_clFlush (data.ocl, device_param->command_queue);
-static void run_kernel_bzero (hc_device_param_t *device_param, cl_mem buf, const size_t size)
-{
- run_kernel_memset (device_param, buf, 0, size);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFlush(): %s\n", val2cstr_cl (CL_err));
- /*
- int rc = -1;
+ return -1;
+ }
- if (device_param->opencl_v12 && device_param->platform_vendor_id == VENDOR_ID_AMD)
- {
- // So far tested, amd is the only supporting this OpenCL 1.2 function without segfaulting
+ CL_err = hc_clFinish (data.ocl, device_param->command_queue);
- const cl_uchar zero = 0;
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clFinish(): %s\n", val2cstr_cl (CL_err));
- rc = hc_clEnqueueFillBuffer (data.ocl, device_param->command_queue, buf, &zero, sizeof (cl_uchar), 0, size, 0, NULL, NULL);
+ return -1;
+ }
}
- if (rc != 0)
+ if (num16m)
{
- // NOTE: clEnqueueFillBuffer () always fails with -59
- // IOW, it's not supported by Nvidia drivers <= 352.21, also pocl segfaults, also on apple
- // How's that possible, OpenCL 1.2 support is advertised??
- // We need to workaround...
+ u32 tmp[4];
- #define FILLSZ 0x100000
+ tmp[0] = value;
+ tmp[1] = value;
+ tmp[2] = value;
+ tmp[3] = value;
- char *tmp = (char *) mymalloc (FILLSZ);
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, buf, CL_TRUE, num16d * 16, num16m, tmp, 0, NULL, NULL);
- for (size_t i = 0; i < size; i += FILLSZ)
+ if (CL_err != CL_SUCCESS)
{
- const size_t left = size - i;
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
- const size_t fillsz = MIN (FILLSZ, left);
-
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, buf, CL_TRUE, i, fillsz, tmp, 0, NULL, NULL);
+ return -1;
}
-
- myfree (tmp);
}
- */
+
+ return 0;
+}
+
+static int run_kernel_bzero (hc_device_param_t *device_param, cl_mem buf, const size_t size)
+{
+ return run_kernel_memset (device_param, buf, 0, size);
}
-static void choose_kernel (hc_device_param_t *device_param, const uint attack_exec, const uint attack_mode, const uint opts_type, const salt_t *salt_buf, const uint highest_pw_len, const uint pws_cnt, const uint fast_iteration)
+static int choose_kernel (hc_device_param_t *device_param, const uint attack_exec, const uint attack_mode, const uint opts_type, const salt_t *salt_buf, const uint highest_pw_len, const uint pws_cnt, const uint fast_iteration)
{
+ cl_int CL_err = CL_SUCCESS;
+
if (data.hash_mode == 2000)
{
process_stdout (device_param, pws_cnt);
- return;
+ return 0;
}
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
run_kernel_tm (device_param);
- hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_tm_c, device_param->d_bfs_c, 0, 0, size_tm, 0, NULL, NULL);
+ CL_err = hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_tm_c, device_param->d_bfs_c, 0, 0, size_tm, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueCopyBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
{
run_kernel (KERN_RUN_12, device_param, pws_cnt, false, 0);
- hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+ CL_err = hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
- // do something with data
-
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
- }
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueReadBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ // do something with data
+
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+ }
uint iter = salt_buf->salt_iter;
loop_left = MIN (loop_left, loop_step);
- device_param->kernel_params_buf32[25] = loop_pos;
- device_param->kernel_params_buf32[26] = loop_left;
+ device_param->kernel_params_buf32[28] = loop_pos;
+ device_param->kernel_params_buf32[29] = loop_left;
run_kernel (KERN_RUN_2, device_param, pws_cnt, true, slow_iteration);
if (data.devices_status == STATUS_CRACKED) break;
if (data.devices_status == STATUS_ABORTED) break;
if (data.devices_status == STATUS_QUIT) break;
+ if (data.devices_status == STATUS_BYPASS) break;
/**
* speed
{
run_kernel (KERN_RUN_23, device_param, pws_cnt, false, 0);
- hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+ CL_err = hc_clEnqueueReadBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueReadBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
// do something with data
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
run_kernel (KERN_RUN_3, device_param, pws_cnt, false, 0);
}
+
+ return 0;
}
static int run_rule_engine (const int rule_len, const char *rule_buf)
return 1;
}
-static void run_copy (hc_device_param_t *device_param, const uint pws_cnt)
+static int run_copy (hc_device_param_t *device_param, const uint pws_cnt)
{
+ cl_int CL_err = CL_SUCCESS;
+
if (data.attack_kern == ATTACK_KERN_STRAIGHT)
{
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, pws_cnt * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, pws_cnt * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (data.attack_kern == ATTACK_KERN_COMBI)
{
}
}
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, pws_cnt * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, pws_cnt * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (data.attack_kern == ATTACK_KERN_BF)
{
run_kernel_mp (KERN_RUN_MP_L, device_param, pws_cnt);
}
+
+ return 0;
}
static double try_run (hc_device_param_t *device_param, const u32 kernel_accel, const u32 kernel_loops)
{
const u32 kernel_power_try = device_param->device_processors * device_param->kernel_threads * kernel_accel;
- device_param->kernel_params_buf32[25] = 0;
- device_param->kernel_params_buf32[26] = kernel_loops; // not a bug, both need to be set
- device_param->kernel_params_buf32[27] = kernel_loops; // because there's two variables for inner iters for slow and fast hashes
+ device_param->kernel_params_buf32[28] = 0;
+ device_param->kernel_params_buf32[29] = kernel_loops; // not a bug, both need to be set
+ device_param->kernel_params_buf32[30] = kernel_loops; // because there's two variables for inner iters for slow and fast hashes
if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
return exec_ms_prev;
}
-static void autotune (hc_device_param_t *device_param)
+static int autotune (hc_device_param_t *device_param)
{
const double target_ms = TARGET_MS_PROFILE[data.workload_profile - 1];
device_param->kernel_power = kernel_power;
- return;
+ return 0;
}
// from here it's clear we are allowed to autotune
device_param->pws_buf[i].pw_len = 7 + (i & 7);
}
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, kernel_power_max * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, kernel_power_max * sizeof (pw_t), device_param->pws_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
if (data.attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (data.kernel_rules_cnt > 1)
{
- hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_rules, device_param->d_rules_c, 0, 0, MIN (kernel_loops_max, KERNEL_RULES) * sizeof (kernel_rule_t), 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_rules, device_param->d_rules_c, 0, 0, MIN (kernel_loops_max, KERNEL_RULES) * sizeof (kernel_rule_t), 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueCopyBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
else
{
clear_prompt ();
- log_info ("Device #%u: autotuned kernel-accel to %u\n"
- "Device #%u: autotuned kernel-loops to %u\n",
+ log_info ("- Device #%u: autotuned kernel-accel to %u\n"
+ "- Device #%u: autotuned kernel-loops to %u\n",
device_param->device_id + 1, kernel_accel,
device_param->device_id + 1, kernel_loops);
}
#endif
+
+ return 0;
}
-static void run_cracker (hc_device_param_t *device_param, const uint pws_cnt)
+static int run_cracker (hc_device_param_t *device_param, const uint pws_cnt)
{
char *line_buf = (char *) mymalloc (HCBUFSIZ);
salt_t *salt_buf = &data.salts_buf[salt_pos];
- device_param->kernel_params_buf32[24] = salt_pos;
- device_param->kernel_params_buf32[28] = salt_buf->digests_cnt;
- device_param->kernel_params_buf32[29] = salt_buf->digests_offset;
+ device_param->kernel_params_buf32[27] = salt_pos;
+ device_param->kernel_params_buf32[31] = salt_buf->digests_cnt;
+ device_param->kernel_params_buf32[32] = salt_buf->digests_offset;
FILE *combs_fp = device_param->combs_fp;
device_param->innerloop_pos = innerloop_pos;
device_param->innerloop_left = innerloop_left;
- device_param->kernel_params_buf32[27] = innerloop_left;
+ device_param->kernel_params_buf32[30] = innerloop_left;
// i think we can get rid of this
if (innerloop_left == 0)
if (data.attack_mode == ATTACK_MODE_STRAIGHT)
{
- hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_rules, device_param->d_rules_c, innerloop_pos * sizeof (kernel_rule_t), 0, innerloop_left * sizeof (kernel_rule_t), 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_rules, device_param->d_rules_c, innerloop_pos * sizeof (kernel_rule_t), 0, innerloop_left * sizeof (kernel_rule_t), 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueCopyBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (data.attack_mode == ATTACK_MODE_COMBI)
{
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_combs_c, CL_TRUE, 0, innerloop_left * sizeof (comb_t), device_param->combs_buf, 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_combs_c, CL_TRUE, 0, innerloop_left * sizeof (comb_t), device_param->combs_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (data.attack_mode == ATTACK_MODE_BF)
{
- hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_bfs, device_param->d_bfs_c, 0, 0, innerloop_left * sizeof (bf_t), 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_bfs, device_param->d_bfs_c, 0, 0, innerloop_left * sizeof (bf_t), 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueCopyBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (data.attack_mode == ATTACK_MODE_HYBRID1)
{
- hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_combs, device_param->d_combs_c, 0, 0, innerloop_left * sizeof (comb_t), 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_combs, device_param->d_combs_c, 0, 0, innerloop_left * sizeof (comb_t), 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueCopyBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (data.attack_mode == ATTACK_MODE_HYBRID2)
{
- hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_combs, device_param->d_combs_c, 0, 0, innerloop_left * sizeof (comb_t), 0, NULL, NULL);
+ cl_int CL_err = hc_clEnqueueCopyBuffer (data.ocl, device_param->command_queue, device_param->d_combs, device_param->d_combs_c, 0, 0, innerloop_left * sizeof (comb_t), 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueCopyBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
if (data.benchmark == 1)
hc_timer_set (&device_param->timer_speed);
}
- choose_kernel (device_param, data.attack_exec, data.attack_mode, data.opts_type, salt_buf, highest_pw_len, pws_cnt, fast_iteration);
+ int rc = choose_kernel (device_param, data.attack_exec, data.attack_mode, data.opts_type, salt_buf, highest_pw_len, pws_cnt, fast_iteration);
+
+ if (rc == -1) return -1;
if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
if (data.devices_status == STATUS_CRACKED) break;
if (data.devices_status == STATUS_ABORTED) break;
if (data.devices_status == STATUS_QUIT) break;
+ if (data.devices_status == STATUS_BYPASS) break;
/**
* result
device_param->speed_pos = speed_pos;
myfree (line_buf);
+
+ return 0;
}
static void load_segment (wl_data_t *wl_data, FILE *fd)
return (p);
}
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while (data.shutdown_inner == 0)
{
hc_sleep (sleep_time);
{
if (data.hm_nvapi)
{
- NV_GPU_PERF_POLICIES_INFO_PARAMS_V1 perfPolicies_info = { 0 };
- NV_GPU_PERF_POLICIES_STATUS_PARAMS_V1 perfPolicies_status = { 0 };
+ NV_GPU_PERF_POLICIES_INFO_PARAMS_V1 perfPolicies_info;
+ NV_GPU_PERF_POLICIES_STATUS_PARAMS_V1 perfPolicies_status;
+
+ memset (&perfPolicies_info, 0, sizeof (NV_GPU_PERF_POLICIES_INFO_PARAMS_V1));
+ memset (&perfPolicies_status, 0, sizeof (NV_GPU_PERF_POLICIES_STATUS_PARAMS_V1));
perfPolicies_info.version = MAKE_NVAPI_VERSION (NV_GPU_PERF_POLICIES_INFO_PARAMS_V1, 1);
perfPolicies_status.version = MAKE_NVAPI_VERSION (NV_GPU_PERF_POLICIES_STATUS_PARAMS_V1, 1);
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
+ #ifdef WIN
+ hm_set_fanspeed_with_device_id_nvapi (device_id, fan_speed_new, 1);
+ #endif
+ #ifdef __linux__
+ hm_set_fanspeed_with_device_id_xnvctrl (device_id, fan_speed_new);
+ #endif
}
fan_speed_chgd[device_id] = 1;
time (&runtime_cur);
- int runtime_left = data.runtime_start + data.runtime - runtime_cur;
+ int runtime_left = data.proc_start + data.runtime - runtime_cur;
if (runtime_left <= 0)
{
if (status_left == 0)
{
- //hc_thread_mutex_lock (mux_display);
+ hc_thread_mutex_lock (mux_display);
if (data.quiet == 0) clear_prompt ();
if (data.quiet == 0) log_info ("");
- //hc_thread_mutex_unlock (mux_display);
+ hc_thread_mutex_unlock (mux_display);
status_left = data.status_timer;
}
uint check_left = outfile_check_timer; // or 1 if we want to check it at startup
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while (data.shutdown_inner == 0)
{
hc_sleep (1);
hc_thread_mutex_lock (mux_dispatcher);
const u64 words_cur = data.words_cur;
- const u64 words_base = (data.limit == 0) ? data.words_base : data.limit;
+ const u64 words_base = (data.limit == 0) ? data.words_base : MIN (data.limit, data.words_base);
device_param->words_off = words_cur;
const uint attack_kern = data.attack_kern;
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
hc_thread_mutex_lock (mux_dispatcher);
if (attack_mode == ATTACK_MODE_BF)
{
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
const uint work = get_work (device_param, -1);
u64 words_cur = 0;
- while ((data.devices_status != STATUS_EXHAUSTED) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ while ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
u64 words_off = 0;
u64 words_fin = 0;
salt_t *salt_buf = &data.salts_buf[salt_pos];
- device_param->kernel_params_buf32[24] = salt_pos;
- device_param->kernel_params_buf32[27] = 1;
- device_param->kernel_params_buf32[28] = salt_buf->digests_cnt;
- device_param->kernel_params_buf32[29] = salt_buf->digests_offset;
- device_param->kernel_params_buf32[30] = 0;
- device_param->kernel_params_buf32[31] = 1;
+ device_param->kernel_params_buf32[27] = salt_pos;
+ device_param->kernel_params_buf32[30] = 1;
+ device_param->kernel_params_buf32[31] = salt_buf->digests_cnt;
+ device_param->kernel_params_buf32[32] = salt_buf->digests_offset;
+ device_param->kernel_params_buf32[33] = 0;
+ device_param->kernel_params_buf32[34] = 1;
char *dictfile_old = data.dictfile;
loop_left = MIN (loop_left, loop_step);
- device_param->kernel_params_buf32[25] = loop_pos;
- device_param->kernel_params_buf32[26] = loop_left;
+ device_param->kernel_params_buf32[28] = loop_pos;
+ device_param->kernel_params_buf32[29] = loop_left;
run_kernel (KERN_RUN_2, device_param, 1, false, 0);
}
* cleanup
*/
- device_param->kernel_params_buf32[24] = 0;
- device_param->kernel_params_buf32[25] = 0;
- device_param->kernel_params_buf32[26] = 0;
device_param->kernel_params_buf32[27] = 0;
device_param->kernel_params_buf32[28] = 0;
device_param->kernel_params_buf32[29] = 0;
device_param->kernel_params_buf32[30] = 0;
device_param->kernel_params_buf32[31] = 0;
+ device_param->kernel_params_buf32[32] = 0;
+ device_param->kernel_params_buf32[33] = 0;
+ device_param->kernel_params_buf32[34] = 0;
data.dictfile = dictfile_old;
umask (077);
+ /**
+ * There's some buggy OpenCL runtime that do not support -I.
+ * A workaround is to chdir() to the OpenCL folder,
+ * then compile the kernels,
+ * then chdir() back to where we came from so we need to save it first
+ */
+
+ char cwd[1024];
+
+ if (getcwd (cwd, sizeof (cwd) - 1) == NULL)
+ {
+ log_error ("ERROR: getcwd(): %s", strerror (errno));
+
+ return -1;
+ }
+
/**
* Real init
*/
uint workload_profile = WORKLOAD_PROFILE;
uint kernel_accel = KERNEL_ACCEL;
uint kernel_loops = KERNEL_LOOPS;
+ uint nvidia_spin_damp = NVIDIA_SPIN_DAMP;
uint gpu_temp_disable = GPU_TEMP_DISABLE;
#ifdef HAVE_HWMON
uint gpu_temp_abort = GPU_TEMP_ABORT;
#define IDX_CPU_AFFINITY 0xff25
#define IDX_OPENCL_DEVICES 'd'
#define IDX_OPENCL_PLATFORMS 0xff72
- #define IDX_OPENCL_DEVICE_TYPES 0xff73
+ #define IDX_OPENCL_DEVICE_TYPES 'D'
#define IDX_OPENCL_VECTOR_WIDTH 0xff74
#define IDX_WORKLOAD_PROFILE 'w'
#define IDX_KERNEL_ACCEL 'n'
#define IDX_KERNEL_LOOPS 'u'
+ #define IDX_NVIDIA_SPIN_DAMP 0xff79
#define IDX_GPU_TEMP_DISABLE 0xff29
#define IDX_GPU_TEMP_ABORT 0xff30
#define IDX_GPU_TEMP_RETAIN 0xff31
#define IDX_CUSTOM_CHARSET_3 '3'
#define IDX_CUSTOM_CHARSET_4 '4'
- char short_options[] = "hVvm:a:r:j:k:g:o:t:d:n:u:c:p:s:l:1:2:3:4:ibw:";
+ char short_options[] = "hVvm:a:r:j:k:g:o:t:d:D:n:u:c:p:s:l:1:2:3:4:ibw:";
struct option long_options[] =
{
{"workload-profile", required_argument, 0, IDX_WORKLOAD_PROFILE},
{"kernel-accel", required_argument, 0, IDX_KERNEL_ACCEL},
{"kernel-loops", required_argument, 0, IDX_KERNEL_LOOPS},
+ {"nvidia-spin-damp", required_argument, 0, IDX_NVIDIA_SPIN_DAMP},
{"gpu-temp-disable", no_argument, 0, IDX_GPU_TEMP_DISABLE},
#ifdef HAVE_HWMON
{"gpu-temp-abort", required_argument, 0, IDX_GPU_TEMP_ABORT},
case IDX_SESSION: session = optarg; break;
case IDX_SHOW: show = 1; break;
case IDX_LEFT: left = 1; break;
- case '?': return (-1);
+ case '?': return -1;
}
}
{
log_error ("ERROR: Invalid argument specified");
- return (-1);
+ return -1;
}
/**
{
log_info ("%s", VERSION_TAG);
- return (0);
+ return 0;
}
if (usage)
{
usage_big_print (PROGNAME);
- return (0);
+ return 0;
}
/**
char *exec_path = get_exec_path ();
- #ifdef LINUX
+
+ #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
char *resolved_install_folder = realpath (INSTALL_FOLDER, NULL);
char *resolved_exec_path = realpath (exec_path, NULL);
+ if (resolved_install_folder == NULL)
+ {
+ log_error ("ERROR: %s: %s", INSTALL_FOLDER, strerror (errno));
+
+ return -1;
+ }
+
+ if (resolved_exec_path == NULL)
+ {
+ log_error ("ERROR: %s: %s", exec_path, strerror (errno));
+
+ return -1;
+ }
+
char *install_dir = get_install_dir (resolved_exec_path);
char *profile_dir = NULL;
char *session_dir = NULL;
if (show == 1) log_error ("ERROR: Mixing --restore parameter and --show is not supported");
else log_error ("ERROR: Mixing --restore parameter and --left is not supported");
- return (-1);
+ return -1;
}
// this allows the user to use --show and --left while cracking (i.e. while another instance of hashcat is running)
{
log_error ("ERROR: Incompatible restore-file version");
- return (-1);
+ return -1;
}
myargc = rd->argc;
uint runtime_chgd = 0;
uint kernel_loops_chgd = 0;
uint kernel_accel_chgd = 0;
+ uint nvidia_spin_damp_chgd = 0;
uint attack_mode_chgd = 0;
uint outfile_format_chgd = 0;
uint rp_gen_seed_chgd = 0;
kernel_accel_chgd = 1; break;
case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
kernel_loops_chgd = 1; break;
+ case IDX_NVIDIA_SPIN_DAMP: nvidia_spin_damp = atoi (optarg);
+ nvidia_spin_damp_chgd = 1; break;
case IDX_GPU_TEMP_DISABLE: gpu_temp_disable = 1; break;
#ifdef HAVE_HWMON
case IDX_GPU_TEMP_ABORT: gpu_temp_abort = atoi (optarg); break;
default:
log_error ("ERROR: Invalid argument specified");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Invalid argument specified");
- return (-1);
+ return -1;
}
/**
{
log_info ("%s (%s) starting in benchmark-mode...", PROGNAME, VERSION_TAG);
log_info ("");
- log_info ("Note: Reported benchmark cracking speed = real cracking speed");
- log_info ("To verify, run hashcat like this: only_one_hash.txt -a 3 -w 3 ?b?b?b?b?b?b?b");
- log_info ("");
}
else
{
{
// do nothing
}
+ else if (keyspace == 1)
+ {
+ // do nothing
+ }
else
{
log_info ("%s (%s) starting...", PROGNAME, VERSION_TAG);
{
log_error ("ERROR: Invalid attack-mode specified");
- return (-1);
+ return -1;
}
if (runtime_chgd && runtime == 0) // just added to remove compiler warnings for runtime_chgd
{
log_error ("ERROR: Invalid runtime specified");
- return (-1);
+ return -1;
}
if (hash_mode_chgd && hash_mode > 13800) // just added to remove compiler warnings for hash_mode_chgd
{
log_error ("ERROR: Invalid hash-type specified");
- return (-1);
+ return -1;
}
// renamed hash modes
{
log_error ("Old -m specified, use -m %d instead", n);
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Mixing support for user names and hashes of type %s is not supported", strhashtype (hash_mode));
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Invalid outfile-format specified");
- return (-1);
+ return -1;
}
if (left == 1)
{
if (outfile_format > 1)
{
- log_error ("ERROR: Mixing outfile-format > 1 is not allowed together with left parameter");
+ log_error ("ERROR: Mixing outfile-format > 1 with left parameter is not allowed");
- return (-1);
+ return -1;
}
}
else
{
if ((outfile_format > 7) && (outfile_format < 16))
{
- log_error ("ERROR: Mixing outfile-format > 7 is not allowed together with show parameter");
+ log_error ("ERROR: Mixing outfile-format > 7 with show parameter is not allowed");
- return (-1);
+ return -1;
}
}
}
{
log_error ("ERROR: Invalid increment-min specified");
- return (-1);
+ return -1;
}
if (increment_max > INCREMENT_MAX)
{
log_error ("ERROR: Invalid increment-max specified");
- return (-1);
+ return -1;
}
if (increment_min > increment_max)
{
log_error ("ERROR: Invalid increment-min specified");
- return (-1);
+ return -1;
}
if ((increment == 1) && (attack_mode == ATTACK_MODE_STRAIGHT))
{
- log_error ("ERROR: increment is not allowed in attack-mode 0");
+ log_error ("ERROR: Increment is not allowed in attack-mode 0");
- return (-1);
+ return -1;
}
if ((increment == 0) && (increment_min_chgd == 1))
{
- log_error ("ERROR: increment-min is only supported together with increment switch");
+ log_error ("ERROR: Increment-min is only supported combined with increment switch");
- return (-1);
+ return -1;
}
if ((increment == 0) && (increment_max_chgd == 1))
{
- log_error ("ERROR: increment-max is only supported together with increment switch");
+ log_error ("ERROR: Increment-max is only supported combined with increment switch");
- return (-1);
+ return -1;
}
if (rp_files_cnt && rp_gen)
{
log_error ("ERROR: Use of both rules-file and rules-generate is not supported");
- return (-1);
+ return -1;
}
if (rp_files_cnt || rp_gen)
{
log_error ("ERROR: Use of rules-file or rules-generate only allowed in attack-mode 0");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Invalid rp-gen-func-min specified");
- return (-1);
+ return -1;
}
if (kernel_accel_chgd == 1)
{
if (force == 0)
{
- log_info ("The manual use of the option -n (or --kernel-accel) is outdated");
- log_info ("Please consider using the option -w instead");
+ log_info ("The manual use of the -n option (or --kernel-accel) is outdated");
+ log_info ("Please consider using the -w option instead");
log_info ("You can use --force to override this but do not post error reports if you do so");
log_info ("");
- return (-1);
+ return -1;
}
if (kernel_accel < 1)
{
log_error ("ERROR: Invalid kernel-accel specified");
- return (-1);
+ return -1;
}
if (kernel_accel > 1024)
{
log_error ("ERROR: Invalid kernel-accel specified");
- return (-1);
+ return -1;
}
}
{
if (force == 0)
{
- log_info ("The manual use of the option -u (or --kernel-loops) is outdated");
- log_info ("Please consider using the option -w instead");
+ log_info ("The manual use of the -u option (or --kernel-loops) is outdated");
+ log_info ("Please consider using the -w option instead");
log_info ("You can use --force to override this but do not post error reports if you do so");
log_info ("");
- return (-1);
+ return -1;
}
if (kernel_loops < 1)
{
log_error ("ERROR: Invalid kernel-loops specified");
- return (-1);
+ return -1;
}
if (kernel_loops > 1024)
{
log_error ("ERROR: Invalid kernel-loops specified");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: workload-profile %i not available", workload_profile);
- return (-1);
+ return -1;
}
if (opencl_vector_width_chgd && (!is_power_of_2(opencl_vector_width) || opencl_vector_width > 16))
{
log_error ("ERROR: opencl-vector-width %i not allowed", opencl_vector_width);
- return (-1);
+ return -1;
}
if (show == 1 || left == 1)
{
log_error ("ERROR: Mixing remove parameter not allowed with show parameter or left parameter");
- return (-1);
+ return -1;
}
if (potfile_disable == 1)
{
log_error ("ERROR: Mixing potfile-disable parameter not allowed with show parameter or left parameter");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Invalid argument for benchmark mode specified");
- return (-1);
+ return -1;
}
if (attack_mode_chgd == 1)
{
log_error ("ERROR: Only attack-mode 3 allowed in benchmark mode");
- return (-1);
+ return -1;
}
}
}
{
usage_mini_print (myargv[0]);
- return (-1);
+ return -1;
}
}
else if (attack_kern == ATTACK_KERN_STRAIGHT)
{
usage_mini_print (myargv[0]);
- return (-1);
+ return -1;
}
}
else if (attack_kern == ATTACK_KERN_COMBI)
{
usage_mini_print (myargv[0]);
- return (-1);
+ return -1;
}
}
else if (attack_kern == ATTACK_KERN_BF)
{
usage_mini_print (myargv[0]);
- return (-1);
+ return -1;
}
}
else
{
usage_mini_print (myargv[0]);
- return (-1);
+ return -1;
}
}
{
if (show == 1)
{
- log_error ("ERROR: Mixing show parameter not supported with keyspace parameter");
+ log_error ("ERROR: Combining show parameter with keyspace parameter is not allowed");
- return (-1);
+ return -1;
}
else if (left == 1)
{
- log_error ("ERROR: Mixing left parameter not supported wiht keyspace parameter");
+ log_error ("ERROR: Combining left parameter with keyspace parameter is not allowed");
- return (-1);
+ return -1;
}
potfile_disable = 1;
kernel_loops = 1024;
force = 1;
outfile_check_timer = 0;
+ session = "stdout";
+ opencl_vector_width = 1;
}
if (remove_timer_chgd == 1)
{
log_error ("ERROR: Parameter remove-timer require parameter remove enabled");
- return (-1);
+ return -1;
}
if (remove_timer < 1)
{
log_error ("ERROR: Parameter remove-timer must have a value greater than or equal to 1");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Parameter loopback not allowed without rules-file or rules-generate");
- return (-1);
+ return -1;
}
}
else
{
log_error ("ERROR: Parameter loopback allowed in attack-mode 0 only");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Parameter debug-mode option is only available with attack-mode 0");
- return (-1);
+ return -1;
}
if ((rp_files_cnt == 0) && (rp_gen == 0))
{
log_error ("ERROR: Parameter debug-mode not allowed without rules-file or rules-generate");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Invalid debug-mode specified");
- return (-1);
+ return -1;
}
if (debug_file != NULL)
{
log_error ("ERROR: Parameter debug-file requires parameter debug-mode to be set");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Parameter induction-dir not allowed with brute-force attacks");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: setting --weak-hash-threshold allowed only in straight-attack mode");
- return (-1);
+ return -1;
}
weak_hash_threshold = 0;
}
+ if (nvidia_spin_damp > 100)
+ {
+ log_error ("ERROR: setting --nvidia-spin-damp must be between 0 and 100 (inclusive)");
+
+ return -1;
+ }
+
+
/**
* induction directory
*/
{
log_error ("ERROR: Rename directory %s to %s: %s", induction_directory, induction_directory_mv, strerror (errno));
- return (-1);
+ return -1;
}
}
else
{
log_error ("ERROR: %s: %s", induction_directory, strerror (errno));
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: %s: %s", induction_directory, strerror (errno));
- return (-1);
+ return -1;
}
}
}
{
log_error ("ERROR: Directory specified in outfile-check '%s' is not a valid directory", outfile_check_directory);
- return (-1);
+ return -1;
}
}
else if (outfile_check_dir == NULL)
{
log_error ("ERROR: %s: %s", outfile_check_directory, strerror (errno));
- return (-1);
+ return -1;
}
}
}
logfile_top_uint (force);
logfile_top_uint (kernel_accel);
logfile_top_uint (kernel_loops);
+ logfile_top_uint (nvidia_spin_damp);
logfile_top_uint (gpu_temp_disable);
#ifdef HAVE_HWMON
logfile_top_uint (gpu_temp_abort);
restore_disable = 1;
potfile_disable = 1;
weak_hash_threshold = 0;
+ nvidia_spin_damp = 0;
gpu_temp_disable = 1;
outfile_check_timer = 0;
}
}
+ /**
+ * status, monitor and outfile remove threads
+ */
+
+ uint wordlist_mode = ((optind + 1) < myargc) ? WL_MODE_FILE : WL_MODE_STDIN;
+
+ data.wordlist_mode = wordlist_mode;
+
+ if (wordlist_mode == WL_MODE_STDIN)
+ {
+ status = 1;
+
+ data.status = status;
+ }
+
+ uint outer_threads_cnt = 0;
+
+ hc_thread_t *outer_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
+
+ data.shutdown_outer = 0;
+
+ if (keyspace == 0 && benchmark == 0 && stdout_flag == 0)
+ {
+ if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
+ {
+ hc_thread_create (outer_threads[outer_threads_cnt], thread_keypress, NULL);
+
+ outer_threads_cnt++;
+ }
+ }
+
/**
* config
*/
dgst_pos3 = 1;
break;
- case 190: hash_type = HASH_TYPE_SHA1;
- salt_type = SALT_TYPE_NONE;
- attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
- opts_type = OPTS_TYPE_PT_GENERATE_BE
- | OPTS_TYPE_PT_ADD80
- | OPTS_TYPE_PT_ADDBITS15;
- kern_type = KERN_TYPE_SHA1_LINKEDIN;
- dgst_size = DGST_SIZE_4_5;
- parse_func = sha1linkedin_parse_hash;
- sort_by_digest = sort_by_digest_4_5;
- opti_type = OPTI_TYPE_ZERO_BYTE
- | OPTI_TYPE_PRECOMPUTE_INIT
- | OPTI_TYPE_EARLY_SKIP
- | OPTI_TYPE_NOT_ITERATED
- | OPTI_TYPE_NOT_SALTED;
- dgst_pos0 = 0;
- dgst_pos1 = 4;
- dgst_pos2 = 3;
- dgst_pos3 = 2;
- break;
-
case 200: hash_type = HASH_TYPE_MYSQL;
salt_type = SALT_TYPE_NONE;
attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
salt_type = SALT_TYPE_NONE;
attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
opts_type = OPTS_TYPE_PT_GENERATE_LE;
- kern_type = 0;
+ kern_type = KERN_TYPE_STDOUT;
dgst_size = DGST_SIZE_4_4;
parse_func = NULL;
sort_by_digest = NULL;
dgst_pos3 = 6;
break;
- default: usage_mini_print (PROGNAME); return (-1);
+ default: usage_mini_print (PROGNAME); return -1;
}
/**
{
log_error ("ERROR: Parameter hex-salt not valid for hash-type %u", hash_mode);
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: %s: %s", potfile, strerror (errno));
- return (-1);
+ return -1;
}
if (outfile != NULL)
fclose (pot_fp);
- return (-1);
+ return -1;
}
}
else
{
log_error ("ERROR: %s: %s", potfile, strerror (errno));
- return (-1);
+ return -1;
}
data.pot_fp = pot_fp;
{
log_error ("ERROR: %s: %s", data.hashfile, strerror (errno));
- return (-1);
+ return -1;
}
hashes_avail = st.st_size / sizeof (hccap_t);
{
log_error ("ERROR: %s: %s", hashfile, strerror (errno));
- return (-1);
+ return -1;
}
if (data.quiet == 0) log_info_nn ("Counting lines in %s", hashfile);
fclose (fp);
- return (-1);
+ return -1;
}
hashlist_format = hlfmt_detect (fp, 100); // 100 = max numbers to "scan". could be hashes_avail, too
fclose (fp);
- return (-1);
+ return -1;
}
fclose (fp);
if (hash_fmt_error)
{
- log_info ("WARNING: failed to parse hashes using the '%s' format", strhlfmt (hashlist_format));
+ log_info ("WARNING: Failed to parse hashes using the '%s' format", strhlfmt (hashlist_format));
}
else
{
{
log_error ("ERROR: hccap file not specified");
- return (-1);
+ return -1;
}
hashlist_mode = HL_MODE_FILE;
{
log_error ("ERROR: %s: %s", hash_buf, strerror (errno));
- return (-1);
+ return -1;
}
if (hashes_avail < 1)
fclose (fp);
- return (-1);
+ return -1;
}
uint hccap_size = sizeof (hccap_t);
{
log_error ("ERROR: %s: %s", hashfile, strerror (errno));
- return (-1);
+ return -1;
}
uint line_num = 0;
if (parser_status < PARSER_GLOBAL_ZERO)
{
- log_info ("WARNING: Hashfile '%s' in line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
+ log_info ("WARNING: Hashfile '%s' on line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
continue;
}
if (parser_status < PARSER_GLOBAL_ZERO)
{
- log_info ("WARNING: Hashfile '%s' in line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
+ log_info ("WARNING: Hashfile '%s' on line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
continue;
}
if (parser_status < PARSER_GLOBAL_ZERO)
{
- log_info ("WARNING: Hashfile '%s' in line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
+ log_info ("WARNING: Hashfile '%s' on line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
continue;
}
if (parser_status < PARSER_GLOBAL_ZERO)
{
- log_info ("WARNING: Hashfile '%s' in line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
+ log_info ("WARNING: Hashfile '%s' on line %u (%s): %s", data.hashfile, line_num, line_buf, strparser (parser_status));
continue;
}
if (data.quiet == 0) log_info_nn ("");
- return (0);
+ return 0;
}
if ((keyspace == 0) && (stdout_flag == 0))
{
log_error ("ERROR: No hashes loaded");
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: Hashfile and Outfile are not allowed to point to the same file");
- return (-1);
+ return -1;
}
#endif
{
log_error ("ERROR: Hashfile and Outfile are not allowed to point to the same file");
- return (-1);
+ return -1;
}
#endif
}
// here we have in line_buf: ESSID:MAC1:MAC2 (without the plain)
// manipulate salt_buf
+ memset (line_buf_cpy, 0, HCBUFSIZ);
memcpy (line_buf_cpy, line_buf, i);
char *mac2_pos = strrchr (line_buf_cpy, ':');
{
log_error ("ERROR: %s: %s", rp_file, strerror (errno));
- return (-1);
+ return -1;
}
while (!feof (fp))
if (result == -1)
{
- log_info ("WARNING: Skipping invalid or unsupported rule in file %s in line %u: %s", rp_file, rule_line, rule_buf);
+ log_info ("WARNING: Skipping invalid or unsupported rule in file %s on line %u: %s", rp_file, rule_line, rule_buf);
continue;
}
if (cpu_rule_to_kernel_rule (rule_buf, rule_len, &kernel_rules_buf[kernel_rules_cnt]) == -1)
{
- log_info ("WARNING: Cannot convert rule for use on device in file %s in line %u: %s", rp_file, rule_line, rule_buf);
+ log_info ("WARNING: Cannot convert rule for use on OpenCL device in file %s on line %u: %s", rp_file, rule_line, rule_buf);
memset (&kernel_rules_buf[kernel_rules_cnt], 0, sizeof (kernel_rule_t)); // needs to be cleared otherwise we could have some remaining data
/* its so slow
if (rulefind (&kernel_rules_buf[kernel_rules_cnt], kernel_rules_buf, kernel_rules_cnt, sizeof (kernel_rule_t), sort_by_kernel_rule))
{
- log_info ("Duplicate rule for use on device in file %s in line %u: %s", rp_file, rule_line, rule_buf);
+ log_info ("Duplicate rule for use on OpenCL device in file %s in line %u: %s", rp_file, rule_line, rule_buf);
continue;
}
* generate NOP rules
*/
- if (kernel_rules_cnt == 0)
+ if ((rp_files_cnt == 0) && (rp_gen == 0))
{
kernel_rules_buf = (kernel_rule_t *) mymalloc (sizeof (kernel_rule_t));
data.kernel_rules_cnt = kernel_rules_cnt;
data.kernel_rules_buf = kernel_rules_buf;
+ if (kernel_rules_cnt == 0)
+ {
+ log_error ("ERROR: No valid rules left");
+
+ return -1;
+ }
+
/**
* OpenCL platforms: detect
*/
if (keyspace == 0)
{
- hc_clGetPlatformIDs (data.ocl, CL_PLATFORMS_MAX, platforms, &platforms_cnt);
+ cl_int CL_err = hc_clGetPlatformIDs (data.ocl, CL_PLATFORMS_MAX, platforms, &platforms_cnt);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetPlatformIDs(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (platforms_cnt == 0)
{
log_info (" NVidia users require NVidia drivers 346.59 or later (recommended 361.x or later)");
log_info ("");
- return (-1);
+ return -1;
}
if (opencl_platforms_filter != (uint) -1)
{
log_error ("ERROR: The platform selected by the --opencl-platforms parameter is larger than the number of available platforms (%d)", platforms_cnt);
- return (-1);
+ return -1;
}
}
}
- /**
- * OpenCL device types:
- * In case the user did not specify --opencl-device-types and the user runs hashcat in a system with only a CPU only he probably want to use that CPU.
- * In such a case, automatically enable CPU device type support, since it's disabled by default.
- */
-
if (opencl_device_types == NULL)
{
+ /**
+ * OpenCL device types:
 + * In case the user did not specify --opencl-device-types and runs hashcat on a system with only a CPU, they probably want to use that CPU.
+ */
+
cl_device_type device_types_all = 0;
for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
cl_platform_id platform = platforms[platform_id];
- hc_clGetDeviceIDs (data.ocl, platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
+ cl_int CL_err = hc_clGetDeviceIDs (data.ocl, platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ //log_error ("ERROR: clGetDeviceIDs(): %s\n", val2cstr_cl (CL_err));
+
+ //return -1;
+
 + // Silently ignore at this point; the query is retried later and a note is created for the user at that point
+
+ continue;
+ }
for (uint platform_devices_id = 0; platform_devices_id < platform_devices_cnt; platform_devices_id++)
{
cl_device_type device_type;
- hc_clGetDeviceInfo (data.ocl, device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
+ cl_int CL_err = hc_clGetDeviceInfo (data.ocl, device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_types_all |= device_type;
}
}
+ // In such a case, automatically enable CPU device type support, since it's disabled by default.
+
if ((device_types_all & (CL_DEVICE_TYPE_GPU | CL_DEVICE_TYPE_ACCELERATOR)) == 0)
{
device_types_filter |= CL_DEVICE_TYPE_CPU;
}
+
 + // In another case, when the user uses --stdout, using CPU devices is much faster to set up
+ // If we have a CPU device, force it to be used
+
+ if (stdout_flag == 1)
+ {
+ if (device_types_all & CL_DEVICE_TYPE_CPU)
+ {
+ device_types_filter = CL_DEVICE_TYPE_CPU;
+ }
+ }
}
/**
* OpenCL devices: simply push all devices from all platforms into the same device array
*/
- int need_adl = 0;
- int need_nvapi = 0;
- int need_nvml = 0;
+ int need_adl = 0;
+ int need_nvapi = 0;
+ int need_nvml = 0;
+ int need_xnvctrl = 0;
hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t));
for (uint platform_id = 0; platform_id < platforms_cnt; platform_id++)
{
- if ((opencl_platforms_filter & (1 << platform_id)) == 0) continue;
+ cl_int CL_err = CL_SUCCESS;
cl_platform_id platform = platforms[platform_id];
- hc_clGetDeviceIDs (data.ocl, platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
-
char platform_vendor[INFOSZ] = { 0 };
- hc_clGetPlatformInfo (data.ocl, platform, CL_PLATFORM_VENDOR, sizeof (platform_vendor), platform_vendor, NULL);
+ CL_err = hc_clGetPlatformInfo (data.ocl, platform, CL_PLATFORM_VENDOR, sizeof (platform_vendor), platform_vendor, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetPlatformInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
// find our own platform vendor because pocl and mesa are pushing original vendor_id through opencl
// this causes trouble with vendor id based macros
platform_vendor_id = VENDOR_ID_GENERIC;
}
+ uint platform_skipped = ((opencl_platforms_filter & (1 << platform_id)) == 0);
+
+ CL_err = hc_clGetDeviceIDs (data.ocl, platform, CL_DEVICE_TYPE_ALL, DEVICES_MAX, platform_devices, &platform_devices_cnt);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ //log_error ("ERROR: clGetDeviceIDs(): %s\n", val2cstr_cl (CL_err));
+
+ //return -1;
+
+ platform_skipped = 2;
+ }
+
+ if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
+ {
+ if (machine_readable == 0)
+ {
+ if (platform_skipped == 0)
+ {
+ const int len = log_info ("OpenCL Platform #%u: %s", platform_id + 1, platform_vendor);
+
+ char line[256] = { 0 };
+
+ for (int i = 0; i < len; i++) line[i] = '=';
+
+ log_info (line);
+ }
+ else if (platform_skipped == 1)
+ {
+ log_info ("OpenCL Platform #%u: %s, skipped", platform_id + 1, platform_vendor);
+ log_info ("");
+ }
+ else if (platform_skipped == 2)
+ {
+ log_info ("OpenCL Platform #%u: %s, skipped! No OpenCL compatible devices found", platform_id + 1, platform_vendor);
+ log_info ("");
+ }
+ }
+ }
+
+ if (platform_skipped == 1) continue;
+ if (platform_skipped == 2) continue;
+
for (uint platform_devices_id = 0; platform_devices_id < platform_devices_cnt; platform_devices_id++)
{
size_t param_value_size = 0;
device_param->platform_devices_id = platform_devices_id;
+ device_param->platform = platform;
+
// device_type
cl_device_type device_type;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_type &= ~CL_DEVICE_TYPE_DEFAULT;
// device_name
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NAME, 0, NULL, ¶m_value_size);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NAME, 0, NULL, ¶m_value_size);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
char *device_name = (char *) mymalloc (param_value_size);
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NAME, param_value_size, device_name, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NAME, param_value_size, device_name, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_name = device_name;
// device_vendor
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VENDOR, 0, NULL, ¶m_value_size);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VENDOR, 0, NULL, ¶m_value_size);
- char *device_vendor = (char *) mymalloc (param_value_size);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VENDOR, param_value_size, device_vendor, NULL);
+ return -1;
+ }
+
+ char *device_vendor = (char *) mymalloc (param_value_size);
+
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VENDOR, param_value_size, device_vendor, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_vendor = device_vendor;
// device_version
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VERSION, 0, NULL, ¶m_value_size);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VERSION, 0, NULL, ¶m_value_size);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
char *device_version = (char *) mymalloc (param_value_size);
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VERSION, param_value_size, device_version, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_VERSION, param_value_size, device_version, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_version = device_version;
// device_opencl_version
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_OPENCL_C_VERSION, 0, NULL, ¶m_value_size);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_OPENCL_C_VERSION, 0, NULL, ¶m_value_size);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
char *device_opencl_version = (char *) mymalloc (param_value_size);
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_OPENCL_C_VERSION, param_value_size, device_opencl_version, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_OPENCL_C_VERSION, param_value_size, device_opencl_version, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->opencl_v12 = device_opencl_version[9] > '1' || device_opencl_version[11] >= '2';
{
if (opti_type & OPTI_TYPE_USES_BITS_64)
{
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, sizeof (vector_width), &vector_width, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, sizeof (vector_width), &vector_width, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else
{
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, sizeof (vector_width), &vector_width, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, sizeof (vector_width), &vector_width, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
else
cl_uint device_processors;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (device_processors), &device_processors, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof (device_processors), &device_processors, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_processors = device_processors;
cl_ulong device_maxmem_alloc;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (device_maxmem_alloc), &device_maxmem_alloc, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof (device_maxmem_alloc), &device_maxmem_alloc, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_maxmem_alloc = MIN (device_maxmem_alloc, 0x7fffffff);
cl_ulong device_global_mem;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (device_global_mem), &device_global_mem, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof (device_global_mem), &device_global_mem, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_global_mem = device_global_mem;
size_t device_maxworkgroup_size;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof (device_maxworkgroup_size), &device_maxworkgroup_size, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof (device_maxworkgroup_size), &device_maxworkgroup_size, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_maxworkgroup_size = device_maxworkgroup_size;
cl_uint device_maxclock_frequency;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (device_maxclock_frequency), &device_maxclock_frequency, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof (device_maxclock_frequency), &device_maxclock_frequency, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->device_maxclock_frequency = device_maxclock_frequency;
cl_bool device_endian_little;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_ENDIAN_LITTLE, sizeof (device_endian_little), &device_endian_little, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_ENDIAN_LITTLE, sizeof (device_endian_little), &device_endian_little, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (device_endian_little == CL_FALSE)
{
- log_info ("Device #%u: WARNING: not little endian device", device_id + 1);
+ log_info ("- Device #%u: WARNING: Not a little endian device", device_id + 1);
device_param->skipped = 1;
}
cl_bool device_available;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_AVAILABLE, sizeof (device_available), &device_available, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_AVAILABLE, sizeof (device_available), &device_available, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (device_available == CL_FALSE)
{
- log_info ("Device #%u: WARNING: device not available", device_id + 1);
+ log_info ("- Device #%u: WARNING: Device not available", device_id + 1);
device_param->skipped = 1;
}
cl_bool device_compiler_available;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_COMPILER_AVAILABLE, sizeof (device_compiler_available), &device_compiler_available, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_COMPILER_AVAILABLE, sizeof (device_compiler_available), &device_compiler_available, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (device_compiler_available == CL_FALSE)
{
- log_info ("Device #%u: WARNING: device no compiler available", device_id + 1);
+ log_info ("- Device #%u: WARNING: No compiler available for device", device_id + 1);
device_param->skipped = 1;
}
cl_device_exec_capabilities device_execution_capabilities;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_EXECUTION_CAPABILITIES, sizeof (device_execution_capabilities), &device_execution_capabilities, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_EXECUTION_CAPABILITIES, sizeof (device_execution_capabilities), &device_execution_capabilities, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if ((device_execution_capabilities & CL_EXEC_KERNEL) == 0)
{
- log_info ("Device #%u: WARNING: device does not support executing kernels", device_id + 1);
+ log_info ("- Device #%u: WARNING: Device does not support executing kernels", device_id + 1);
device_param->skipped = 1;
}
size_t device_extensions_size;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_EXTENSIONS, 0, NULL, &device_extensions_size);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_EXTENSIONS, 0, NULL, &device_extensions_size);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
char *device_extensions = mymalloc (device_extensions_size + 1);
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_EXTENSIONS, device_extensions_size, device_extensions, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_EXTENSIONS, device_extensions_size, device_extensions, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (strstr (device_extensions, "base_atomics") == 0)
{
- log_info ("Device #%u: WARNING: device does not support base atomics", device_id + 1);
+ log_info ("- Device #%u: WARNING: Device does not support base atomics", device_id + 1);
device_param->skipped = 1;
}
if (strstr (device_extensions, "byte_addressable_store") == 0)
{
- log_info ("Device #%u: WARNING: device does not support byte addressable store", device_id + 1);
+ log_info ("- Device #%u: WARNING: Device does not support byte addressable store", device_id + 1);
device_param->skipped = 1;
}
cl_ulong device_local_mem_size;
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_LOCAL_MEM_SIZE, sizeof (device_local_mem_size), &device_local_mem_size, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_LOCAL_MEM_SIZE, sizeof (device_local_mem_size), &device_local_mem_size, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (device_local_mem_size < 32768)
{
- log_info ("Device #%u: WARNING: device local mem size is too small", device_id + 1);
+ log_info ("- Device #%u: WARNING: Device local mem size is too small", device_id + 1);
device_param->skipped = 1;
}
{
if (algorithm_pos == 0)
{
- log_info ("Device #%u: WARNING: not native intel opencl runtime, expect massive speed loss", device_id + 1);
- log_info (" You can use --force to override this but do not post error reports if you do so");
+ log_info ("- Device #%u: WARNING: Not a native Intel OpenCL runtime, expect massive speed loss", device_id + 1);
+ log_info (" You can use --force to override this but do not post error reports if you do so");
}
device_param->skipped = 1;
// driver_version
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DRIVER_VERSION, 0, NULL, ¶m_value_size);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DRIVER_VERSION, 0, NULL, ¶m_value_size);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
char *driver_version = (char *) mymalloc (param_value_size);
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DRIVER_VERSION, param_value_size, driver_version, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DRIVER_VERSION, param_value_size, driver_version, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->driver_version = driver_version;
device_param->device_name_chksum = device_name_chksum;
- // device_processor_cores
+ // vendor specific
if (device_param->device_type & CL_DEVICE_TYPE_GPU)
{
{
need_nvml = 1;
- #ifdef _WIN
+ #ifdef __linux__
+ need_xnvctrl = 1;
+ #endif
+
+ #ifdef WIN
need_nvapi = 1;
#endif
}
}
- // device_processor_cores
-
- if (device_type & CL_DEVICE_TYPE_CPU)
- {
- cl_uint device_processor_cores = 1;
-
- device_param->device_processor_cores = device_processor_cores;
- }
-
if (device_type & CL_DEVICE_TYPE_GPU)
{
- if (device_vendor_id == VENDOR_ID_AMD)
- {
- cl_uint device_processor_cores = 0;
-
- #define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043
-
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof (device_processor_cores), &device_processor_cores, NULL);
-
- device_param->device_processor_cores = device_processor_cores;
- }
- else if (device_vendor_id == VENDOR_ID_NV)
+ if (device_vendor_id == VENDOR_ID_NV)
{
cl_uint kernel_exec_timeout = 0;
#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, sizeof (kernel_exec_timeout), &kernel_exec_timeout, NULL);
-
- device_param->kernel_exec_timeout = kernel_exec_timeout;
-
- cl_uint device_processor_cores = 0;
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, sizeof (kernel_exec_timeout), &kernel_exec_timeout, NULL);
- #define CL_DEVICE_WARP_SIZE_NV 0x4003
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_WARP_SIZE_NV, sizeof (device_processor_cores), &device_processor_cores, NULL);
+ return -1;
+ }
- device_param->device_processor_cores = device_processor_cores;
+ device_param->kernel_exec_timeout = kernel_exec_timeout;
cl_uint sm_minor = 0;
cl_uint sm_major = 0;
#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
- hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, sizeof (sm_minor), &sm_minor, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clGetDeviceInfo (data.ocl, device_param->device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, sizeof (sm_major), &sm_major, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetDeviceInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
device_param->sm_minor = sm_minor;
device_param->sm_major = sm_major;
- }
- else
- {
- cl_uint device_processor_cores = 1;
- device_param->device_processor_cores = device_processor_cores;
+ // CPU burning loop damper
+ // Value is given as number between 0-100
+ // By default 100%
+
+ device_param->nvidia_spin_damp = (double) nvidia_spin_damp;
+
+ if (nvidia_spin_damp_chgd == 0)
+ {
+ if (data.attack_mode == ATTACK_MODE_STRAIGHT)
+ {
+ /**
+ * the workaround is not a friend of rule based attacks
+ * the words from the wordlist combined with fast and slow rules cause
+ * fluctuations which cause inaccurate wait time estimations
+ * using a reduced damping percentage almost compensates this
+ */
+
+ device_param->nvidia_spin_damp = 64;
+ }
+ }
+
+ device_param->nvidia_spin_damp /= 100;
}
}
{
if (device_param->skipped == 0)
{
- log_info ("Device #%u: %s, %lu/%lu MB allocatable, %uMCU",
+ log_info ("- Device #%u: %s, %lu/%lu MB allocatable, %uMCU",
device_id + 1,
device_name,
(unsigned int) (device_maxmem_alloc / 1024 / 1024),
}
else
{
- log_info ("Device #%u: %s, skipped",
+ log_info ("- Device #%u: %s, skipped",
device_id + 1,
device_name);
}
if (catalyst_broken == 1)
{
log_info ("");
- log_info ("ATTENTION! The installed catalyst driver in your system is known to be broken!");
- log_info ("It will pass over cracked hashes and does not report them as cracked");
+ log_info ("ATTENTION! The Catalyst driver installed on your system is known to be broken!");
+ log_info ("It passes over cracked hashes and will not report them as cracked");
log_info ("You are STRONGLY encouraged not to use it");
log_info ("You can use --force to override this but do not post error reports if you do so");
log_info ("");
- return (-1);
+ return -1;
}
if (catalyst_warn == 1)
{
log_info ("");
- log_info ("ATTENTION! Unsupported or incorrect installed catalyst driver detected!");
- log_info ("You are STRONGLY encouraged to use the official supported catalyst driver for good reasons");
+ log_info ("ATTENTION! Unsupported or incorrectly installed Catalyst driver detected!");
+ log_info ("You are STRONGLY encouraged to use the official supported catalyst driver");
log_info ("See hashcat's homepage for official supported catalyst drivers");
#ifdef _WIN
log_info ("Also see: http://hashcat.net/wiki/doku.php?id=upgrading_amd_drivers_how_to");
log_info ("You can use --force to override this but do not post error reports if you do so");
log_info ("");
- return (-1);
+ return -1;
}
}
else if (platform_vendor_id == VENDOR_ID_NV)
{
if (device_param->kernel_exec_timeout != 0)
{
- if (data.quiet == 0) log_info ("Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
- if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
+ if (data.quiet == 0) log_info ("- Device #%u: WARNING! Kernel exec timeout is not disabled, it might cause you errors of code 702", device_id + 1);
+ if (data.quiet == 0) log_info (" See the wiki on how to disable it: https://hashcat.net/wiki/doku.php?id=timeout_patch");
}
}
}
log_info ("A good alternative is the free pocl >= v0.13, but make sure to use a LLVM >= v3.8");
log_info ("");
- return (-1);
+ return -1;
}
}
}
devices_cnt++;
}
+
+ if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
+ {
+ if (machine_readable == 0)
+ {
+ log_info ("");
+ }
+ }
}
if (keyspace == 0 && devices_active == 0)
{
log_error ("ERROR: No devices found/left");
- return (-1);
+ return -1;
}
// additional check to see if the user has chosen a device that is not within the range of available devices (i.e. larger than devices_cnt)
{
log_error ("ERROR: The device specified by the --opencl-devices parameter is larger than the number of available devices (%d)", devices_cnt);
- return (-1);
+ return -1;
}
}
data.devices_active = devices_active;
- if ((benchmark == 1 || quiet == 0) && (algorithm_pos == 0))
- {
- if (machine_readable == 0)
- {
- log_info ("");
- }
- }
-
/**
* HM devices: init
*/
#ifdef HAVE_HWMON
- hm_attrs_t hm_adapters_adl[DEVICES_MAX] = { { 0 } };
- hm_attrs_t hm_adapters_nvapi[DEVICES_MAX] = { { 0 } };
- hm_attrs_t hm_adapters_nvml[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_adl[DEVICES_MAX];
+ hm_attrs_t hm_adapters_nvapi[DEVICES_MAX];
+ hm_attrs_t hm_adapters_nvml[DEVICES_MAX];
+ hm_attrs_t hm_adapters_xnvctrl[DEVICES_MAX];
+
+ memset (hm_adapters_adl, 0, sizeof (hm_adapters_adl));
+ memset (hm_adapters_nvapi, 0, sizeof (hm_adapters_nvapi));
+ memset (hm_adapters_nvml, 0, sizeof (hm_adapters_nvml));
+ memset (hm_adapters_xnvctrl, 0, sizeof (hm_adapters_xnvctrl));
if (gpu_temp_disable == 0)
{
- ADL_PTR *adl = (ADL_PTR *) mymalloc (sizeof (ADL_PTR));
- NVAPI_PTR *nvapi = (NVAPI_PTR *) mymalloc (sizeof (NVAPI_PTR));
- NVML_PTR *nvml = (NVML_PTR *) mymalloc (sizeof (NVML_PTR));
+ ADL_PTR *adl = (ADL_PTR *) mymalloc (sizeof (ADL_PTR));
+ NVAPI_PTR *nvapi = (NVAPI_PTR *) mymalloc (sizeof (NVAPI_PTR));
+ NVML_PTR *nvml = (NVML_PTR *) mymalloc (sizeof (NVML_PTR));
+ XNVCTRL_PTR *xnvctrl = (XNVCTRL_PTR *) mymalloc (sizeof (XNVCTRL_PTR));
- data.hm_adl = NULL;
- data.hm_nvapi = NULL;
- data.hm_nvml = NULL;
+ data.hm_adl = NULL;
+ data.hm_nvapi = NULL;
+ data.hm_nvml = NULL;
+ data.hm_xnvctrl = NULL;
if ((need_nvml == 1) && (nvml_init (nvml) == 0))
{
if (hm_NVML_nvmlDeviceGetFanSpeed (data.hm_nvml, 0, hm_adapters_nvml[i].nvml, &speed) == NVML_SUCCESS) hm_adapters_nvml[i].fan_get_supported = 1;
- hm_NVML_nvmlDeviceSetComputeMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_COMPUTEMODE_EXCLUSIVE_PROCESS);
-
- hm_NVML_nvmlDeviceSetGpuOperationMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_GOM_ALL_ON);
+ // doesn't seem to create any advantages
+ //hm_NVML_nvmlDeviceSetComputeMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_COMPUTEMODE_EXCLUSIVE_PROCESS);
+ //hm_NVML_nvmlDeviceSetGpuOperationMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_GOM_ALL_ON);
}
}
}
}
}
+ if ((need_xnvctrl == 1) && (xnvctrl_init (xnvctrl) == 0))
+ {
+ data.hm_xnvctrl = xnvctrl;
+ }
+
+ if (data.hm_xnvctrl)
+ {
+ if (hm_XNVCTRL_XOpenDisplay (data.hm_xnvctrl) == 0)
+ {
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+
+ hm_adapters_xnvctrl[device_id].xnvctrl = device_id;
+
+ int speed = 0;
+
+ if (get_fan_speed_current (data.hm_xnvctrl, device_id, &speed) == 0) hm_adapters_xnvctrl[device_id].fan_get_supported = 1;
+ }
+ }
+ }
+
if ((need_adl == 1) && (adl_init (adl) == 0))
{
data.hm_adl = adl;
int hm_adapters_num;
- if (get_adapters_num_adl (data.hm_adl, &hm_adapters_num) != 0) return (-1);
+ if (get_adapters_num_adl (data.hm_adl, &hm_adapters_num) != 0) return -1;
// adapter info
LPAdapterInfo lpAdapterInfo = hm_get_adapter_info_adl (data.hm_adl, hm_adapters_num);
- if (lpAdapterInfo == NULL) return (-1);
+ if (lpAdapterInfo == NULL) return -1;
// get a list (of ids of) valid/usable adapters
}
}
- if (data.hm_adl == NULL && data.hm_nvml == NULL)
+ if (data.hm_adl == NULL && data.hm_nvml == NULL && data.hm_xnvctrl == NULL)
{
gpu_temp_disable = 1;
}
* OpenCL devices: allocate buffer for device specific information
*/
- int *temp_retain_fanspeed_value = (int *) mycalloc (data.devices_cnt, sizeof (int));
- int *temp_retain_fanpolicy_value = (int *) mycalloc (data.devices_cnt, sizeof (int));
-
ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (data.devices_cnt, sizeof (ADLOD6MemClockState));
int *od_power_control_status = (int *) mycalloc (data.devices_cnt, sizeof (int));
{
if (gpu_temp_abort < gpu_temp_retain)
{
- log_error ("ERROR: invalid values for gpu-temp-abort. Parameter gpu-temp-abort is less than gpu-temp-retain.");
+ log_error ("ERROR: Invalid values for gpu-temp-abort. Parameter gpu-temp-abort is less than gpu-temp-retain.");
- return (-1);
+ return -1;
}
}
*/
#ifdef HAVE_HWMON
- if (gpu_temp_disable == 0 && data.hm_adl == NULL && data.hm_nvml == NULL)
+ if (gpu_temp_disable == 0 && data.hm_adl == NULL && data.hm_nvml == NULL && data.hm_xnvctrl == NULL)
{
log_info ("Watchdog: Hardware Monitoring Interface not found on your system");
}
data.hm_device[device_id].adl = hm_adapters_adl[platform_devices_id].adl;
data.hm_device[device_id].nvapi = 0;
data.hm_device[device_id].nvml = 0;
+ data.hm_device[device_id].xnvctrl = 0;
data.hm_device[device_id].od_version = hm_adapters_adl[platform_devices_id].od_version;
data.hm_device[device_id].fan_get_supported = hm_adapters_adl[platform_devices_id].fan_get_supported;
- data.hm_device[device_id].fan_set_supported = hm_adapters_adl[platform_devices_id].fan_set_supported;
+ data.hm_device[device_id].fan_set_supported = 0;
}
if (device_param->device_vendor_id == VENDOR_ID_NV)
data.hm_device[device_id].adl = 0;
data.hm_device[device_id].nvapi = hm_adapters_nvapi[platform_devices_id].nvapi;
data.hm_device[device_id].nvml = hm_adapters_nvml[platform_devices_id].nvml;
+ data.hm_device[device_id].xnvctrl = hm_adapters_xnvctrl[platform_devices_id].xnvctrl;
data.hm_device[device_id].od_version = 0;
data.hm_device[device_id].fan_get_supported = hm_adapters_nvml[platform_devices_id].fan_get_supported;
data.hm_device[device_id].fan_set_supported = 0;
{
log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
- return (-1);
+ return -1;
}
// first backup current value, we will restore it later
{
log_error ("ERROR: Failed to get current ADL PowerControl settings");
- return (-1);
+ return -1;
}
if ((ADL_rc = hm_ADL_Overdrive_PowerControl_Set (data.hm_adl, data.hm_device[device_id].adl, powertune.iMaxValue)) != ADL_OK)
{
log_error ("ERROR: Failed to set new ADL PowerControl values");
- return (-1);
+ return -1;
}
// clocks
{
log_error ("ERROR: Failed to get ADL memory and engine clock frequency");
- return (-1);
+ return -1;
}
// Query capabilities only to see if profiles were not "damaged", if so output a warning but do accept the users profile settings
{
log_error ("ERROR: Failed to get ADL device capabilities");
- return (-1);
+ return -1;
}
int engine_clock_max = caps.sEngineClockRange.iMax * 0.6666;
if ((engine_clock_max - engine_clock_profile_max) > warning_trigger_engine)
{
- log_info ("WARN: the custom profile seems to have too low maximum engine clock values. You therefore may not reach full performance");
+ log_info ("WARN: The custom profile seems to have too low maximum engine clock values. You therefore may not reach full performance");
}
if ((memory_clock_max - memory_clock_profile_max) > warning_trigger_memory)
{
- log_info ("WARN: the custom profile seems to have too low maximum memory clock values. You therefore may not reach full performance");
+ log_info ("WARN: The custom profile seems to have too low maximum memory clock values. You therefore may not reach full performance");
}
ADLOD6StateInfo *performance_state = (ADLOD6StateInfo*) mycalloc (1, sizeof (ADLOD6StateInfo) + sizeof (ADLOD6PerformanceLevel));
{
log_info ("ERROR: Failed to set ADL performance state");
- return (-1);
+ return -1;
}
local_free (performance_state);
{
log_error ("ERROR: Failed to get current ADL PowerControl settings");
- return (-1);
+ return -1;
}
if ((ADL_rc = hm_ADL_Overdrive_PowerControl_Set (data.hm_adl, data.hm_device[device_id].adl, powertune.iMaxValue)) != ADL_OK)
{
log_error ("ERROR: Failed to set new ADL PowerControl values");
- return (-1);
+ return -1;
}
}
}
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
+ cl_int CL_err = CL_SUCCESS;
+
/**
* host buffer
*/
const char *device_name_chksum = device_param->device_name_chksum;
const u32 device_processors = device_param->device_processors;
- const u32 device_processor_cores = device_param->device_processor_cores;
/**
* create context for each device
*/
- device_param->context = hc_clCreateContext (data.ocl, NULL, 1, &device_param->device, NULL, NULL);
+ cl_context_properties properties[3];
+
+ properties[0] = CL_CONTEXT_PLATFORM;
+ properties[1] = (cl_context_properties) device_param->platform;
+ properties[2] = 0;
+
+ CL_err = hc_clCreateContext (data.ocl, properties, 1, &device_param->device, NULL, NULL, &device_param->context);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateContext(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
/**
* create command-queue
// not supported with NV
// device_param->command_queue = hc_clCreateCommandQueueWithProperties (device_param->context, device_param->device, NULL);
- device_param->command_queue = hc_clCreateCommandQueue (data.ocl, device_param->context, device_param->device, CL_QUEUE_PROFILING_ENABLE);
+ CL_err = hc_clCreateCommandQueue (data.ocl, device_param->context, device_param->device, CL_QUEUE_PROFILING_ENABLE, &device_param->command_queue);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateCommandQueue(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
/**
* kernel threads: some algorithms need a fixed kernel-threads count
uint kernel_threads = MIN (KERNEL_THREADS_MAX, device_param->device_maxworkgroup_size);
+ if (hash_mode == 8900) kernel_threads = 64; // Scrypt
+ if (hash_mode == 9300) kernel_threads = 64; // Scrypt
+
if (device_param->device_type & CL_DEVICE_TYPE_CPU)
{
kernel_threads = KERNEL_THREADS_MAX_CPU;
// scryptV stuff
- size_t size_scryptV = 1;
+ size_t size_scrypt = 4;
if ((hash_mode == 8900) || (hash_mode == 9300))
{
+ // we need to check that all hashes have the same scrypt settings
+
+ const u32 scrypt_N = data.salts_buf[0].scrypt_N;
+ const u32 scrypt_r = data.salts_buf[0].scrypt_r;
+ const u32 scrypt_p = data.salts_buf[0].scrypt_p;
+
+ for (uint i = 1; i < salts_cnt; i++)
+ {
+ if ((data.salts_buf[i].scrypt_N != scrypt_N)
+ || (data.salts_buf[i].scrypt_r != scrypt_r)
+ || (data.salts_buf[i].scrypt_p != scrypt_p))
+ {
+ log_error ("ERROR: Mixed scrypt settings not supported");
+
+ return -1;
+ }
+ }
+
uint tmto_start = 0;
uint tmto_stop = 10;
else
{
// in case the user did not specify the tmto manually
- // use some values known to run best (tested on 290x for AMD and 980ti for NV)
- // but set the lower end only in case the user has a device with too less memory
+ // use some values known to run best (tested on 290x for AMD and GTX1080 for NV)
if (hash_mode == 8900)
{
if (device_param->device_vendor_id == VENDOR_ID_AMD)
{
- tmto_start = 1;
+ tmto_start = 3;
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
- tmto_start = 2;
+ tmto_start = 4;
}
}
}
- for (uint tmto = tmto_start; tmto < tmto_stop; tmto++)
+ data.scrypt_tmp_size = (128 * scrypt_r * scrypt_p);
+
+ device_param->kernel_accel_min = 1;
+ device_param->kernel_accel_max = 8;
+
+ uint tmto;
+
+ for (tmto = tmto_start; tmto < tmto_stop; tmto++)
{
- // TODO: in theory the following calculation needs to be done per salt, not global
- // we assume all hashes have the same scrypt settings
+ size_scrypt = (128 * scrypt_r) * scrypt_N;
- size_scryptV = (128 * data.salts_buf[0].scrypt_r) * data.salts_buf[0].scrypt_N;
+ size_scrypt /= 1 << tmto;
- size_scryptV /= 1 << tmto;
+ size_scrypt *= device_param->device_processors * device_param->kernel_threads * device_param->kernel_accel_max;
+
+ if ((size_scrypt / 4) > device_param->device_maxmem_alloc)
+ {
+ if (quiet == 0) log_info ("WARNING: Not enough single-block device memory allocatable to use --scrypt-tmto %d, increasing...", tmto);
- size_scryptV *= device_processors * device_processor_cores;
+ continue;
+ }
- if (size_scryptV > device_param->device_maxmem_alloc)
+ if (size_scrypt > device_param->device_global_mem)
{
- if (quiet == 0) log_info ("WARNING: not enough device memory allocatable to use --scrypt-tmto %d, increasing...", tmto);
+ if (quiet == 0) log_info ("WARNING: Not enough total device memory allocatable to use --scrypt-tmto %d, increasing...", tmto);
continue;
}
for (uint salts_pos = 0; salts_pos < data.salts_cnt; salts_pos++)
{
- data.salts_buf[salts_pos].scrypt_tmto = tmto;
- data.salts_buf[salts_pos].scrypt_phy = device_processors * device_processor_cores;
+ data.scrypt_tmto_final = tmto;
}
break;
}
- if (data.salts_buf[0].scrypt_phy == 0)
+ if (tmto == tmto_stop)
{
- log_error ("ERROR: can't allocate enough device memory");
+ log_error ("ERROR: Can't allocate enough device memory");
return -1;
}
- if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u, mem: %u\n", data.salts_buf[0].scrypt_tmto, size_scryptV);
+ if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u, mem: %llu\n", data.scrypt_tmto_final, (unsigned long long int) size_scrypt);
}
+ size_t size_scrypt4 = size_scrypt / 4;
+
/**
* some algorithms need a fixed kernel-loops count
*/
case 7900: size_tmps = kernel_power_max * sizeof (drupal7_tmp_t); break;
case 8200: size_tmps = kernel_power_max * sizeof (pbkdf2_sha512_tmp_t); break;
case 8800: size_tmps = kernel_power_max * sizeof (androidfde_tmp_t); break;
- case 8900: size_tmps = kernel_power_max * sizeof (scrypt_tmp_t); break;
+ case 8900: size_tmps = kernel_power_max * data.scrypt_tmp_size; break;
case 9000: size_tmps = kernel_power_max * sizeof (pwsafe2_tmp_t); break;
case 9100: size_tmps = kernel_power_max * sizeof (lotus8_tmp_t); break;
case 9200: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
- case 9300: size_tmps = kernel_power_max * sizeof (scrypt_tmp_t); break;
+ case 9300: size_tmps = kernel_power_max * data.scrypt_tmp_size; break;
case 9400: size_tmps = kernel_power_max * sizeof (office2007_tmp_t); break;
case 9500: size_tmps = kernel_power_max * sizeof (office2010_tmp_t); break;
case 9600: size_tmps = kernel_power_max * sizeof (office2013_tmp_t); break;
// now check if all device-memory sizes which depend on the kernel_accel_max amplifier are within its boundaries
// if not, decrease amplifier and try again
- int skip = 0;
+ int memory_limit_hit = 0;
+
+ if (size_pws > device_param->device_maxmem_alloc) memory_limit_hit = 1;
+ if (size_tmps > device_param->device_maxmem_alloc) memory_limit_hit = 1;
+ if (size_hooks > device_param->device_maxmem_alloc) memory_limit_hit = 1;
const u64 size_total
= bitmap_size
+ size_rules
+ size_rules_c
+ size_salts
- + size_scryptV
+ + size_scrypt4
+ + size_scrypt4
+ + size_scrypt4
+ + size_scrypt4
+ size_shown
+ size_tm
+ size_tmps;
- // Don't ask me, ask AMD!
+ if (size_total > device_param->device_global_mem) memory_limit_hit = 1;
- if (size_total > device_param->device_maxmem_alloc) skip = 1;
- if (size_total > device_param->device_global_mem) skip = 1;
-
- if (skip == 1)
+ if (memory_limit_hit == 1)
{
kernel_accel_max--;
break;
}
- /*
- if (kernel_accel_max == 0)
+ if (kernel_accel_max < kernel_accel_min)
{
- log_error ("Device #%u: Device does not provide enough allocatable device-memory to handle hash-type %u", device_id + 1, data.hash_mode);
+ log_error ("- Device #%u: Device does not provide enough allocatable device-memory to handle this attack", device_id + 1);
return -1;
}
- */
device_param->kernel_accel_min = kernel_accel_min;
device_param->kernel_accel_max = kernel_accel_max;
/*
if (kernel_accel_max < kernel_accel)
{
- if (quiet == 0) log_info ("Device #%u: Reduced maximum kernel-accel to %u", device_id + 1, kernel_accel_max);
+ if (quiet == 0) log_info ("- Device #%u: Reduced maximum kernel-accel to %u", device_id + 1, kernel_accel_max);
device_param->kernel_accel = kernel_accel_max;
}
* default building options
*/
- char build_opts[1024] = { 0 };
+ char cpath[1024] = { 0 };
- // we don't have sm_* on vendors not NV but it doesn't matter
+ char build_opts[1024] = { 0 };
#if _WIN
- snprintf (build_opts, sizeof (build_opts) - 1, "-I \"%s\\OpenCL\\\" -I '%s\\OpenCL\\' -I %s\\OpenCL\\ -I\"%s\\OpenCL\\\" -I'%s\\OpenCL\\' -I%s\\OpenCL\\", shared_dir, shared_dir, shared_dir, shared_dir, shared_dir, shared_dir);
- #else
- snprintf (build_opts, sizeof (build_opts) - 1, "-I \"%s/OpenCL/\" -I '%s/OpenCL/' -I %s/OpenCL/ -I\"%s/OpenCL/\" -I'%s/OpenCL/' -I%s/OpenCL/", shared_dir, shared_dir, shared_dir, shared_dir, shared_dir, shared_dir);
- #endif
- char build_opts_new[1024] = { 0 };
-
- snprintf (build_opts_new, sizeof (build_opts_new) - 1, "%s -DVENDOR_ID=%u -DCUDA_ARCH=%d -DVECT_SIZE=%u -DDEVICE_TYPE=%u -DKERN_TYPE=%u -D_unroll -cl-std=CL1.1", build_opts, device_param->device_vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type, kern_type);
+ snprintf (cpath, sizeof (cpath) - 1, "%s\\OpenCL\\", shared_dir);
- strncpy (build_opts, build_opts_new, sizeof (build_opts) - 1);
+ char *cpath_real = mymalloc (MAX_PATH);
- /*
- if (device_param->device_vendor_id == VENDOR_ID_INTEL_SDK)
+ if (GetFullPathName (cpath, MAX_PATH, cpath_real, NULL) == 0)
{
- // we do vectorizing much better than the auto-vectorizer
+ log_error ("ERROR: %s: %s", cpath, "GetFullPathName()");
+
+ return -1;
+ }
+
+ #else
- snprintf (build_opts_new, sizeof (build_opts_new) - 1, "%s -cl-opt-disable", build_opts);
+ snprintf (cpath, sizeof (cpath) - 1, "%s/OpenCL/", shared_dir);
- strncpy (build_opts, build_opts_new, sizeof (build_opts) - 1);
+ char *cpath_real = mymalloc (PATH_MAX);
+
+ if (realpath (cpath, cpath_real) == NULL)
+ {
+ log_error ("ERROR: %s: %s", cpath, strerror (errno));
+
+ return -1;
}
- */
- #ifdef DEBUG
- log_info ("Device #%u: build_opts '%s'\n", device_id + 1, build_opts);
#endif
- /**
- * main kernel
- */
-
+ if (chdir (cpath_real) == -1)
{
- /**
- * kernel source filename
- */
+ log_error ("ERROR: %s: %s", cpath_real, strerror (errno));
- char source_file[256] = { 0 };
+ return -1;
+ }
- generate_source_kernel_filename (attack_exec, attack_kern, kern_type, shared_dir, source_file);
+ #if _WIN
- struct stat sst;
+ naive_replace (cpath_real, '\\', '/');
- if (stat (source_file, &sst) == -1)
- {
- log_error ("ERROR: %s: %s", source_file, strerror (errno));
+ // no escaping needed here: on Windows the path is wrapped in quotes
- return -1;
- }
+ snprintf (build_opts, sizeof (build_opts) - 1, "-I \"%s\"", cpath_real);
+
+ #else
+
+ naive_escape (cpath_real, PATH_MAX, ' ', '\\');
+
+ snprintf (build_opts, sizeof (build_opts) - 1, "-I %s", cpath_real);
+
+ #endif
+
+ // include check
+ // this test needs to be done manually because of the OSX OpenCL runtime:
+ // if there's a permission problem, it's not reported back and fails silently
+
+ #define files_cnt 15
+
+ const char *files_names[files_cnt] =
+ {
+ "inc_cipher_aes256.cl",
+ "inc_cipher_serpent256.cl",
+ "inc_cipher_twofish256.cl",
+ "inc_common.cl",
+ "inc_comp_multi_bs.cl",
+ "inc_comp_multi.cl",
+ "inc_comp_single_bs.cl",
+ "inc_comp_single.cl",
+ "inc_hash_constants.h",
+ "inc_hash_functions.cl",
+ "inc_rp.cl",
+ "inc_rp.h",
+ "inc_simd.cl",
+ "inc_types.cl",
+ "inc_vendor.cl",
+ };
+
+ for (int i = 0; i < files_cnt; i++)
+ {
+ FILE *fd = fopen (files_names[i], "r");
+
+ if (fd == NULL)
+ {
+ log_error ("ERROR: %s: fopen(): %s", files_names[i], strerror (errno));
+
+ return -1;
+ }
+
+ char buf[1];
+
+ size_t n = fread (buf, 1, 1, fd);
+
+ if (n != 1)
+ {
+ log_error ("ERROR: %s: fread(): %s", files_names[i], strerror (errno));
+
+ return -1;
+ }
+
+ fclose (fd);
+ }
+
+ myfree (cpath_real);
+
+ // we don't have sm_* on vendors not NV but it doesn't matter
+
+ char build_opts_new[1024] = { 0 };
+
+ snprintf (build_opts_new, sizeof (build_opts_new) - 1, "%s -D VENDOR_ID=%u -D CUDA_ARCH=%d -D VECT_SIZE=%u -D DEVICE_TYPE=%u -D DGST_R0=%u -D DGST_R1=%u -D DGST_R2=%u -D DGST_R3=%u -D DGST_ELEM=%u -D KERN_TYPE=%u -D _unroll -cl-std=CL1.1", build_opts, device_param->device_vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type, data.dgst_pos0, data.dgst_pos1, data.dgst_pos2, data.dgst_pos3, data.dgst_size / 4, kern_type);
+
+ strncpy (build_opts, build_opts_new, sizeof (build_opts));
+
+ #ifdef DEBUG
+ log_info ("- Device #%u: build_opts '%s'\n", device_id + 1, build_opts);
+ #endif
+
+ /**
+ * main kernel
+ */
+
+ {
+ /**
+ * kernel source filename
+ */
+
+ char source_file[256] = { 0 };
+
+ generate_source_kernel_filename (attack_exec, attack_kern, kern_type, shared_dir, source_file);
+
+ struct stat sst;
+
+ if (stat (source_file, &sst) == -1)
+ {
+ log_error ("ERROR: %s: %s", source_file, strerror (errno));
+
+ return -1;
+ }
/**
* kernel cached filename
{
if (cached == 0)
{
- if (quiet == 0) log_info ("Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, cached_file);
+ if (quiet == 0) log_info ("- Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, filename_from_filepath (cached_file));
load_kernel (source_file, 1, kernel_lengths, kernel_sources);
- device_param->program = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL);
+ CL_err = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL, &device_param->program);
- int rc = hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts, NULL, NULL, false);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateProgramWithSource(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clBuildProgram(): %s\n", val2cstr_cl (CL_err));
+
+ //return -1;
+ }
#ifdef DEBUG
size_t build_log_size = 0;
- hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, 0, NULL, &build_log_size);
+ CL_err = hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, 0, NULL, &build_log_size);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramBuildInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (build_log_size > 1)
{
- char *build_log = (char *) malloc (build_log_size + 1);
+ char *build_log = (char *) mymalloc (build_log_size + 1);
- memset (build_log, 0, build_log_size + 1);
+ CL_err = hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, build_log_size, build_log, NULL);
- hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, build_log_size, build_log, NULL);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramBuildInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
puts (build_log);
- free (build_log);
+ myfree (build_log);
}
#endif
- if (rc != 0)
+ if (CL_err != CL_SUCCESS)
{
device_param->skipped = true;
- log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
+ log_info ("- Device #%u: Kernel %s build failure. Proceeding without this device.", device_id + 1, source_file);
continue;
}
size_t binary_size;
- hc_clGetProgramInfo (data.ocl, device_param->program, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
+ CL_err = hc_clGetProgramInfo (data.ocl, device_param->program, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
u8 *binary = (u8 *) mymalloc (binary_size);
- hc_clGetProgramInfo (data.ocl, device_param->program, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
+ CL_err = hc_clGetProgramInfo (data.ocl, device_param->program, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
writeProgramBin (cached_file, binary, binary_size);
else
{
#ifdef DEBUG
- log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ log_info ("- Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
#endif
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program = hc_clCreateProgramWithBinary (data.ocl, device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
+ CL_err = hc_clCreateProgramWithBinary (data.ocl, device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL, &device_param->program);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateProgramWithBinary(): %s\n", val2cstr_cl (CL_err));
- hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts, NULL, NULL, true);
+ return -1;
+ }
+
+ CL_err = hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clBuildProgram(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
else
{
#ifdef DEBUG
- log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, source_file, sst.st_size);
+ log_info ("- Device #%u: Kernel %s (%ld bytes)", device_id + 1, source_file, sst.st_size);
#endif
load_kernel (source_file, 1, kernel_lengths, kernel_sources);
- device_param->program = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL);
+ CL_err = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL, &device_param->program);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateProgramWithSource(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
char build_opts_update[1024] = { 0 };
if (force_jit_compilation == 1500)
{
- snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DDESCRYPT_SALT=%d", build_opts, data.salts_buf[0].salt_buf[0]);
+ snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DDESCRYPT_SALT=%u", build_opts, data.salts_buf[0].salt_buf[0]);
}
else if (force_jit_compilation == 8900)
{
- snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DSCRYPT_N=%d -DSCRYPT_R=%d -DSCRYPT_P=%d -DSCRYPT_TMTO=%d", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, 1 << data.salts_buf[0].scrypt_tmto);
+ snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s -DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%u -DSCRYPT_TMP_ELEM=%u", build_opts, data.salts_buf[0].scrypt_N, data.salts_buf[0].scrypt_r, data.salts_buf[0].scrypt_p, 1 << data.scrypt_tmto_final, data.scrypt_tmp_size / 16);
}
else
{
snprintf (build_opts_update, sizeof (build_opts_update) - 1, "%s", build_opts);
}
- int rc = hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts_update, NULL, NULL, false);
+ CL_err = hc_clBuildProgram (data.ocl, device_param->program, 1, &device_param->device, build_opts_update, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clBuildProgram(): %s\n", val2cstr_cl (CL_err));
+
+ //return -1;
+ }
#ifdef DEBUG
size_t build_log_size = 0;
- hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, 0, NULL, &build_log_size);
+ CL_err = hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, 0, NULL, &build_log_size);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramBuildInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (build_log_size > 1)
{
- char *build_log = (char *) malloc (build_log_size + 1);
+ char *build_log = (char *) mymalloc (build_log_size + 1);
- memset (build_log, 0, build_log_size + 1);
+ CL_err = hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, build_log_size, build_log, NULL);
- hc_clGetProgramBuildInfo (data.ocl, device_param->program, device_param->device, CL_PROGRAM_BUILD_LOG, build_log_size, build_log, NULL);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramBuildInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
puts (build_log);
- free (build_log);
+ myfree (build_log);
}
#endif
- if (rc != 0)
+ if (CL_err != CL_SUCCESS)
{
device_param->skipped = true;
- log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
+ log_info ("- Device #%u: Kernel %s build failure. Proceeding without this device.", device_id + 1, source_file);
}
}
if (cached == 0)
{
- if (quiet == 0) log_info ("Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, cached_file);
+ if (quiet == 0) log_info ("- Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, filename_from_filepath (cached_file));
if (quiet == 0) log_info ("");
load_kernel (source_file, 1, kernel_lengths, kernel_sources);
- device_param->program_mp = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL);
+ CL_err = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL, &device_param->program_mp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateProgramWithSource(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
- int rc = hc_clBuildProgram (data.ocl, device_param->program_mp, 1, &device_param->device, build_opts, NULL, NULL, false);
+ CL_err = hc_clBuildProgram (data.ocl, device_param->program_mp, 1, &device_param->device, build_opts, NULL, NULL);
- if (rc != 0)
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clBuildProgram(): %s\n", val2cstr_cl (CL_err));
+
+ //return -1;
+ }
+
+ if (CL_err != CL_SUCCESS)
{
device_param->skipped = true;
- log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
+ log_info ("- Device #%u: Kernel %s build failure. Proceeding without this device.", device_id + 1, source_file);
continue;
}
size_t binary_size;
- hc_clGetProgramInfo (data.ocl, device_param->program_mp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
+ CL_err = hc_clGetProgramInfo (data.ocl, device_param->program_mp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
u8 *binary = (u8 *) mymalloc (binary_size);
- hc_clGetProgramInfo (data.ocl, device_param->program_mp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
+ CL_err = hc_clGetProgramInfo (data.ocl, device_param->program_mp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
writeProgramBin (cached_file, binary, binary_size);
else
{
#ifdef DEBUG
- log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ log_info ("- Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
#endif
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_mp = hc_clCreateProgramWithBinary (data.ocl, device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
+ CL_err = hc_clCreateProgramWithBinary (data.ocl, device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL, &device_param->program_mp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateProgramWithBinary(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clBuildProgram (data.ocl, device_param->program_mp, 1, &device_param->device, build_opts, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clBuildProgram(): %s\n", val2cstr_cl (CL_err));
- hc_clBuildProgram (data.ocl, device_param->program_mp, 1, &device_param->device, build_opts, NULL, NULL, true);
+ return -1;
+ }
}
local_free (kernel_lengths);
if (cached == 0)
{
- if (quiet == 0) log_info ("Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, cached_file);
+ if (quiet == 0) log_info ("- Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, filename_from_filepath (cached_file));
if (quiet == 0) log_info ("");
load_kernel (source_file, 1, kernel_lengths, kernel_sources);
- device_param->program_amp = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL);
+ CL_err = hc_clCreateProgramWithSource (data.ocl, device_param->context, 1, (const char **) kernel_sources, NULL, &device_param->program_amp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateProgramWithSource(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clBuildProgram (data.ocl, device_param->program_amp, 1, &device_param->device, build_opts, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clBuildProgram(): %s\n", val2cstr_cl (CL_err));
- int rc = hc_clBuildProgram (data.ocl, device_param->program_amp, 1, &device_param->device, build_opts, NULL, NULL, false);
+ //return -1;
+ }
- if (rc != 0)
+ if (CL_err != CL_SUCCESS)
{
device_param->skipped = true;
- log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
+ log_info ("- Device #%u: Kernel %s build failure. Proceeding without this device.", device_id + 1, source_file);
continue;
}
size_t binary_size;
- hc_clGetProgramInfo (data.ocl, device_param->program_amp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
+ CL_err = hc_clGetProgramInfo (data.ocl, device_param->program_amp, CL_PROGRAM_BINARY_SIZES, sizeof (size_t), &binary_size, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
u8 *binary = (u8 *) mymalloc (binary_size);
- hc_clGetProgramInfo (data.ocl, device_param->program_amp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
+ CL_err = hc_clGetProgramInfo (data.ocl, device_param->program_amp, CL_PROGRAM_BINARIES, sizeof (binary), &binary, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetProgramInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
writeProgramBin (cached_file, binary, binary_size);
else
{
#ifdef DEBUG
- if (quiet == 0) log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ if (quiet == 0) log_info ("- Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
#endif
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
- device_param->program_amp = hc_clCreateProgramWithBinary (data.ocl, device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL);
+ CL_err = hc_clCreateProgramWithBinary (data.ocl, device_param->context, 1, &device_param->device, kernel_lengths, (const u8 **) kernel_sources, NULL, &device_param->program_amp);
- hc_clBuildProgram (data.ocl, device_param->program_amp, 1, &device_param->device, build_opts, NULL, NULL, true);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateProgramWithBinary(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clBuildProgram (data.ocl, device_param->program_amp, 1, &device_param->device, build_opts, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clBuildProgram(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
local_free (kernel_lengths);
local_free (kernel_sources);
}
+ // return to the folder we came from initially (workaround)
+
+ if (chdir (cwd) == -1)
+ {
+ log_error ("ERROR: %s: %s", cwd, strerror (errno));
+
+ return -1;
+ }
+
// some algorithm collide too fast, make that impossible
if (benchmark == 1)
* global buffers
*/
- device_param->d_pws_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_pws, NULL);
- device_param->d_pws_amp_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_pws, NULL);
- device_param->d_tmps = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_tmps, NULL);
- device_param->d_hooks = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_hooks, NULL);
- device_param->d_bitmap_s1_a = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_bitmap_s1_b = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_bitmap_s1_c = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_bitmap_s1_d = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_bitmap_s2_a = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_bitmap_s2_b = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_bitmap_s2_c = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_bitmap_s2_d = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL);
- device_param->d_plain_bufs = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_plains, NULL);
- device_param->d_digests_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_digests, NULL);
- device_param->d_digests_shown = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_shown, NULL);
- device_param->d_salt_bufs = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_salts, NULL);
- device_param->d_result = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_results, NULL);
- device_param->d_scryptV_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_scryptV, NULL);
-
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_a, CL_TRUE, 0, bitmap_size, bitmap_s1_a, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_b, CL_TRUE, 0, bitmap_size, bitmap_s1_b, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_c, CL_TRUE, 0, bitmap_size, bitmap_s1_c, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_d, CL_TRUE, 0, bitmap_size, bitmap_s1_d, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_a, CL_TRUE, 0, bitmap_size, bitmap_s2_a, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_b, CL_TRUE, 0, bitmap_size, bitmap_s2_b, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_c, CL_TRUE, 0, bitmap_size, bitmap_s2_c, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_d, CL_TRUE, 0, bitmap_size, bitmap_s2_d, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_digests_buf, CL_TRUE, 0, size_digests, data.digests_buf, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_digests_shown, CL_TRUE, 0, size_shown, data.digests_shown, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_salt_bufs, CL_TRUE, 0, size_salts, data.salts_buf, 0, NULL, NULL);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_pws, NULL, &device_param->d_pws_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_pws, NULL, &device_param->d_pws_amp_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_tmps, NULL, &device_param->d_tmps);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_hooks, NULL, &device_param->d_hooks);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s1_a);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s1_b);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s1_c);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s1_d);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s2_a);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s2_b);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s2_c);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, bitmap_size, NULL, &device_param->d_bitmap_s2_d);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_plains, NULL, &device_param->d_plain_bufs);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_digests, NULL, &device_param->d_digests_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_shown, NULL, &device_param->d_digests_shown);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_salts, NULL, &device_param->d_salt_bufs);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_results, NULL, &device_param->d_result);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_scrypt4, NULL, &device_param->d_scryptV0_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_scrypt4, NULL, &device_param->d_scryptV1_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_scrypt4, NULL, &device_param->d_scryptV2_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_WRITE, size_scrypt4, NULL, &device_param->d_scryptV3_buf);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_a, CL_TRUE, 0, bitmap_size, bitmap_s1_a, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_b, CL_TRUE, 0, bitmap_size, bitmap_s1_b, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_c, CL_TRUE, 0, bitmap_size, bitmap_s1_c, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s1_d, CL_TRUE, 0, bitmap_size, bitmap_s1_d, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_a, CL_TRUE, 0, bitmap_size, bitmap_s2_a, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_b, CL_TRUE, 0, bitmap_size, bitmap_s2_b, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_c, CL_TRUE, 0, bitmap_size, bitmap_s2_c, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_bitmap_s2_d, CL_TRUE, 0, bitmap_size, bitmap_s2_d, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_digests_buf, CL_TRUE, 0, size_digests, data.digests_buf, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_digests_shown, CL_TRUE, 0, size_shown, data.digests_shown, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_salt_bufs, CL_TRUE, 0, size_salts, data.salts_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
/**
* special buffers
if (attack_kern == ATTACK_KERN_STRAIGHT)
{
- device_param->d_rules = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_rules, NULL);
- device_param->d_rules_c = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_rules_c, NULL);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_rules, NULL, &device_param->d_rules);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_rules_c, NULL, &device_param->d_rules_c);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_rules, CL_TRUE, 0, size_rules, kernel_rules_buf, 0, NULL, NULL);
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_rules, CL_TRUE, 0, size_rules, kernel_rules_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (attack_kern == ATTACK_KERN_COMBI)
{
- device_param->d_combs = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_combs, NULL);
- device_param->d_combs_c = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_combs, NULL);
- device_param->d_root_css_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_root_css, NULL);
- device_param->d_markov_css_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_markov_css, NULL);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_combs, NULL, &device_param->d_combs);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_combs, NULL, &device_param->d_combs_c);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_root_css, NULL, &device_param->d_root_css_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_markov_css, NULL, &device_param->d_markov_css_buf);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (attack_kern == ATTACK_KERN_BF)
{
- device_param->d_bfs = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_bfs, NULL);
- device_param->d_bfs_c = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_bfs, NULL);
- device_param->d_tm_c = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_tm, NULL);
- device_param->d_root_css_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_root_css, NULL);
- device_param->d_markov_css_buf = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_markov_css, NULL);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_bfs, NULL, &device_param->d_bfs);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_bfs, NULL, &device_param->d_bfs_c);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_tm, NULL, &device_param->d_tm_c);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_root_css, NULL, &device_param->d_root_css_buf);
+ CL_err |= hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_markov_css, NULL, &device_param->d_markov_css_buf);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
if (size_esalts)
{
- device_param->d_esalt_bufs = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_esalts, NULL);
+ CL_err = hc_clCreateBuffer (data.ocl, device_param->context, CL_MEM_READ_ONLY, size_esalts, NULL, &device_param->d_esalt_bufs);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_esalt_bufs, CL_TRUE, 0, size_esalts, data.esalts_buf, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_esalt_bufs, CL_TRUE, 0, size_esalts, data.esalts_buf, 0, NULL, NULL);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
/**
* kernel args
*/
- device_param->kernel_params_buf32[21] = bitmap_mask;
- device_param->kernel_params_buf32[22] = bitmap_shift1;
- device_param->kernel_params_buf32[23] = bitmap_shift2;
- device_param->kernel_params_buf32[24] = 0; // salt_pos
- device_param->kernel_params_buf32[25] = 0; // loop_pos
- device_param->kernel_params_buf32[26] = 0; // loop_cnt
- device_param->kernel_params_buf32[27] = 0; // kernel_rules_cnt
- device_param->kernel_params_buf32[28] = 0; // digests_cnt
- device_param->kernel_params_buf32[29] = 0; // digests_offset
- device_param->kernel_params_buf32[30] = 0; // combs_mode
- device_param->kernel_params_buf32[31] = 0; // gid_max
+ device_param->kernel_params_buf32[24] = bitmap_mask;
+ device_param->kernel_params_buf32[25] = bitmap_shift1;
+ device_param->kernel_params_buf32[26] = bitmap_shift2;
+ device_param->kernel_params_buf32[27] = 0; // salt_pos
+ device_param->kernel_params_buf32[28] = 0; // loop_pos
+ device_param->kernel_params_buf32[29] = 0; // loop_cnt
+ device_param->kernel_params_buf32[30] = 0; // kernel_rules_cnt
+ device_param->kernel_params_buf32[31] = 0; // digests_cnt
+ device_param->kernel_params_buf32[32] = 0; // digests_offset
+ device_param->kernel_params_buf32[33] = 0; // combs_mode
+ device_param->kernel_params_buf32[34] = 0; // gid_max
device_param->kernel_params[ 0] = (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
? &device_param->d_pws_buf
device_param->kernel_params[17] = &device_param->d_salt_bufs;
device_param->kernel_params[18] = &device_param->d_esalt_bufs;
device_param->kernel_params[19] = &device_param->d_result;
- device_param->kernel_params[20] = &device_param->d_scryptV_buf;
- device_param->kernel_params[21] = &device_param->kernel_params_buf32[21];
- device_param->kernel_params[22] = &device_param->kernel_params_buf32[22];
- device_param->kernel_params[23] = &device_param->kernel_params_buf32[23];
+ device_param->kernel_params[20] = &device_param->d_scryptV0_buf;
+ device_param->kernel_params[21] = &device_param->d_scryptV1_buf;
+ device_param->kernel_params[22] = &device_param->d_scryptV2_buf;
+ device_param->kernel_params[23] = &device_param->d_scryptV3_buf;
device_param->kernel_params[24] = &device_param->kernel_params_buf32[24];
device_param->kernel_params[25] = &device_param->kernel_params_buf32[25];
device_param->kernel_params[26] = &device_param->kernel_params_buf32[26];
device_param->kernel_params[29] = &device_param->kernel_params_buf32[29];
device_param->kernel_params[30] = &device_param->kernel_params_buf32[30];
device_param->kernel_params[31] = &device_param->kernel_params_buf32[31];
+ device_param->kernel_params[32] = &device_param->kernel_params_buf32[32];
+ device_param->kernel_params[33] = &device_param->kernel_params_buf32[33];
+ device_param->kernel_params[34] = &device_param->kernel_params_buf32[34];
device_param->kernel_params_mp_buf64[3] = 0;
device_param->kernel_params_mp_buf32[4] = 0;
{
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_s%02d", kern_type, 4);
- device_param->kernel1 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel1);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_s%02d", kern_type, 8);
- device_param->kernel2 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel2);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_s%02d", kern_type, 16);
- device_param->kernel3 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel3);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else
{
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 4);
- device_param->kernel1 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel1);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 8);
- device_param->kernel2 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel2);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_m%02d", kern_type, 16);
- device_param->kernel3 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel3);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
if (data.attack_mode == ATTACK_MODE_BF)
{
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_tm", kern_type);
- device_param->kernel_tm = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel_tm);
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_tm, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_tm, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
}
{
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_init", kern_type);
- device_param->kernel1 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel1);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_loop", kern_type);
- device_param->kernel2 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel2);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_comp", kern_type);
- device_param->kernel3 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel3);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
if (opts_type & OPTS_TYPE_HOOK12)
{
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_hook12", kern_type);
- device_param->kernel12 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel12);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel12, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel12, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ return -1;
+ }
}
if (opts_type & OPTS_TYPE_HOOK23)
{
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_hook23", kern_type);
- device_param->kernel23 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, kernel_name, &device_param->kernel23);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel23, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel23, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel1, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel2, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel3, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ CL_err |= hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel1, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ CL_err |= hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel2, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ CL_err |= hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel3, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
- for (uint i = 0; i <= 20; i++)
+ if (CL_err != CL_SUCCESS)
{
- hc_clSetKernelArg (data.ocl, device_param->kernel1, i, sizeof (cl_mem), device_param->kernel_params[i]);
- hc_clSetKernelArg (data.ocl, device_param->kernel2, i, sizeof (cl_mem), device_param->kernel_params[i]);
- hc_clSetKernelArg (data.ocl, device_param->kernel3, i, sizeof (cl_mem), device_param->kernel_params[i]);
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
- if (opts_type & OPTS_TYPE_HOOK12) hc_clSetKernelArg (data.ocl, device_param->kernel12, i, sizeof (cl_mem), device_param->kernel_params[i]);
- if (opts_type & OPTS_TYPE_HOOK23) hc_clSetKernelArg (data.ocl, device_param->kernel23, i, sizeof (cl_mem), device_param->kernel_params[i]);
+ return -1;
}
- for (uint i = 21; i <= 31; i++)
+ for (uint i = 0; i <= 23; i++)
{
- hc_clSetKernelArg (data.ocl, device_param->kernel1, i, sizeof (cl_uint), device_param->kernel_params[i]);
- hc_clSetKernelArg (data.ocl, device_param->kernel2, i, sizeof (cl_uint), device_param->kernel_params[i]);
- hc_clSetKernelArg (data.ocl, device_param->kernel3, i, sizeof (cl_uint), device_param->kernel_params[i]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel1, i, sizeof (cl_mem), device_param->kernel_params[i]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel2, i, sizeof (cl_mem), device_param->kernel_params[i]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel3, i, sizeof (cl_mem), device_param->kernel_params[i]);
+
+ if (opts_type & OPTS_TYPE_HOOK12) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel12, i, sizeof (cl_mem), device_param->kernel_params[i]);
+ if (opts_type & OPTS_TYPE_HOOK23) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel23, i, sizeof (cl_mem), device_param->kernel_params[i]);
- if (opts_type & OPTS_TYPE_HOOK12) hc_clSetKernelArg (data.ocl, device_param->kernel12, i, sizeof (cl_uint), device_param->kernel_params[i]);
- if (opts_type & OPTS_TYPE_HOOK23) hc_clSetKernelArg (data.ocl, device_param->kernel23, i, sizeof (cl_uint), device_param->kernel_params[i]);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+ }
+
+ for (uint i = 24; i <= 34; i++)
+ {
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel1, i, sizeof (cl_uint), device_param->kernel_params[i]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel2, i, sizeof (cl_uint), device_param->kernel_params[i]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel3, i, sizeof (cl_uint), device_param->kernel_params[i]);
+
+ if (opts_type & OPTS_TYPE_HOOK12) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel12, i, sizeof (cl_uint), device_param->kernel_params[i]);
+ if (opts_type & OPTS_TYPE_HOOK23) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel23, i, sizeof (cl_uint), device_param->kernel_params[i]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
// GPU memset
- device_param->kernel_memset = hc_clCreateKernel (data.ocl, device_param->program, "gpu_memset");
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program, "gpu_memset", &device_param->kernel_memset);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_memset, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ return -1;
+ }
- hc_clSetKernelArg (data.ocl, device_param->kernel_memset, 0, sizeof (cl_mem), device_param->kernel_params_memset[0]);
- hc_clSetKernelArg (data.ocl, device_param->kernel_memset, 1, sizeof (cl_uint), device_param->kernel_params_memset[1]);
- hc_clSetKernelArg (data.ocl, device_param->kernel_memset, 2, sizeof (cl_uint), device_param->kernel_params_memset[2]);
+ CL_err = hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_memset, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_memset, 0, sizeof (cl_mem), device_param->kernel_params_memset[0]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_memset, 1, sizeof (cl_uint), device_param->kernel_params_memset[1]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_memset, 2, sizeof (cl_uint), device_param->kernel_params_memset[2]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
// MP start
if (attack_mode == ATTACK_MODE_BF)
{
- device_param->kernel_mp_l = hc_clCreateKernel (data.ocl, device_param->program_mp, "l_markov");
- device_param->kernel_mp_r = hc_clCreateKernel (data.ocl, device_param->program_mp, "r_markov");
+ CL_err |= hc_clCreateKernel (data.ocl, device_param->program_mp, "l_markov", &device_param->kernel_mp_l);
+ CL_err |= hc_clCreateKernel (data.ocl, device_param->program_mp, "r_markov", &device_param->kernel_mp_r);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err |= hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp_l, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ CL_err |= hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp_r, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp_l, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp_r, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ return -1;
+ }
if (opts_type & OPTS_TYPE_PT_BITSLICE)
{
- hc_clSetKernelArg (data.ocl, device_param->kernel_tm, 0, sizeof (cl_mem), device_param->kernel_params_tm[0]);
- hc_clSetKernelArg (data.ocl, device_param->kernel_tm, 1, sizeof (cl_mem), device_param->kernel_params_tm[1]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_tm, 0, sizeof (cl_mem), device_param->kernel_params_tm[0]);
+ CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_tm, 1, sizeof (cl_mem), device_param->kernel_params_tm[1]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
else if (attack_mode == ATTACK_MODE_HYBRID1)
{
- device_param->kernel_mp = hc_clCreateKernel (data.ocl, device_param->program_mp, "C_markov");
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program_mp, "C_markov", &device_param->kernel_mp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
else if (attack_mode == ATTACK_MODE_HYBRID2)
{
- device_param->kernel_mp = hc_clCreateKernel (data.ocl, device_param->program_mp, "C_markov");
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program_mp, "C_markov", &device_param->kernel_mp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ return -1;
+ }
+
+ CL_err = hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
}
else
{
- device_param->kernel_amp = hc_clCreateKernel (data.ocl, device_param->program_amp, "amp");
+ CL_err = hc_clCreateKernel (data.ocl, device_param->program_amp, "amp", &device_param->kernel_amp);
- hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_amp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clCreateKernel(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err = hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_amp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clGetKernelWorkGroupInfo(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
for (uint i = 0; i < 5; i++)
{
- hc_clSetKernelArg (data.ocl, device_param->kernel_amp, i, sizeof (cl_mem), device_param->kernel_params_amp[i]);
+ CL_err = hc_clSetKernelArg (data.ocl, device_param->kernel_amp, i, sizeof (cl_mem), device_param->kernel_params_amp[i]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
for (uint i = 5; i < 7; i++)
{
- hc_clSetKernelArg (data.ocl, device_param->kernel_amp, i, sizeof (cl_uint), device_param->kernel_params_amp[i]);
+ CL_err = hc_clSetKernelArg (data.ocl, device_param->kernel_amp, i, sizeof (cl_uint), device_param->kernel_params_amp[i]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
}
}
const int fanspeed = hm_get_fanspeed_with_device_id (device_id);
const int fanpolicy = hm_get_fanpolicy_with_device_id (device_id);
- temp_retain_fanspeed_value[device_id] = fanspeed;
- temp_retain_fanpolicy_value[device_id] = fanpolicy;
-
// we also set it to tell the OS we take control over the fan and it's automatic controller
// if it was set to automatic. we do not control user-defined fanspeeds.
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
+ #ifdef __linux__
+ rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_TRUE);
+ #endif
+ #ifdef WIN
+ rc = hm_set_fanspeed_with_device_id_nvapi (device_id, fanspeed, 1);
+ #endif
}
if (rc == 0)
{
log_error ("ERROR: %s: %s", "stdin", strerror (errno));
- return (-1);
+ return -1;
}
if (_setmode (_fileno (stdout), _O_BINARY) == -1)
{
log_error ("ERROR: %s: %s", "stdout", strerror (errno));
- return (-1);
+ return -1;
}
if (_setmode (_fileno (stderr), _O_BINARY) == -1)
{
log_error ("ERROR: %s: %s", "stderr", strerror (errno));
- return (-1);
+ return -1;
}
#endif
wl_data->cnt = 0;
wl_data->pos = 0;
- uint wordlist_mode = ((optind + 1) < myargc) ? WL_MODE_FILE : WL_MODE_STDIN;
-
- data.wordlist_mode = wordlist_mode;
-
cs_t *css_buf = NULL;
uint css_cnt = 0;
uint dictcnt = 0;
{
log_error ("ERROR: %s: %s", l0_filename, strerror (errno));
- return (-1);
+ return -1;
}
uint is_dir = S_ISDIR (l0_stat.st_mode);
if (keyspace == 1)
{
- log_error ("ERROR: keyspace parameter is not allowed together with a directory");
+ log_error ("ERROR: Keyspace parameter is not allowed together with a directory");
- return (-1);
+ return -1;
}
char **dictionary_files = NULL;
{
log_error ("ERROR: %s: %s", l1_filename, strerror (errno));
- return (-1);
+ return -1;
}
if (S_ISREG (l1_stat.st_mode))
{
log_error ("ERROR: No usable dictionary file found.");
- return (-1);
+ return -1;
}
}
else if (wordlist_mode == WL_MODE_STDIN)
{
log_error ("ERROR: %s: %s", dictfile1, strerror (errno));
- return (-1);
+ return -1;
}
if (stat (dictfile1, &tmp_stat) == -1)
fclose (fp1);
- return (-1);
+ return -1;
}
if (S_ISDIR (tmp_stat.st_mode))
fclose (fp1);
- return (-1);
+ return -1;
}
if ((fp2 = fopen (dictfile2, "rb")) == NULL)
fclose (fp1);
- return (-1);
+ return -1;
}
if (stat (dictfile2, &tmp_stat) == -1)
fclose (fp1);
fclose (fp2);
- return (-1);
+ return -1;
}
if (S_ISDIR (tmp_stat.st_mode))
fclose (fp1);
fclose (fp2);
- return (-1);
+ return -1;
}
data.combs_cnt = 1;
fclose (fp1);
fclose (fp2);
- return (-1);
+ return -1;
}
data.combs_cnt = 1;
fclose (fp1);
fclose (fp2);
- return (-1);
+ return -1;
}
fclose (fp1);
{
log_error ("ERROR: %s: %s", mask, strerror (errno));
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: %s: %s", mask, strerror (errno));
- return (-1);
+ return -1;
}
char *line_buf = (char *) mymalloc (HCBUFSIZ);
{
log_error ("ERROR: %s: unsupported file-type", mask);
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: %s: %s", mask, strerror (errno));
- return (-1);
+ return -1;
}
char *line_buf = (char *) mymalloc (HCBUFSIZ);
{
log_error ("ERROR: %s: %s", filename, strerror (errno));
- return (-1);
+ return -1;
}
uint is_dir = S_ISDIR (file_stat.st_mode);
if (keyspace == 1)
{
- log_error ("ERROR: keyspace parameter is not allowed together with a directory");
+ log_error ("ERROR: Keyspace parameter is not allowed together with a directory");
- return (-1);
+ return -1;
}
char **dictionary_files = NULL;
{
log_error ("ERROR: %s: %s", l1_filename, strerror (errno));
- return (-1);
+ return -1;
}
if (S_ISREG (l1_stat.st_mode))
{
log_error ("ERROR: No usable dictionary file found.");
- return (-1);
+ return -1;
}
if (increment)
{
log_error ("ERROR: %s: %s", mask, strerror (errno));
- return (-1);
+ return -1;
}
char *line_buf = (char *) mymalloc (HCBUFSIZ);
{
log_error ("ERROR: %s: %s", filename, strerror (errno));
- return (-1);
+ return -1;
}
uint is_dir = S_ISDIR (file_stat.st_mode);
if (keyspace == 1)
{
- log_error ("ERROR: keyspace parameter is not allowed together with a directory");
+ log_error ("ERROR: Keyspace parameter is not allowed together with a directory");
- return (-1);
+ return -1;
}
char **dictionary_files = NULL;
{
log_error ("ERROR: %s: %s", l1_filename, strerror (errno));
- return (-1);
+ return -1;
}
if (S_ISREG (l1_stat.st_mode))
{
log_error ("ERROR: No usable dictionary file found.");
- return (-1);
+ return -1;
}
if (increment)
* status and monitor threads
*/
- if (data.devices_status != STATUS_CRACKED) data.devices_status = STATUS_STARTING;
-
- uint i_threads_cnt = 0;
-
- hc_thread_t *i_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
-
- if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
- if (stdout_flag == 0)
- {
- hc_thread_create (i_threads[i_threads_cnt], thread_keypress, &benchmark);
-
- i_threads_cnt++;
- }
+ data.devices_status = STATUS_STARTING;
}
- if (wordlist_mode == WL_MODE_STDIN) data.status = 1;
-
- uint ni_threads_cnt = 0;
+ uint inner_threads_cnt = 0;
- hc_thread_t *ni_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
+ hc_thread_t *inner_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
- if (stdout_flag == 0)
- {
- hc_thread_create (ni_threads[ni_threads_cnt], thread_monitor, NULL);
-
- ni_threads_cnt++;
- }
+ data.shutdown_inner = 0;
/**
* Outfile remove
*/
- if (keyspace == 0)
+ if (keyspace == 0 && benchmark == 0 && stdout_flag == 0)
{
+ hc_thread_create (inner_threads[inner_threads_cnt], thread_monitor, NULL);
+
+ inner_threads_cnt++;
+
if (outfile_check_timer != 0)
{
if (data.outfile_check_directory != NULL)
!((hash_mode >= 13700) && (hash_mode <= 13799)) &&
(hash_mode != 9000))
{
- hc_thread_create (ni_threads[ni_threads_cnt], thread_outfile_remove, NULL);
+ hc_thread_create (inner_threads[inner_threads_cnt], thread_outfile_remove, NULL);
- ni_threads_cnt++;
+ inner_threads_cnt++;
}
else
{
{
if (potfile_remove_cracks > 0)
{
- if (potfile_remove_cracks == 1) log_info ("INFO: removed 1 hash found in pot file\n");
- else log_info ("INFO: removed %u hashes found in pot file\n", potfile_remove_cracks);
+ if (potfile_remove_cracks == 1) log_info ("INFO: Removed 1 hash found in pot file\n");
+ else log_info ("INFO: Removed %u hashes found in pot file\n", potfile_remove_cracks);
}
}
for (uint maskpos = rd->maskpos; maskpos < maskcnt; maskpos++)
{
- if (data.devices_status == STATUS_CRACKED) break;
-
- data.devices_status = STATUS_INIT;
+ if (data.devices_status == STATUS_CRACKED) continue;
+ if (data.devices_status == STATUS_ABORTED) continue;
+ if (data.devices_status == STATUS_QUIT) continue;
if (maskpos > rd->maskpos)
{
mask = mask + str_pos + 1;
}
+
+ /**
+ * Handle the special escape sequence "\," inside the mask field of a .hcmask line.
+ * A literal "," must be written there as "\," because a bare "," would otherwise be
+ * parsed as a custom-charset separator.
+ *
+ * Replace every "\," with "," within the mask. The sequence "\\," remains meaningful:
+ * it denotes a literal "\" followed by ",". No other backslashes are rewritten —
+ * "\\" on its own is left untouched; it only has special meaning when followed by ",".
+ */
+
+ uint mask_len_cur = strlen (mask);
+
+ uint mask_out_pos = 0;
+ char mask_prev = 0;
+
+ for (uint mask_iter = 0; mask_iter < mask_len_cur; mask_iter++, mask_out_pos++)
+ {
+ if (mask[mask_iter] == ',')
+ {
+ if (mask_prev == '\\')
+ {
+ mask_out_pos -= 1; // this means: skip the previous "\"
+ }
+ }
+
+ mask_prev = mask[mask_iter];
+
+ mask[mask_out_pos] = mask[mask_iter];
+ }
+
+ mask[mask_out_pos] = '\0';
}
if ((attack_mode == ATTACK_MODE_HYBRID1) || (attack_mode == ATTACK_MODE_HYBRID2))
device_param->kernel_params_mp_buf32[7] = 0;
}
- for (uint i = 0; i < 3; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp, i, sizeof (cl_mem), (void *) device_param->kernel_params_mp[i]);
- for (uint i = 3; i < 4; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp, i, sizeof (cl_ulong), (void *) device_param->kernel_params_mp[i]);
- for (uint i = 4; i < 8; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp, i, sizeof (cl_uint), (void *) device_param->kernel_params_mp[i]);
+ cl_int CL_err = CL_SUCCESS;
+
+ for (uint i = 0; i < 3; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp, i, sizeof (cl_mem), (void *) device_param->kernel_params_mp[i]);
+ for (uint i = 3; i < 4; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp, i, sizeof (cl_ulong), (void *) device_param->kernel_params_mp[i]);
+ for (uint i = 4; i < 8; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp, i, sizeof (cl_uint), (void *) device_param->kernel_params_mp[i]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_root_css_buf, CL_TRUE, 0, device_param->size_root_css, root_css_buf, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_markov_css_buf, CL_TRUE, 0, device_param->size_markov_css, markov_css_buf, 0, NULL, NULL);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_root_css_buf, CL_TRUE, 0, device_param->size_root_css, root_css_buf, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_markov_css_buf, CL_TRUE, 0, device_param->size_markov_css, markov_css_buf, 0, NULL, NULL);
+ return -1;
+ }
}
}
else if (attack_mode == ATTACK_MODE_BF)
{
log_error ("ERROR: Mask is too small");
- return (-1);
+ return -1;
}
}
}
{
log_error ("ERROR: --keyspace is not supported with --increment or mask files");
- return (-1);
+ return -1;
}
}
- for (uint dictpos = rd->dictpos; dictpos < dictcnt; )
+ for (uint dictpos = rd->dictpos; dictpos < dictcnt; dictpos++)
{
+ if (data.devices_status == STATUS_CRACKED) continue;
+ if (data.devices_status == STATUS_ABORTED) continue;
+ if (data.devices_status == STATUS_QUIT) continue;
+
+ rd->dictpos = dictpos;
+
char *subid = logfile_generate_subid ();
data.subid = subid;
logfile_sub_msg ("START");
- data.devices_status = STATUS_INIT;
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_INIT;
+ }
memset (data.words_progress_done, 0, data.salts_cnt * sizeof (u64));
memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (u64));
{
log_error ("ERROR: %s: %s", dictfile, strerror (errno));
- return (-1);
+ return -1;
}
data.words_cnt = count_words (wl_data, fd2, dictfile, dictstat_base, &dictstat_nmemb);
if (data.words_cnt == 0)
{
- if (data.devices_status == STATUS_CRACKED) break;
- if (data.devices_status == STATUS_ABORTED) break;
-
- dictpos++;
+ logfile_sub_msg ("STOP");
continue;
}
{
log_error ("ERROR: %s: %s", dictfile, strerror (errno));
- return (-1);
+ return -1;
}
data.words_cnt = count_words (wl_data, fd2, dictfile, dictstat_base, &dictstat_nmemb);
{
log_error ("ERROR: %s: %s", dictfile2, strerror (errno));
- return (-1);
+ return -1;
}
data.words_cnt = count_words (wl_data, fd2, dictfile2, dictstat_base, &dictstat_nmemb);
if (data.words_cnt == 0)
{
- if (data.devices_status == STATUS_CRACKED) break;
- if (data.devices_status == STATUS_ABORTED) break;
-
- dictpos++;
+ logfile_sub_msg ("STOP");
continue;
}
{
log_error ("ERROR: %s: %s", dictfile, strerror (errno));
- return (-1);
+ return -1;
}
data.words_cnt = count_words (wl_data, fd2, dictfile, dictstat_base, &dictstat_nmemb);
if (data.words_cnt == 0)
{
- if (data.devices_status == STATUS_CRACKED) break;
- if (data.devices_status == STATUS_ABORTED) break;
-
- dictpos++;
+ logfile_sub_msg ("STOP");
continue;
}
{
if (css_cnt < mask_min)
{
- log_info ("WARNING: skipping mask '%s' because it is smaller than the minimum password length", mask);
+ log_info ("WARNING: Skipping mask '%s' because it is smaller than the minimum password length", mask);
}
if (css_cnt > mask_max)
{
- log_info ("WARNING: skipping mask '%s' because it is larger than the maximum password length", mask);
+ log_info ("WARNING: Skipping mask '%s' because it is larger than the maximum password length", mask);
}
// skip to next mask
- dictpos++;
-
- rd->dictpos = dictpos;
-
logfile_sub_msg ("STOP");
continue;
device_param->kernel_params_mp_r_buf32[6] = 0;
device_param->kernel_params_mp_r_buf32[7] = 0;
- for (uint i = 0; i < 3; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp_l, i, sizeof (cl_mem), (void *) device_param->kernel_params_mp_l[i]);
- for (uint i = 3; i < 4; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp_l, i, sizeof (cl_ulong), (void *) device_param->kernel_params_mp_l[i]);
- for (uint i = 4; i < 9; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp_l, i, sizeof (cl_uint), (void *) device_param->kernel_params_mp_l[i]);
+ cl_int CL_err = CL_SUCCESS;
+
+ for (uint i = 0; i < 3; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp_l, i, sizeof (cl_mem), (void *) device_param->kernel_params_mp_l[i]);
+ for (uint i = 3; i < 4; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp_l, i, sizeof (cl_ulong), (void *) device_param->kernel_params_mp_l[i]);
+ for (uint i = 4; i < 9; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp_l, i, sizeof (cl_uint), (void *) device_param->kernel_params_mp_l[i]);
+
+ for (uint i = 0; i < 3; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp_r, i, sizeof (cl_mem), (void *) device_param->kernel_params_mp_r[i]);
+ for (uint i = 3; i < 4; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp_r, i, sizeof (cl_ulong), (void *) device_param->kernel_params_mp_r[i]);
+ for (uint i = 4; i < 8; i++) CL_err |= hc_clSetKernelArg (data.ocl, device_param->kernel_mp_r, i, sizeof (cl_uint), (void *) device_param->kernel_params_mp_r[i]);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clSetKernelArg(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
+ }
+
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_root_css_buf, CL_TRUE, 0, device_param->size_root_css, root_css_buf, 0, NULL, NULL);
+ CL_err |= hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_markov_css_buf, CL_TRUE, 0, device_param->size_markov_css, markov_css_buf, 0, NULL, NULL);
- for (uint i = 0; i < 3; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp_r, i, sizeof (cl_mem), (void *) device_param->kernel_params_mp_r[i]);
- for (uint i = 3; i < 4; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp_r, i, sizeof (cl_ulong), (void *) device_param->kernel_params_mp_r[i]);
- for (uint i = 4; i < 8; i++) hc_clSetKernelArg (data.ocl, device_param->kernel_mp_r, i, sizeof (cl_uint), (void *) device_param->kernel_params_mp_r[i]);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clEnqueueWriteBuffer(): %s\n", val2cstr_cl (CL_err));
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_root_css_buf, CL_TRUE, 0, device_param->size_root_css, root_css_buf, 0, NULL, NULL);
- hc_clEnqueueWriteBuffer (data.ocl, device_param->command_queue, device_param->d_markov_css_buf, CL_TRUE, 0, device_param->size_markov_css, markov_css_buf, 0, NULL, NULL);
+ return -1;
+ }
}
}
{
log_info ("%llu", (unsigned long long int) words_base);
- return (0);
+ return 0;
}
if (data.words_cur > data.words_base)
{
- log_error ("ERROR: restore value greater keyspace");
+ log_error ("ERROR: Restore value greater keyspace");
- return (-1);
+ return -1;
}
if (data.words_cur)
hc_thread_t *c_threads = (hc_thread_t *) mycalloc (data.devices_cnt, sizeof (hc_thread_t));
- data.devices_status = STATUS_AUTOTUNE;
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_AUTOTUNE;
+ }
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
{
if (quiet == 0)
{
+ clear_prompt ();
+
log_info ("ATTENTION!");
log_info (" The wordlist or mask you are using is too small.");
log_info (" Therefore, hashcat is unable to utilize the full parallelization power of your device(s).");
* create cracker threads
*/
- data.devices_status = STATUS_RUNNING;
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_RUNNING;
+ }
if (initial_restore_done == 0)
{
local_free (c_threads);
- data.restore = 0;
-
- // finalize task
+ if ((data.devices_status != STATUS_BYPASS) && (data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_EXHAUSTED;
+ }
logfile_sub_var_uint ("status-after-work", data.devices_status);
- if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
-
- if (data.devices_status == STATUS_CRACKED) break;
- if (data.devices_status == STATUS_ABORTED) break;
-
- if (data.devices_status == STATUS_BYPASS)
- {
- data.devices_status = STATUS_RUNNING;
- }
+ data.restore = 0;
if (induction_dictionaries_cnt)
{
induction_dictionaries_cnt = count_dictionaries (induction_dictionaries);
}
- if (benchmark == 0)
+ if (benchmark == 1)
+ {
+ status_benchmark ();
+
+ if (machine_readable == 0)
+ {
+ log_info ("");
+ }
+ }
+ else
{
- if (((dictpos + 1) < dictcnt) || ((maskpos + 1) < maskcnt) || induction_dictionaries_cnt)
+ if (quiet == 0)
{
- if (quiet == 0) clear_prompt ();
+ clear_prompt ();
+
+ log_info ("");
- if (quiet == 0) log_info ("");
+ status_display ();
+ log_info ("");
+ }
+ else
+ {
if (status == 1)
{
status_display ();
}
- else
- {
- if (quiet == 0) status_display ();
- }
-
- if (quiet == 0) log_info ("");
}
}
- if (attack_mode == ATTACK_MODE_BF)
+ if (induction_dictionaries_cnt)
{
- dictpos++;
+ qsort (induction_dictionaries, induction_dictionaries_cnt, sizeof (char *), sort_by_mtime);
- rd->dictpos = dictpos;
- }
- else
- {
- if (induction_dictionaries_cnt)
- {
- qsort (induction_dictionaries, induction_dictionaries_cnt, sizeof (char *), sort_by_mtime);
- }
- else
- {
- dictpos++;
+ // decrement dictpos so the loop revisits this dictionary on the next iteration; --loopback relies on this guaranteed extra pass
- rd->dictpos = dictpos;
- }
+ dictpos--;
}
time_t runtime_stop;
logfile_sub_msg ("STOP");
global_free (subid);
- }
- if (data.devices_status == STATUS_STOP_AT_CHECKPOINT) check_checkpoint ();
+ // from this point we handle bypass as running
+
+ if (data.devices_status == STATUS_BYPASS)
+ {
+ data.devices_status = STATUS_RUNNING;
+ }
+
+ // and overwrite benchmark aborts as well
+
+ if (data.benchmark == 1)
+ {
+ if (data.devices_status == STATUS_ABORTED)
+ {
+ data.devices_status = STATUS_RUNNING;
+ }
+ }
+
+ // finalize task
+
+ if (data.devices_status == STATUS_CRACKED) break;
+ if (data.devices_status == STATUS_ABORTED) break;
+ if (data.devices_status == STATUS_QUIT) break;
+ }
if (data.devices_status == STATUS_CRACKED) break;
if (data.devices_status == STATUS_ABORTED) break;
if (data.devices_status == STATUS_QUIT) break;
-
- if (data.devices_status == STATUS_BYPASS)
- {
- data.devices_status = STATUS_RUNNING;
- }
}
// problems could occur if already at startup everything was cracked (because of .pot file reading etc), we must set some variables here to avoid NULL pointers
-
if (attack_mode == ATTACK_MODE_STRAIGHT)
{
if (data.wordlist_mode == WL_MODE_FILE)
}
}
- if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
- {
- data.devices_status = STATUS_EXHAUSTED;
- }
-
// if cracked / aborted remove last induction dictionary
for (int file_pos = 0; file_pos < induction_dictionaries_cnt; file_pos++)
}
}
- // wait for non-interactive threads
-
- for (uint thread_idx = 0; thread_idx < ni_threads_cnt; thread_idx++)
- {
- hc_thread_wait (1, &ni_threads[thread_idx]);
- }
-
- local_free (ni_threads);
+ // wait for inner threads
- // wait for interactive threads
+ data.shutdown_inner = 1;
- for (uint thread_idx = 0; thread_idx < i_threads_cnt; thread_idx++)
+ for (uint thread_idx = 0; thread_idx < inner_threads_cnt; thread_idx++)
{
- hc_thread_wait (1, &i_threads[thread_idx]);
+ hc_thread_wait (1, &inner_threads[thread_idx]);
}
- local_free (i_threads);
+ local_free (inner_threads);
// we dont need restore file anymore
if (data.restore_disable == 0)
* Clean up
*/
- if (benchmark == 1)
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
- status_benchmark ();
+ hc_device_param_t *device_param = &data.devices_param[device_id];
- if (machine_readable == 0)
- {
- log_info ("");
- }
- }
- else
- {
- if (quiet == 0) clear_prompt ();
+ if (device_param->skipped) continue;
- if (quiet == 0) log_info ("");
+ cl_int CL_err = CL_SUCCESS;
- if (status == 1)
- {
- status_display ();
+ local_free (device_param->combs_buf);
+ local_free (device_param->hooks_buf);
+ local_free (device_param->device_name);
+ local_free (device_param->device_name_chksum);
+ local_free (device_param->device_version);
+ local_free (device_param->driver_version);
+
+ if (device_param->pws_buf) myfree (device_param->pws_buf);
+
+ if (device_param->d_pws_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_pws_buf);
+ if (device_param->d_pws_amp_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_pws_amp_buf);
+ if (device_param->d_rules) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_rules);
+ if (device_param->d_rules_c) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_rules_c);
+ if (device_param->d_combs) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_combs);
+ if (device_param->d_combs_c) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_combs_c);
+ if (device_param->d_bfs) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bfs);
+ if (device_param->d_bfs_c) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bfs_c);
+ if (device_param->d_bitmap_s1_a) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_a);
+ if (device_param->d_bitmap_s1_b) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_b);
+ if (device_param->d_bitmap_s1_c) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_c);
+ if (device_param->d_bitmap_s1_d) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_d);
+ if (device_param->d_bitmap_s2_a) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_a);
+ if (device_param->d_bitmap_s2_b) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_b);
+ if (device_param->d_bitmap_s2_c) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_c);
+ if (device_param->d_bitmap_s2_d) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_d);
+ if (device_param->d_plain_bufs) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_plain_bufs);
+ if (device_param->d_digests_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_digests_buf);
+ if (device_param->d_digests_shown) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_digests_shown);
+ if (device_param->d_salt_bufs) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_salt_bufs);
+ if (device_param->d_esalt_bufs) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_esalt_bufs);
+ if (device_param->d_tmps) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_tmps);
+ if (device_param->d_hooks) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_hooks);
+ if (device_param->d_result) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_result);
+ if (device_param->d_scryptV0_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_scryptV0_buf);
+ if (device_param->d_scryptV1_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_scryptV1_buf);
+ if (device_param->d_scryptV2_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_scryptV2_buf);
+ if (device_param->d_scryptV3_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_scryptV3_buf);
+ if (device_param->d_root_css_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_root_css_buf);
+ if (device_param->d_markov_css_buf) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_markov_css_buf);
+ if (device_param->d_tm_c) CL_err |= hc_clReleaseMemObject (data.ocl, device_param->d_tm_c);
+
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clReleaseMemObject(): %s\n", val2cstr_cl (CL_err));
+
+ return -1;
}
- else
+
+ if (device_param->kernel1) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel1);
+ if (device_param->kernel12) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel12);
+ if (device_param->kernel2) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel2);
+ if (device_param->kernel23) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel23);
+ if (device_param->kernel3) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel3);
+ if (device_param->kernel_mp) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel_mp);
+ if (device_param->kernel_mp_l) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel_mp_l);
+ if (device_param->kernel_mp_r) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel_mp_r);
+ if (device_param->kernel_tm) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel_tm);
+ if (device_param->kernel_amp) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel_amp);
+ if (device_param->kernel_memset) CL_err |= hc_clReleaseKernel (data.ocl, device_param->kernel_memset);
+
+ if (CL_err != CL_SUCCESS)
{
- if (quiet == 0) status_display ();
- }
+ log_error ("ERROR: clReleaseKernel(): %s\n", val2cstr_cl (CL_err));
- if (quiet == 0) log_info ("");
- }
+ return -1;
+ }
- for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
- {
- hc_device_param_t *device_param = &data.devices_param[device_id];
+ if (device_param->program) CL_err |= hc_clReleaseProgram (data.ocl, device_param->program);
+ if (device_param->program_mp) CL_err |= hc_clReleaseProgram (data.ocl, device_param->program_mp);
+ if (device_param->program_amp) CL_err |= hc_clReleaseProgram (data.ocl, device_param->program_amp);
- if (device_param->skipped) continue;
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clReleaseProgram(): %s\n", val2cstr_cl (CL_err));
- local_free (device_param->combs_buf);
+ return -1;
+ }
- local_free (device_param->hooks_buf);
+ if (device_param->command_queue) CL_err |= hc_clReleaseCommandQueue (data.ocl, device_param->command_queue);
- local_free (device_param->device_name);
+ if (CL_err != CL_SUCCESS)
+ {
+ log_error ("ERROR: clReleaseCommandQueue(): %s\n", val2cstr_cl (CL_err));
- local_free (device_param->device_name_chksum);
+ return -1;
+ }
- local_free (device_param->device_version);
+ if (device_param->context) CL_err |= hc_clReleaseContext (data.ocl, device_param->context);
- local_free (device_param->driver_version);
+ if (CL_err != CL_SUCCESS)
+ {
+      log_error ("ERROR: clReleaseContext(): %s\n", val2cstr_cl (CL_err));
- if (device_param->pws_buf) myfree (device_param->pws_buf);
- if (device_param->d_pws_buf) hc_clReleaseMemObject (data.ocl, device_param->d_pws_buf);
- if (device_param->d_pws_amp_buf) hc_clReleaseMemObject (data.ocl, device_param->d_pws_amp_buf);
- if (device_param->d_rules) hc_clReleaseMemObject (data.ocl, device_param->d_rules);
- if (device_param->d_rules_c) hc_clReleaseMemObject (data.ocl, device_param->d_rules_c);
- if (device_param->d_combs) hc_clReleaseMemObject (data.ocl, device_param->d_combs);
- if (device_param->d_combs_c) hc_clReleaseMemObject (data.ocl, device_param->d_combs_c);
- if (device_param->d_bfs) hc_clReleaseMemObject (data.ocl, device_param->d_bfs);
- if (device_param->d_bfs_c) hc_clReleaseMemObject (data.ocl, device_param->d_bfs_c);
- if (device_param->d_bitmap_s1_a) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_a);
- if (device_param->d_bitmap_s1_b) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_b);
- if (device_param->d_bitmap_s1_c) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_c);
- if (device_param->d_bitmap_s1_d) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s1_d);
- if (device_param->d_bitmap_s2_a) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_a);
- if (device_param->d_bitmap_s2_b) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_b);
- if (device_param->d_bitmap_s2_c) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_c);
- if (device_param->d_bitmap_s2_d) hc_clReleaseMemObject (data.ocl, device_param->d_bitmap_s2_d);
- if (device_param->d_plain_bufs) hc_clReleaseMemObject (data.ocl, device_param->d_plain_bufs);
- if (device_param->d_digests_buf) hc_clReleaseMemObject (data.ocl, device_param->d_digests_buf);
- if (device_param->d_digests_shown) hc_clReleaseMemObject (data.ocl, device_param->d_digests_shown);
- if (device_param->d_salt_bufs) hc_clReleaseMemObject (data.ocl, device_param->d_salt_bufs);
- if (device_param->d_esalt_bufs) hc_clReleaseMemObject (data.ocl, device_param->d_esalt_bufs);
- if (device_param->d_tmps) hc_clReleaseMemObject (data.ocl, device_param->d_tmps);
- if (device_param->d_hooks) hc_clReleaseMemObject (data.ocl, device_param->d_hooks);
- if (device_param->d_result) hc_clReleaseMemObject (data.ocl, device_param->d_result);
- if (device_param->d_scryptV_buf) hc_clReleaseMemObject (data.ocl, device_param->d_scryptV_buf);
- if (device_param->d_root_css_buf) hc_clReleaseMemObject (data.ocl, device_param->d_root_css_buf);
- if (device_param->d_markov_css_buf) hc_clReleaseMemObject (data.ocl, device_param->d_markov_css_buf);
- if (device_param->d_tm_c) hc_clReleaseMemObject (data.ocl, device_param->d_tm_c);
-
- if (device_param->kernel1) hc_clReleaseKernel (data.ocl, device_param->kernel1);
- if (device_param->kernel12) hc_clReleaseKernel (data.ocl, device_param->kernel12);
- if (device_param->kernel2) hc_clReleaseKernel (data.ocl, device_param->kernel2);
- if (device_param->kernel23) hc_clReleaseKernel (data.ocl, device_param->kernel23);
- if (device_param->kernel3) hc_clReleaseKernel (data.ocl, device_param->kernel3);
- if (device_param->kernel_mp) hc_clReleaseKernel (data.ocl, device_param->kernel_mp);
- if (device_param->kernel_mp_l) hc_clReleaseKernel (data.ocl, device_param->kernel_mp_l);
- if (device_param->kernel_mp_r) hc_clReleaseKernel (data.ocl, device_param->kernel_mp_r);
- if (device_param->kernel_tm) hc_clReleaseKernel (data.ocl, device_param->kernel_tm);
- if (device_param->kernel_amp) hc_clReleaseKernel (data.ocl, device_param->kernel_amp);
- if (device_param->kernel_memset) hc_clReleaseKernel (data.ocl, device_param->kernel_memset);
-
- if (device_param->program) hc_clReleaseProgram (data.ocl, device_param->program);
- if (device_param->program_mp) hc_clReleaseProgram (data.ocl, device_param->program_mp);
- if (device_param->program_amp) hc_clReleaseProgram (data.ocl, device_param->program_amp);
-
- if (device_param->command_queue) hc_clReleaseCommandQueue (data.ocl, device_param->command_queue);
- if (device_param->context) hc_clReleaseContext (data.ocl, device_param->context);
+ return -1;
+ }
}
// reset default fan speed
#ifdef HAVE_HWMON
if (gpu_temp_disable == 0)
{
- if (gpu_temp_retain != 0) // VENDOR_ID_AMD is implied here
+ if (gpu_temp_retain != 0)
{
hc_thread_mutex_lock (mux_adl);
if (data.hm_device[device_id].fan_set_supported == 1)
{
- int fanspeed = temp_retain_fanspeed_value[device_id];
- int fanpolicy = temp_retain_fanpolicy_value[device_id];
+ int rc = -1;
- if (fanpolicy == 1)
+ if (device_param->device_vendor_id == VENDOR_ID_AMD)
{
- int rc = -1;
-
- if (device_param->device_vendor_id == VENDOR_ID_AMD)
- {
- rc = hm_set_fanspeed_with_device_id_adl (device_id, fanspeed, 0);
- }
- else if (device_param->device_vendor_id == VENDOR_ID_NV)
- {
-
- }
+ rc = hm_set_fanspeed_with_device_id_adl (device_id, 100, 0);
+ }
+ else if (device_param->device_vendor_id == VENDOR_ID_NV)
+ {
+ #ifdef __linux__
+ rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_FALSE);
+ #endif
- if (rc == -1) log_info ("WARNING: Failed to restore default fan speed and policy for device #%", device_id + 1);
+ #ifdef WIN
+ rc = hm_set_fanspeed_with_device_id_nvapi (device_id, 100, 0);
+ #endif
}
+
+    if (rc == -1) log_info ("WARNING: Failed to restore default fan speed and policy for device #%u", device_id + 1);
}
}
// reset power tuning
- if (powertune_enable == 1) // VENDOR_ID_AMD is implied here
+ if (powertune_enable == 1)
{
hc_thread_mutex_lock (mux_adl);
{
log_error ("ERROR: Failed to get ADL PowerControl Capabilities");
- return (-1);
+ return -1;
}
if (powertune_supported != 0)
{
log_info ("ERROR: Failed to restore the ADL PowerControl values");
- return (-1);
+ return -1;
}
// clocks
{
log_info ("ERROR: Failed to restore ADL performance state");
- return (-1);
+ return -1;
}
local_free (performance_state);
data.hm_nvml = NULL;
}
+ if (data.hm_nvapi)
+ {
+ hm_NvAPI_Unload (data.hm_nvapi);
+
+ nvapi_close (data.hm_nvapi);
+
+ data.hm_nvapi = NULL;
+ }
+
+ if (data.hm_xnvctrl)
+ {
+ hm_XNVCTRL_XCloseDisplay (data.hm_xnvctrl);
+
+ xnvctrl_close (data.hm_xnvctrl);
+
+ data.hm_xnvctrl = NULL;
+ }
+
if (data.hm_adl)
{
hm_ADL_Main_Control_Destroy (data.hm_adl);
local_free (bitmap_s2_d);
#ifdef HAVE_HWMON
- local_free (temp_retain_fanspeed_value);
local_free (od_clock_mem_status);
local_free (od_power_control_status);
local_free (nvml_power_limit);
if (data.devices_status == STATUS_QUIT) break;
}
+ // wait for outer threads
+
+ data.shutdown_outer = 1;
+
+ for (uint thread_idx = 0; thread_idx < outer_threads_cnt; thread_idx++)
+ {
+ hc_thread_wait (1, &outer_threads[thread_idx]);
+ }
+
+ local_free (outer_threads);
+
// destroy others mutex
hc_thread_mutex_delete (mux_dispatcher);
{
log_error ("ERROR: %s: %s", induction_directory, strerror (errno));
- return (-1);
+ return -1;
}
}
{
log_error ("ERROR: %s: %s", outfile_check_directory, strerror (errno));
- return (-1);
+ return -1;
}
}