#define MAX_DICTSTAT 10000
-#define NUM_DEFAULT_BENCHMARK_ALGORITHMS 136
+#define NUM_DEFAULT_BENCHMARK_ALGORITHMS 137
#define global_free(attr) \
{ \
133,
13500,
11600,
+ 13600,
12500,
13000,
13200,
" 13000 = RAR5",
" 13200 = AxCrypt",
" 13300 = AxCrypt in memory SHA1",
+ " 13600 = WinZip",
"",
"[[ Full-Disk encryptions (FDE) ]]",
"",
speed_ms[device_id] /= SPEED_CACHE;
}
- float hashes_all_ms = 0;
+ double hashes_all_ms = 0;
- float hashes_dev_ms[DEVICES_MAX] = { 0 };
+ double hashes_dev_ms[DEVICES_MAX] = { 0 };
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
if (speed_ms[device_id])
{
- hashes_dev_ms[device_id] = speed_cnt[device_id] / speed_ms[device_id];
+ hashes_dev_ms[device_id] = (double) speed_cnt[device_id] / speed_ms[device_id];
hashes_all_ms += hashes_dev_ms[device_id];
}
#endif // HAVE_HWMON
}
+static void status_benchmark_automat ()
+{
+ u64 speed_cnt[DEVICES_MAX] = { 0 };
+ double speed_ms[DEVICES_MAX] = { 0 };
+
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ speed_cnt[device_id] = device_param->speed_cnt[0];
+ speed_ms[device_id] = device_param->speed_ms[0];
+ }
+
+ double hashes_dev_ms[DEVICES_MAX] = { 0 };
+
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ hashes_dev_ms[device_id] = 0;
+
+ if (speed_ms[device_id])
+ {
+ hashes_dev_ms[device_id] = (double) speed_cnt[device_id] / speed_ms[device_id];
+ }
+ }
+
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if (device_param->skipped) continue;
+
+ log_info ("%u:%u:%llu", device_id + 1, data.hash_mode, (unsigned long long int) (hashes_dev_ms[device_id] * 1000));
+ }
+}
+
static void status_benchmark ()
{
if (data.devices_status == STATUS_INIT) return;
if (data.devices_status == STATUS_STARTING) return;
+ if (data.devices_status == STATUS_BYPASS) return;
- if (data.words_cnt == 0) return;
+ if (data.status_automat == 1)
+ {
+ status_benchmark_automat ();
+
+ return;
+ }
u64 speed_cnt[DEVICES_MAX] = { 0 };
double speed_ms[DEVICES_MAX] = { 0 };
speed_ms[device_id] = device_param->speed_ms[0];
}
- float hashes_all_ms = 0;
+ double hashes_all_ms = 0;
- float hashes_dev_ms[DEVICES_MAX] = { 0 };
+ double hashes_dev_ms[DEVICES_MAX] = { 0 };
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
if (speed_ms[device_id])
{
- hashes_dev_ms[device_id] = speed_cnt[device_id] / speed_ms[device_id];
+ hashes_dev_ms[device_id] = (double) speed_cnt[device_id] / speed_ms[device_id];
hashes_all_ms += hashes_dev_ms[device_id];
}
if ((quiet == 0) && (debug_file == NULL))
{
fprintf (stdout, "%s", PROMPT);
+
fflush (stdout);
}
}
{
clear_prompt ();
- log_info ("");
+ //log_info ("");
log_info ("INFO: approaching final keyspace, workload adjusted");
-
log_info ("");
fprintf (stdout, "%s", PROMPT);
hc_clSetKernelArg (data.ocl, kernel, 30, sizeof (cl_uint), device_param->kernel_params[30]);
hc_clSetKernelArg (data.ocl, kernel, 31, sizeof (cl_uint), device_param->kernel_params[31]);
- hc_timer_t timer;
-
- hc_timer_set (&timer);
+ cl_event event;
if ((data.opts_type & OPTS_TYPE_PT_BITSLICE) && (data.attack_mode == ATTACK_MODE_BF))
{
const size_t global_work_size[3] = { num_elements, 32, 1 };
const size_t local_work_size[3] = { kernel_threads / 32, 32, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 2, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+ hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 2, NULL, global_work_size, local_work_size, 0, NULL, &event);
}
else
{
- size_t workgroup_size = 0;
-
- hc_clGetKernelWorkGroupInfo (data.ocl, kernel, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &workgroup_size, NULL);
-
if (kern_run == KERN_RUN_2)
{
if (data.opti_type & OPTI_TYPE_SLOW_HASH_SIMD)
}
}
- if (kernel_threads > workgroup_size) kernel_threads = workgroup_size;
-
while (num_elements % kernel_threads) num_elements++;
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
- hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
+ hc_clEnqueueNDRangeKernel (data.ocl, device_param->command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, &event);
}
hc_clFlush (data.ocl, device_param->command_queue);
- hc_clFinish (data.ocl, device_param->command_queue);
+ hc_clWaitForEvents (data.ocl, 1, &event);
if (event_update)
{
- double exec_time;
+ cl_ulong time_start;
+ cl_ulong time_end;
+
+ hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_START, sizeof (time_start), &time_start, NULL);
+ hc_clGetEventProfilingInfo (data.ocl, event, CL_PROFILING_COMMAND_END, sizeof (time_end), &time_end, NULL);
- hc_timer_get (timer, exec_time);
+ const double exec_time = (double) (time_end - time_start) / 1000000.0;
uint exec_pos = device_param->exec_pos;
device_param->exec_pos = exec_pos;
}
+
+ hc_clReleaseEvent (data.ocl, event);
+
+ hc_clFinish (data.ocl, device_param->command_queue);
}
static void run_kernel_mp (const uint kern_run, hc_device_param_t *device_param, const uint num)
break;
}
- size_t workgroup_size = 0;
-
- hc_clGetKernelWorkGroupInfo (data.ocl, kernel, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &workgroup_size, NULL);
-
- if (kernel_threads > workgroup_size) kernel_threads = workgroup_size;
-
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
cl_kernel kernel = device_param->kernel_tm;
- size_t workgroup_size = 0;
-
- hc_clGetKernelWorkGroupInfo (data.ocl, kernel, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &workgroup_size, NULL);
-
- if (kernel_threads > workgroup_size) kernel_threads = workgroup_size;
-
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
hc_clSetKernelArg (data.ocl, kernel, 5, sizeof (cl_uint), device_param->kernel_params_amp[5]);
hc_clSetKernelArg (data.ocl, kernel, 6, sizeof (cl_uint), device_param->kernel_params_amp[6]);
- size_t workgroup_size = 0;
-
- hc_clGetKernelWorkGroupInfo (data.ocl, kernel, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &workgroup_size, NULL);
-
- if (kernel_threads > workgroup_size) kernel_threads = workgroup_size;
-
const size_t global_work_size[3] = { num_elements, 1, 1 };
const size_t local_work_size[3] = { kernel_threads, 1, 1 };
// balancing the workload turns out to be very efficient
- const u32 kernel_power_balance = kernel_accel * kernel_loops;
+ if (kernel_loops_min != kernel_loops_max)
+ {
+ const u32 kernel_power_balance = kernel_accel * kernel_loops;
- u32 sqrtv;
+ u32 sqrtv;
- for (sqrtv = 1; sqrtv < 0x100000; sqrtv++)
- {
- if ((sqrtv * sqrtv) >= kernel_power_balance) break;
- }
+ for (sqrtv = 1; sqrtv < 0x100000; sqrtv++)
+ {
+ if ((sqrtv * sqrtv) >= kernel_power_balance) break;
+ }
- const u32 kernel_accel_try = sqrtv;
- const u32 kernel_loops_try = sqrtv;
+ const u32 kernel_accel_try = sqrtv;
+ const u32 kernel_loops_try = sqrtv;
- if ((kernel_accel_try <= kernel_accel_max) && (kernel_loops_try >= kernel_loops_min))
- {
- kernel_accel = kernel_accel_try;
- kernel_loops = kernel_loops_try;
+ if ((kernel_accel_try <= kernel_accel_max) && (kernel_loops_try >= kernel_loops_min))
+ {
+ kernel_accel = kernel_accel_try;
+ kernel_loops = kernel_loops_try;
+ }
}
// reset fake words
if (benchmark == 1)
{
log_info ("%s (%s) starting in benchmark-mode...", PROGNAME, VERSION_TAG);
-
log_info ("");
}
else if (restore == 1)
{
log_info ("%s (%s) starting in restore-mode...", PROGNAME, VERSION_TAG);
-
log_info ("");
}
else
{
log_info ("%s (%s) starting...", PROGNAME, VERSION_TAG);
-
log_info ("");
}
}
return (-1);
}
- if (hash_mode_chgd && hash_mode > 13500) // just added to remove compiler warnings for hash_mode_chgd
+ if (hash_mode_chgd && hash_mode > 13600) // just added to remove compiler warnings for hash_mode_chgd
{
log_error ("ERROR: Invalid hash-type specified");
dgst_size = DGST_SIZE_4_4;
parse_func = dcc2_parse_hash;
sort_by_digest = sort_by_digest_4_4;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_4_4;
parse_func = wpa_parse_hash;
sort_by_digest = sort_by_digest_4_4;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
parse_func = sha512osx_parse_hash;
sort_by_digest = sort_by_digest_8_16;
opti_type = OPTI_TYPE_ZERO_BYTE
- | OPTI_TYPE_USES_BITS_64;
+ | OPTI_TYPE_USES_BITS_64
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
parse_func = sha512grub_parse_hash;
sort_by_digest = sort_by_digest_8_16;
opti_type = OPTI_TYPE_ZERO_BYTE
- | OPTI_TYPE_USES_BITS_64;
+ | OPTI_TYPE_USES_BITS_64
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_4_32;
parse_func = cisco8_parse_hash;
sort_by_digest = sort_by_digest_4_32;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_4_32;
parse_func = djangopbkdf2_parse_hash;
sort_by_digest = sort_by_digest_4_32;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_4_32;
parse_func = pbkdf2_sha256_parse_hash;
sort_by_digest = sort_by_digest_4_32;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_4_32;
parse_func = pbkdf2_md5_parse_hash;
sort_by_digest = sort_by_digest_4_32;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_size = DGST_SIZE_4_32;
parse_func = pbkdf2_sha1_parse_hash;
sort_by_digest = sort_by_digest_4_32;
- opti_type = OPTI_TYPE_ZERO_BYTE;
+ opti_type = OPTI_TYPE_ZERO_BYTE
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
parse_func = pbkdf2_sha512_parse_hash;
sort_by_digest = sort_by_digest_8_16;
opti_type = OPTI_TYPE_ZERO_BYTE
- | OPTI_TYPE_USES_BITS_64;
+ | OPTI_TYPE_USES_BITS_64
+ | OPTI_TYPE_SLOW_HASH_SIMD;
dgst_pos0 = 0;
dgst_pos1 = 1;
dgst_pos2 = 2;
dgst_pos3 = 1;
break;
+ case 13600: hash_type = HASH_TYPE_PBKDF2_SHA1;
+ salt_type = SALT_TYPE_EMBEDDED;
+ attack_exec = ATTACK_EXEC_OUTSIDE_KERNEL;
+ opts_type = OPTS_TYPE_PT_GENERATE_LE;
+ kern_type = KERN_TYPE_ZIP2;
+ dgst_size = DGST_SIZE_4_4;
+ parse_func = zip2_parse_hash;
+ sort_by_digest = sort_by_digest_4_4;
+ opti_type = OPTI_TYPE_ZERO_BYTE;
+ dgst_pos0 = 0;
+ dgst_pos1 = 1;
+ dgst_pos2 = 2;
+ dgst_pos3 = 3;
+ break;
+
default: usage_mini_print (PROGNAME); return (-1);
}
case 13100: esalt_size = sizeof (krb5tgs_t); break;
case 13400: esalt_size = sizeof (keepass_t); break;
case 13500: esalt_size = sizeof (pstoken_t); break;
+ case 13600: esalt_size = sizeof (zip2_t); break;
}
data.esalt_size = esalt_size;
((seven_zip_t *) hashes_buf[0].esalt)->data_len = 112;
((seven_zip_t *) hashes_buf[0].esalt)->unpack_size = 112;
break;
- case 13400: ((keepass_t *) hashes_buf[0].esalt)->version = 2;
+ case 13400: ((keepass_t *) hashes_buf[0].esalt)->version = 2;
break;
- case 13500: ((pstoken_t *) hashes_buf[0].esalt)->salt_len = 113;
+ case 13500: ((pstoken_t *) hashes_buf[0].esalt)->salt_len = 113;
+ break;
+ case 13600: ((zip2_t *) hashes_buf[0].esalt)->salt_len = 16;
+ ((zip2_t *) hashes_buf[0].esalt)->data_len = 32;
+ ((zip2_t *) hashes_buf[0].esalt)->mode = 3;
break;
}
}
break;
case 13400: hashes_buf[0].salt->salt_iter = ROUNDS_KEEPASS;
break;
+ case 13600: hashes_buf[0].salt->salt_iter = ROUNDS_ZIP2;
+ break;
}
hashes_cnt = 1;
}
else if (strcmp (platform_vendor, CL_VENDOR_APPLE) == 0)
{
- vendor_id = VENDOR_ID_GENERIC;
+ vendor_id = VENDOR_ID_APPLE;
}
else if (strcmp (platform_vendor, CL_VENDOR_INTEL_BEIGNET) == 0)
{
- vendor_id = VENDOR_ID_GENERIC;
+ vendor_id = VENDOR_ID_INTEL_BEIGNET;
}
else if (strcmp (platform_vendor, CL_VENDOR_INTEL_SDK) == 0)
{
- vendor_id = VENDOR_ID_GENERIC;
+ vendor_id = VENDOR_ID_INTEL_SDK;
}
else if (strcmp (platform_vendor, CL_VENDOR_MESA) == 0)
{
- vendor_id = VENDOR_ID_GENERIC;
+ vendor_id = VENDOR_ID_MESA;
}
else if (strcmp (platform_vendor, CL_VENDOR_NV) == 0)
{
}
else if (strcmp (platform_vendor, CL_VENDOR_POCL) == 0)
{
- vendor_id = VENDOR_ID_GENERIC;
+ vendor_id = VENDOR_ID_POCL;
}
else
{
{
log_info ("Watchdog: Temperature retain trigger set to %uc", gpu_temp_retain);
}
+
+ if (data.quiet == 0) log_info ("");
#endif
}
- if (data.quiet == 0) log_info ("");
-
/**
* HM devices: copy
*/
if (benchmark == 1) log_info ("Hashmode: %d", data.hash_mode);
#endif
+ if (data.quiet == 0) log_info_nn ("Initializing device kernels and memory...");
+
uint kernel_power_all = 0;
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
}
}
- if (quiet == 0) log_info ("");
-
for (uint tmto = tmto_start; tmto < tmto_stop; tmto++)
{
// TODO: in theory the following calculation needs to be done per salt, not global
return -1;
}
- if (quiet == 0) log_info ("");
if (quiet == 0) log_info ("SCRYPT tmto optimizer value set to: %u, mem: %u\n", data.salts_buf[0].scrypt_tmto, size_scryptV);
}
case 13000: size_tmps = kernel_power_max * sizeof (pbkdf2_sha256_tmp_t); break;
case 13200: size_tmps = kernel_power_max * sizeof (axcrypt_tmp_t); break;
case 13400: size_tmps = kernel_power_max * sizeof (keepass_tmp_t); break;
+ case 13600: size_tmps = kernel_power_max * sizeof (pbkdf2_sha1_tmp_t); break;
};
// size_hooks
snprintf (build_opts, sizeof (build_opts) - 1, "-cl-std=CL1.1 -I\"%s/\" -DVENDOR_ID=%u -DCUDA_ARCH=%d -DVECT_SIZE=%u -DDEVICE_TYPE=%u -DKERN_TYPE=%u -D_unroll", shared_dir, device_param->vendor_id, (device_param->sm_major * 100) + device_param->sm_minor, device_param->vector_width, (u32) device_param->device_type, kern_type);
+ if (device_param->vendor_id == VENDOR_ID_INTEL_SDK)
+ {
+ // our hand-written vectorization beats the Intel SDK auto-vectorizer, so pass -cl-opt-disable
+
+ char build_opts_new[1024] = { 0 };
+
+ snprintf (build_opts_new, sizeof (build_opts_new) - 1, "%s -cl-opt-disable", build_opts);
+
+ strncpy (build_opts, build_opts_new, sizeof (build_opts) - 1);
+ }
+
+ #ifdef DEBUG
+ log_info ("Device #%u: build_opts '%s'\n", device_id + 1, build_opts);
+ #endif
+
/**
* main kernel
*/
if (rc != 0)
{
device_param->skipped = true;
+
log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
+
continue;
}
}
else
{
- if (quiet == 0) log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ #ifdef DEBUG
+ log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ #endif
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
}
else
{
- if (quiet == 0) log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, source_file, sst.st_size);
+ #ifdef DEBUG
+ log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, source_file, sst.st_size);
+ #endif
load_kernel (source_file, 1, kernel_lengths, kernel_sources);
if (cached == 0)
{
if (quiet == 0) log_info ("Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, cached_file);
+ if (quiet == 0) log_info ("");
load_kernel (source_file, 1, kernel_lengths, kernel_sources);
if (rc != 0)
{
device_param->skipped = true;
+
log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
+
continue;
}
}
else
{
- if (quiet == 0) log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ #ifdef DEBUG
+ log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ #endif
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
if (cached == 0)
{
if (quiet == 0) log_info ("Device #%u: Kernel %s not found in cache! Building may take a while...", device_id + 1, cached_file);
+ if (quiet == 0) log_info ("");
load_kernel (source_file, 1, kernel_lengths, kernel_sources);
if (rc != 0)
{
device_param->skipped = true;
+
log_info ("Device #%u: Kernel %s build failure. Proceed without this device.", device_id + 1, source_file);
+
continue;
}
}
else
{
+ #ifdef DEBUG
if (quiet == 0) log_info ("Device #%u: Kernel %s (%ld bytes)", device_id + 1, cached_file, cst.st_size);
+ #endif
load_kernel (cached_file, 1, kernel_lengths, kernel_sources);
* kernel name
*/
+ size_t kernel_wgs_tmp;
+
char kernel_name[64] = { 0 };
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_tm", kern_type);
device_param->kernel_tm = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_tm, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
}
}
}
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_hook12", kern_type);
device_param->kernel12 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel12, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
}
if (opts_type & OPTS_TYPE_HOOK23)
snprintf (kernel_name, sizeof (kernel_name) - 1, "m%05d_hook23", kern_type);
device_param->kernel23 = hc_clCreateKernel (data.ocl, device_param->program, kernel_name);
+
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel23, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
}
}
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel1, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel2, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel3, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
for (uint i = 0; i <= 20; i++)
{
hc_clSetKernelArg (data.ocl, device_param->kernel1, i, sizeof (cl_mem), device_param->kernel_params[i]);
device_param->kernel_mp_l = hc_clCreateKernel (data.ocl, device_param->program_mp, "l_markov");
device_param->kernel_mp_r = hc_clCreateKernel (data.ocl, device_param->program_mp, "r_markov");
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp_l, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp_r, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
+
if (opts_type & OPTS_TYPE_PT_BITSLICE)
{
hc_clSetKernelArg (data.ocl, device_param->kernel_tm, 0, sizeof (cl_mem), device_param->kernel_params_tm[0]);
else if (attack_mode == ATTACK_MODE_HYBRID1)
{
device_param->kernel_mp = hc_clCreateKernel (data.ocl, device_param->program_mp, "C_markov");
+
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
}
else if (attack_mode == ATTACK_MODE_HYBRID2)
{
device_param->kernel_mp = hc_clCreateKernel (data.ocl, device_param->program_mp, "C_markov");
+
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_mp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
}
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
else
{
device_param->kernel_amp = hc_clCreateKernel (data.ocl, device_param->program_amp, "amp");
+
+ hc_clGetKernelWorkGroupInfo (data.ocl, device_param->kernel_amp, device_param->device, CL_KERNEL_WORK_GROUP_SIZE, sizeof (size_t), &kernel_wgs_tmp, NULL); kernel_threads = MIN (kernel_threads, kernel_wgs_tmp);
}
if (attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
}
}
+ // kernel_threads may have been lowered by the clGetKernelWorkGroupInfo() queries above;
+ // the value can only decrease, so previously sized buffers need no reallocation
+
+ device_param->kernel_threads = kernel_threads;
+
/**
* Store initial fanspeed if gpu_temp_retain is enabled
*/
data.kernel_power_all = kernel_power_all;
- if (data.quiet == 0) log_info ("");
+ if (data.quiet == 0) log_info_nn ("");
/**
* In benchmark-mode, inform user which algorithm is checked
*/
- if (benchmark == 1)
+ if (benchmark == 1 && status_automat == 0)
{
quiet = 0;
{
weak_hash_check (device_param, salt_pos);
}
- }
- // Display hack, guarantee that there is at least one \r before real start
+ // Display hack, guarantee that there is at least one \r before real start
- if (data.quiet == 0) log_info_nn ("");
+ //if (data.quiet == 0) log_info ("");
+ }
/**
* status and monitor threads
{
if (quiet == 0)
{
- log_info ("");
log_info ("ATTENTION!");
log_info (" The wordlist or mask you are using is too small.");
log_info (" Therefore, hashcat is unable to utilize the full parallelization power of your device(s).");
{
status_benchmark ();
- log_info ("");
+ if (status_automat == 0)
+ {
+ log_info ("");
+ }
}
else
{