#define NVIDIA_SPIN_DAMP 100
#define GPU_TEMP_DISABLE 0
#define GPU_TEMP_ABORT 90
-#define GPU_TEMP_RETAIN 65
+#define GPU_TEMP_RETAIN 75
#define WORKLOAD_PROFILE 2
#define KERNEL_ACCEL 0
#define KERNEL_LOOPS 0
time (&runtime_cur);
- int runtime_left = data.runtime_start + data.runtime - runtime_cur;
+ int runtime_left = data.proc_start + data.runtime - runtime_cur;
if (runtime_left <= 0)
{
if (status_left == 0)
{
- //hc_thread_mutex_lock (mux_display);
+ hc_thread_mutex_lock (mux_display);
if (data.quiet == 0) clear_prompt ();
if (data.quiet == 0) log_info ("");
- //hc_thread_mutex_unlock (mux_display);
+ hc_thread_mutex_unlock (mux_display);
status_left = data.status_timer;
}
data.status = status;
}
- uint i_threads_cnt = 0;
+ uint outer_threads_cnt = 0;
- hc_thread_t *i_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
+ hc_thread_t *outer_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
if (keyspace == 0 && benchmark == 0 && stdout_flag == 0)
{
if ((data.wordlist_mode == WL_MODE_FILE) || (data.wordlist_mode == WL_MODE_MASK))
{
- hc_thread_create (i_threads[i_threads_cnt], thread_keypress, NULL);
+ hc_thread_create (outer_threads[outer_threads_cnt], thread_keypress, NULL);
- i_threads_cnt++;
+ outer_threads_cnt++;
}
+
+ hc_thread_create (outer_threads[outer_threads_cnt], thread_monitor, NULL);
+
+ outer_threads_cnt++;
}
/**
if (cpu_rule_to_kernel_rule (rule_buf, rule_len, &kernel_rules_buf[kernel_rules_cnt]) == -1)
{
- log_info ("WARNING: Cannot convert rule for use on device in file %s on line %u: %s", rp_file, rule_line, rule_buf);
+ log_info ("WARNING: Cannot convert rule for use on OpenCL device in file %s on line %u: %s", rp_file, rule_line, rule_buf);
memset (&kernel_rules_buf[kernel_rules_cnt], 0, sizeof (kernel_rule_t)); // needs to be cleared otherwise we could have some remaining data
/* its so slow
if (rulefind (&kernel_rules_buf[kernel_rules_cnt], kernel_rules_buf, kernel_rules_cnt, sizeof (kernel_rule_t), sort_by_kernel_rule))
{
- log_info ("Duplicate rule for use on device in file %s in line %u: %s", rp_file, rule_line, rule_buf);
+ log_info ("Duplicate rule for use on OpenCL device in file %s in line %u: %s", rp_file, rule_line, rule_buf);
continue;
}
* generate NOP rules
*/
- if (kernel_rules_cnt == 0)
+ if ((rp_files_cnt == 0) && (rp_gen == 0))
{
kernel_rules_buf = (kernel_rule_t *) mymalloc (sizeof (kernel_rule_t));
data.kernel_rules_cnt = kernel_rules_cnt;
data.kernel_rules_buf = kernel_rules_buf;
+ if (kernel_rules_cnt == 0)
+ {
+ log_error ("ERROR: No valid rules left");
+
+ return (-1);
+ }
+
/**
* OpenCL platforms: detect
*/
if (hm_NVML_nvmlDeviceGetFanSpeed (data.hm_nvml, 0, hm_adapters_nvml[i].nvml, &speed) == NVML_SUCCESS) hm_adapters_nvml[i].fan_get_supported = 1;
- hm_NVML_nvmlDeviceSetComputeMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_COMPUTEMODE_EXCLUSIVE_PROCESS);
-
- hm_NVML_nvmlDeviceSetGpuOperationMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_GOM_ALL_ON);
+ // doesn't seem to create any advantages
+ //hm_NVML_nvmlDeviceSetComputeMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_COMPUTEMODE_EXCLUSIVE_PROCESS);
+ //hm_NVML_nvmlDeviceSetGpuOperationMode (data.hm_nvml, 1, hm_adapters_nvml[i].nvml, NVML_GOM_ALL_ON);
}
}
}
* OpenCL devices: allocate buffer for device specific information
*/
- int *temp_retain_fanspeed_value = (int *) mycalloc (data.devices_cnt, sizeof (int));
- int *temp_retain_fanpolicy_value = (int *) mycalloc (data.devices_cnt, sizeof (int));
-
ADLOD6MemClockState *od_clock_mem_status = (ADLOD6MemClockState *) mycalloc (data.devices_cnt, sizeof (ADLOD6MemClockState));
int *od_power_control_status = (int *) mycalloc (data.devices_cnt, sizeof (int));
const int fanspeed = hm_get_fanspeed_with_device_id (device_id);
const int fanpolicy = hm_get_fanpolicy_with_device_id (device_id);
- temp_retain_fanspeed_value[device_id] = fanspeed;
- temp_retain_fanpolicy_value[device_id] = fanpolicy;
-
// we also set it to tell the OS we take control over the fan and it's automatic controller
// if it was set to automatic. we do not control user-defined fanspeeds.
data.devices_status = STATUS_STARTING;
}
- uint ni_threads_cnt = 0;
+ uint inner_threads_cnt = 0;
- hc_thread_t *ni_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
-
- if (keyspace == 0 && benchmark == 0 && stdout_flag == 0)
- {
- hc_thread_create (ni_threads[ni_threads_cnt], thread_monitor, NULL);
-
- ni_threads_cnt++;
- }
+ hc_thread_t *inner_threads = (hc_thread_t *) mycalloc (10, sizeof (hc_thread_t));
/**
* Outfile remove
!((hash_mode >= 13700) && (hash_mode <= 13799)) &&
(hash_mode != 9000))
{
- hc_thread_create (ni_threads[ni_threads_cnt], thread_outfile_remove, NULL);
+ hc_thread_create (inner_threads[inner_threads_cnt], thread_outfile_remove, NULL);
- ni_threads_cnt++;
+ inner_threads_cnt++;
}
else
{
logfile_sub_msg ("START");
- data.devices_status = STATUS_INIT;
+ if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_INIT;
+ }
memset (data.words_progress_done, 0, data.salts_cnt * sizeof (u64));
memset (data.words_progress_rejected, 0, data.salts_cnt * sizeof (u64));
hc_thread_t *c_threads = (hc_thread_t *) mycalloc (data.devices_cnt, sizeof (hc_thread_t));
- data.devices_status = STATUS_AUTOTUNE;
+ if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_AUTOTUNE;
+ }
for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
{
* create cracker threads
*/
- data.devices_status = STATUS_RUNNING;
+ if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
+ {
+ data.devices_status = STATUS_RUNNING;
+ }
if (initial_restore_done == 0)
{
local_free (c_threads);
- if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT) && (data.devices_status != STATUS_BYPASS))
- {
- data.devices_status = STATUS_EXHAUSTED;
- }
-
logfile_sub_var_uint ("status-after-work", data.devices_status);
data.restore = 0;
global_free (subid);
- // from this point we handle bypass as exhausted
+ // from this point we handle bypass as running
if (data.devices_status == STATUS_BYPASS)
{
- data.devices_status = STATUS_EXHAUSTED;
+ data.devices_status = STATUS_RUNNING;
}
// finalize task
// wait for non-interactive threads
- for (uint thread_idx = 0; thread_idx < ni_threads_cnt; thread_idx++)
+ if ((data.devices_status != STATUS_CRACKED) && (data.devices_status != STATUS_ABORTED) && (data.devices_status != STATUS_QUIT))
{
- hc_thread_wait (1, &ni_threads[thread_idx]);
+ data.devices_status = STATUS_EXHAUSTED;
}
- local_free (ni_threads);
+ for (uint thread_idx = 0; thread_idx < inner_threads_cnt; thread_idx++)
+ {
+ hc_thread_wait (1, &inner_threads[thread_idx]);
+ }
+
+ local_free (inner_threads);
// we dont need restore file anymore
if (data.restore_disable == 0)
if (data.hm_device[device_id].fan_set_supported == 1)
{
- int fanspeed = temp_retain_fanspeed_value[device_id];
- int fanpolicy = temp_retain_fanpolicy_value[device_id];
+ int rc = -1;
- if (fanpolicy == 1)
+ if (device_param->device_vendor_id == VENDOR_ID_AMD)
{
- int rc = -1;
-
- if (device_param->device_vendor_id == VENDOR_ID_AMD)
- {
- rc = hm_set_fanspeed_with_device_id_adl (device_id, fanspeed, 0);
- }
- else if (device_param->device_vendor_id == VENDOR_ID_NV)
- {
- #ifdef LINUX
- rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_FALSE);
- #endif
-
- #ifdef WIN
- rc = hm_set_fanspeed_with_device_id_nvapi (device_id, fanspeed, fanpolicy);
- #endif
- }
+ rc = hm_set_fanspeed_with_device_id_adl (device_id, 100, 0);
+ }
+ else if (device_param->device_vendor_id == VENDOR_ID_NV)
+ {
+ #ifdef LINUX
+ rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_FALSE);
+ #endif
- if (rc == -1) log_info ("WARNING: Failed to restore default fan speed and policy for device #%", device_id + 1);
+ #ifdef WIN
+ rc = hm_set_fanspeed_with_device_id_nvapi (device_id, 100, 0);
+ #endif
}
+
+ if (rc == -1) log_info ("WARNING: Failed to restore default fan speed and policy for device #%u", device_id + 1);
}
}
local_free (bitmap_s2_d);
#ifdef HAVE_HWMON
- local_free (temp_retain_fanspeed_value);
local_free (od_clock_mem_status);
local_free (od_power_control_status);
local_free (nvml_power_limit);
// wait for interactive threads
- for (uint thread_idx = 0; thread_idx < i_threads_cnt; thread_idx++)
+ for (uint thread_idx = 0; thread_idx < outer_threads_cnt; thread_idx++)
{
- hc_thread_wait (1, &i_threads[thread_idx]);
+ hc_thread_wait (1, &outer_threads[thread_idx]);
}
- local_free (i_threads);
+ local_free (outer_threads);
// destroy others mutex