#define SEPARATOR ':'
#define BITMAP_MIN 16
#define BITMAP_MAX 24
+#define NVIDIA_SPIN_DAMP 100
#define GPU_TEMP_DISABLE 0
#define GPU_TEMP_ABORT 90
-#define GPU_TEMP_RETAIN 0
+#define GPU_TEMP_RETAIN 65
#define WORKLOAD_PROFILE 2
#define KERNEL_ACCEL 0
#define KERNEL_LOOPS 0
" -w, --workload-profile | Num | Enable a specific workload profile, see pool below | -w 3",
" -n, --kernel-accel | Num | Manual workload tuning, set outerloop step size to X | -n 64",
" -u, --kernel-loops | Num | Manual workload tuning, set innerloop step size to X | -u 256",
+ " --nvidia-spin-damp | Num | Workaround NVidias CPU burning loop bug, in percent | --nvidia-spin-damp=50",
" --gpu-temp-disable | | Disable temperature and fanspeed reads and triggers |",
#ifdef HAVE_HWMON
" --gpu-temp-abort | Num | Abort if GPU temperature reaches X degrees celsius | --gpu-temp-abort=100",
fputs (out_buf, fp);
- if (fp == stdout)
- {
- log_out (fp, "");
- }
- else
- {
- fputc ('\n', fp);
- }
+ fputc ('\n', fp);
}
else
{
hc_clFlush (data.ocl, device_param->command_queue);
- if (data.devices_status == STATUS_RUNNING)
+ if (device_param->nvidia_spin_damp)
{
- if (iteration < EXPECTED_ITERATIONS)
+ if (data.devices_status == STATUS_RUNNING)
{
- switch (kern_run)
+ if (iteration < EXPECTED_ITERATIONS)
{
- case KERN_RUN_1: if (device_param->exec_us_prev1[iteration]) usleep (device_param->exec_us_prev1[iteration]); break;
- case KERN_RUN_2: if (device_param->exec_us_prev2[iteration]) usleep (device_param->exec_us_prev2[iteration]); break;
- case KERN_RUN_3: if (device_param->exec_us_prev3[iteration]) usleep (device_param->exec_us_prev3[iteration]); break;
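+ // Sleep for roughly as long as the same kernel invocation took last time,
+ // scaled by the damping factor, so the host thread does not spin inside the
+ // NVidia runtime while the kernel is running.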
+ switch (kern_run)
+ {
+ case KERN_RUN_1: if (device_param->exec_us_prev1[iteration]) usleep (device_param->exec_us_prev1[iteration] * device_param->nvidia_spin_damp); break;
+ case KERN_RUN_2: if (device_param->exec_us_prev2[iteration]) usleep (device_param->exec_us_prev2[iteration] * device_param->nvidia_spin_damp); break;
+ case KERN_RUN_3: if (device_param->exec_us_prev3[iteration]) usleep (device_param->exec_us_prev3[iteration] * device_param->nvidia_spin_damp); break;
+ }
}
}
}
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
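+ // NVidia fan speed is set through NvAPI on Windows and through the
+ // NV-CONTROL X extension (XNVCTRL) on Linux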
+ #ifdef WIN
+ hm_set_fanspeed_with_device_id_nvapi (device_id, fan_speed_new, 1);
+ #endif
+ #ifdef LINUX
+ hm_set_fanspeed_with_device_id_xnvctrl (device_id, fan_speed_new);
+ #endif
}
fan_speed_chgd[device_id] = 1;
uint workload_profile = WORKLOAD_PROFILE;
uint kernel_accel = KERNEL_ACCEL;
uint kernel_loops = KERNEL_LOOPS;
+ uint nvidia_spin_damp = NVIDIA_SPIN_DAMP;
uint gpu_temp_disable = GPU_TEMP_DISABLE;
#ifdef HAVE_HWMON
uint gpu_temp_abort = GPU_TEMP_ABORT;
#define IDX_WORKLOAD_PROFILE 'w'
#define IDX_KERNEL_ACCEL 'n'
#define IDX_KERNEL_LOOPS 'u'
+ #define IDX_NVIDIA_SPIN_DAMP 0xff79
#define IDX_GPU_TEMP_DISABLE 0xff29
#define IDX_GPU_TEMP_ABORT 0xff30
#define IDX_GPU_TEMP_RETAIN 0xff31
{"workload-profile", required_argument, 0, IDX_WORKLOAD_PROFILE},
{"kernel-accel", required_argument, 0, IDX_KERNEL_ACCEL},
{"kernel-loops", required_argument, 0, IDX_KERNEL_LOOPS},
+ {"nvidia-spin-damp", required_argument, 0, IDX_NVIDIA_SPIN_DAMP},
{"gpu-temp-disable", no_argument, 0, IDX_GPU_TEMP_DISABLE},
#ifdef HAVE_HWMON
{"gpu-temp-abort", required_argument, 0, IDX_GPU_TEMP_ABORT},
uint runtime_chgd = 0;
uint kernel_loops_chgd = 0;
uint kernel_accel_chgd = 0;
+ uint nvidia_spin_damp_chgd = 0;
uint attack_mode_chgd = 0;
uint outfile_format_chgd = 0;
uint rp_gen_seed_chgd = 0;
kernel_accel_chgd = 1; break;
case IDX_KERNEL_LOOPS: kernel_loops = atoi (optarg);
kernel_loops_chgd = 1; break;
+ case IDX_NVIDIA_SPIN_DAMP: nvidia_spin_damp = atoi (optarg);
+ nvidia_spin_damp_chgd = 1; break;
case IDX_GPU_TEMP_DISABLE: gpu_temp_disable = 1; break;
#ifdef HAVE_HWMON
case IDX_GPU_TEMP_ABORT: gpu_temp_abort = atoi (optarg); break;
weak_hash_threshold = 0;
}
+ if (nvidia_spin_damp > 100)
+ {
+ log_error ("ERROR: setting --nvidia-spin-damp must be between 0 and 100 (inclusive)");
+
+ return (-1);
+ }
+
/**
* induction directory
*/
logfile_top_uint (force);
logfile_top_uint (kernel_accel);
logfile_top_uint (kernel_loops);
+ logfile_top_uint (nvidia_spin_damp);
logfile_top_uint (gpu_temp_disable);
#ifdef HAVE_HWMON
logfile_top_uint (gpu_temp_abort);
restore_disable = 1;
potfile_disable = 1;
weak_hash_threshold = 0;
+ nvidia_spin_damp = 0;
gpu_temp_disable = 1;
outfile_check_timer = 0;
* OpenCL devices: simply push all devices from all platforms into the same device array
*/
- int need_adl = 0;
- int need_nvapi = 0;
- int need_nvml = 0;
+ int need_adl = 0;
+ int need_nvapi = 0;
+ int need_nvml = 0;
+ int need_xnvctrl = 0;
hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t));
{
need_nvml = 1;
- #ifdef _WIN
+ #ifdef LINUX
+ need_xnvctrl = 1;
+ #endif
+
+ #ifdef WIN
need_nvapi = 1;
#endif
}
device_param->sm_minor = sm_minor;
device_param->sm_major = sm_major;
+
+ // CPU burning loop damper
+ // The value is given as a percentage between 0 and 100 (default: 100)
+
+ device_param->nvidia_spin_damp = (double) nvidia_spin_damp;
+
+ if (nvidia_spin_damp_chgd == 0)
+ {
+ if (data.attack_mode == ATTACK_MODE_STRAIGHT)
+ {
+ /**
+ * The workaround does not play well with rule-based attacks:
+ * words from the wordlist combined with fast and slow rules cause runtime
+ * fluctuations, which make the wait-time estimation inaccurate.
+ * A reduced damping percentage almost compensates for this.
+ */
+
+ device_param->nvidia_spin_damp = 64;
+ }
+ }
+
+ device_param->nvidia_spin_damp /= 100;
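+
+ // Example: with the straight-mode default of 64 the factor becomes 0.64, so a
+ // kernel invocation that previously took 1000 us leads to usleep (640)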
}
else
{
*/
#ifdef HAVE_HWMON
- hm_attrs_t hm_adapters_adl[DEVICES_MAX] = { { 0 } };
- hm_attrs_t hm_adapters_nvapi[DEVICES_MAX] = { { 0 } };
- hm_attrs_t hm_adapters_nvml[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_adl[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_nvapi[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_nvml[DEVICES_MAX] = { { 0 } };
+ hm_attrs_t hm_adapters_xnvctrl[DEVICES_MAX] = { { 0 } };
if (gpu_temp_disable == 0)
{
- ADL_PTR *adl = (ADL_PTR *) mymalloc (sizeof (ADL_PTR));
- NVAPI_PTR *nvapi = (NVAPI_PTR *) mymalloc (sizeof (NVAPI_PTR));
- NVML_PTR *nvml = (NVML_PTR *) mymalloc (sizeof (NVML_PTR));
+ ADL_PTR *adl = (ADL_PTR *) mymalloc (sizeof (ADL_PTR));
+ NVAPI_PTR *nvapi = (NVAPI_PTR *) mymalloc (sizeof (NVAPI_PTR));
+ NVML_PTR *nvml = (NVML_PTR *) mymalloc (sizeof (NVML_PTR));
+ XNVCTRL_PTR *xnvctrl = (XNVCTRL_PTR *) mymalloc (sizeof (XNVCTRL_PTR));
- data.hm_adl = NULL;
- data.hm_nvapi = NULL;
- data.hm_nvml = NULL;
+ data.hm_adl = NULL;
+ data.hm_nvapi = NULL;
+ data.hm_nvml = NULL;
+ data.hm_xnvctrl = NULL;
if ((need_nvml == 1) && (nvml_init (nvml) == 0))
{
}
}
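+ // Probe the NV-CONTROL X extension: open the X display and, for each GPU,
+ // mark fan monitoring as supported if the current fan speed can be read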
+ if ((need_xnvctrl == 1) && (xnvctrl_init (xnvctrl) == 0))
+ {
+ data.hm_xnvctrl = xnvctrl;
+ }
+
+ if (data.hm_xnvctrl)
+ {
+ if (hm_XNVCTRL_XOpenDisplay (data.hm_xnvctrl) == 0)
+ {
+ for (uint device_id = 0; device_id < data.devices_cnt; device_id++)
+ {
+ hc_device_param_t *device_param = &data.devices_param[device_id];
+
+ if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
+
+ hm_adapters_xnvctrl[device_id].xnvctrl = device_id;
+
+ int speed = 0;
+
+ if (get_fan_speed_current (data.hm_xnvctrl, device_id, &speed) == 0) hm_adapters_xnvctrl[device_id].fan_get_supported = 1;
+ }
+ }
+ }
+
if ((need_adl == 1) && (adl_init (adl) == 0))
{
data.hm_adl = adl;
}
}
- if (data.hm_adl == NULL && data.hm_nvml == NULL)
+ if (data.hm_adl == NULL && data.hm_nvml == NULL && data.hm_xnvctrl == NULL)
{
gpu_temp_disable = 1;
}
*/
#ifdef HAVE_HWMON
- if (gpu_temp_disable == 0 && data.hm_adl == NULL && data.hm_nvml == NULL)
+ if (gpu_temp_disable == 0 && data.hm_adl == NULL && data.hm_nvml == NULL && data.hm_xnvctrl == NULL)
{
log_info ("Watchdog: Hardware Monitoring Interface not found on your system");
}
data.hm_device[device_id].adl = hm_adapters_adl[platform_devices_id].adl;
data.hm_device[device_id].nvapi = 0;
data.hm_device[device_id].nvml = 0;
+ data.hm_device[device_id].xnvctrl = 0;
data.hm_device[device_id].od_version = hm_adapters_adl[platform_devices_id].od_version;
data.hm_device[device_id].fan_get_supported = hm_adapters_adl[platform_devices_id].fan_get_supported;
- data.hm_device[device_id].fan_set_supported = hm_adapters_adl[platform_devices_id].fan_set_supported;
+ data.hm_device[device_id].fan_set_supported = 0;
}
if (device_param->device_vendor_id == VENDOR_ID_NV)
data.hm_device[device_id].adl = 0;
data.hm_device[device_id].nvapi = hm_adapters_nvapi[platform_devices_id].nvapi;
data.hm_device[device_id].nvml = hm_adapters_nvml[platform_devices_id].nvml;
+ data.hm_device[device_id].xnvctrl = hm_adapters_xnvctrl[platform_devices_id].xnvctrl;
data.hm_device[device_id].od_version = 0;
data.hm_device[device_id].fan_get_supported = hm_adapters_nvml[platform_devices_id].fan_get_supported;
data.hm_device[device_id].fan_set_supported = 0;
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
+ #ifdef LINUX
+ rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_TRUE);
+ #endif
+ #ifdef WIN
+ rc = hm_set_fanspeed_with_device_id_nvapi (device_id, fanspeed, 1);
+ #endif
}
if (rc == 0)
}
else if (device_param->device_vendor_id == VENDOR_ID_NV)
{
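+ // give fan control back to the driver: automatic mode via XNVCTRL on Linux,
+ // the stored fanspeed/fanpolicy via NvAPI on Windows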
+ #ifdef LINUX
+ rc = set_fan_control (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, NV_CTRL_GPU_COOLER_MANUAL_CONTROL_FALSE);
+ #endif
+ #ifdef WIN
+ rc = hm_set_fanspeed_with_device_id_nvapi (device_id, fanspeed, fanpolicy);
+ #endif
}
if (rc == -1) log_info ("WARNING: Failed to restore default fan speed and policy for device #%u", device_id + 1);
data.hm_nvml = NULL;
}
+ if (data.hm_nvapi)
+ {
+ hm_NvAPI_Unload (data.hm_nvapi);
+
+ nvapi_close (data.hm_nvapi);
+
+ data.hm_nvapi = NULL;
+ }
+
+ if (data.hm_xnvctrl)
+ {
+ hm_XNVCTRL_XCloseDisplay (data.hm_xnvctrl);
+
+ xnvctrl_close (data.hm_xnvctrl);
+
+ data.hm_xnvctrl = NULL;
+ }
+
if (data.hm_adl)
{
hm_ADL_Main_Control_Destroy (data.hm_adl);