}
#endif // F_SETLKW
-#ifdef _WIN
+#ifdef WIN
void fsync (int fd)
{
HANDLE h = (HANDLE) _get_osfhandle (fd);
*/
#ifdef HAVE_HWMON
-#if defined(_WIN) && defined(HAVE_NVAPI)
-int hm_get_adapter_index_nv (HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX])
-{
- NvU32 pGpuCount;
-
- if (hm_NvAPI_EnumPhysicalGPUs (data.hm_nv, nvGPUHandle, &pGpuCount) != NVAPI_OK) return (0);
-
- if (pGpuCount == 0)
- {
- log_info ("WARN: No NvAPI adapters found");
- return (0);
- }
-
- return (pGpuCount);
-}
-#endif // _WIN && HAVE_NVAPI
-
-#if defined(LINUX) && defined(HAVE_NVML)
-int hm_get_adapter_index_nv (HM_ADAPTER_NV nvGPUHandle[DEVICES_MAX])
-{
- int pGpuCount = 0;
-
- for (uint i = 0; i < DEVICES_MAX; i++)
- {
- if (hm_NVML_nvmlDeviceGetHandleByIndex (data.hm_nv, 1, i, &nvGPUHandle[i]) != NVML_SUCCESS) break;
-
- // can be used to determine if the device by index matches the cuda device by index
- // char name[100]; memset (name, 0, sizeof (name));
- // hm_NVML_nvmlDeviceGetName (data.hm_nv, nvGPUHandle[i], name, sizeof (name) - 1);
-
- pGpuCount++;
- }
-
- if (pGpuCount == 0)
- {
- log_info ("WARN: No NVML adapters found");
-
- return (0);
- }
-
- return (pGpuCount);
-}
-#endif // LINUX && HAVE_NVML
-
-#ifdef HAVE_ADL
-int get_adapters_num_amd (void *adl, int *iNumberAdapters)
+int get_adapters_num_adl (void *adl, int *iNumberAdapters)
{
if (hm_ADL_Adapter_NumberOfAdapters_Get ((ADL_PTR *) adl, iNumberAdapters) != ADL_OK) return -1;
}
*/
-LPAdapterInfo hm_get_adapter_info_amd (void *adl, int iNumberAdapters)
+LPAdapterInfo hm_get_adapter_info_adl (void *adl, int iNumberAdapters)
{
size_t AdapterInfoSize = iNumberAdapters * sizeof (AdapterInfo);
return lpAdapterInfo;
}
+// Enumerate the physical NVIDIA GPUs visible through the NvAPI backend.
+// Fills nvapiGPUHandle[] and returns the number of adapters found,
+// or 0 on NvAPI error / when no adapters are present.
+int hm_get_adapter_index_nvapi (HM_ADAPTER_NVAPI nvapiGPUHandle[DEVICES_MAX])
+{
+  NvU32 pGpuCount;
+
+  if (hm_NvAPI_EnumPhysicalGPUs (data.hm_nvapi, nvapiGPUHandle, &pGpuCount) != NVAPI_OK) return (0);
+
+  if (pGpuCount == 0)
+  {
+    log_info ("WARN: No NvAPI adapters found");
+
+    return (0);
+  }
+
+  return (pGpuCount);
+}
+
+// Enumerate the NVIDIA GPUs visible through the NVML backend by probing
+// device indices 0..DEVICES_MAX-1 until the first failure.
+// Fills nvmlGPUHandle[] and returns the number of adapters found (0 if none).
+int hm_get_adapter_index_nvml (HM_ADAPTER_NVML nvmlGPUHandle[DEVICES_MAX])
+{
+  int pGpuCount = 0;
+
+  for (uint i = 0; i < DEVICES_MAX; i++)
+  {
+    if (hm_NVML_nvmlDeviceGetHandleByIndex (data.hm_nvml, 1, i, &nvmlGPUHandle[i]) != NVML_SUCCESS) break;
+
+    // can be used to determine if the device by index matches the cuda device by index
+    // char name[100]; memset (name, 0, sizeof (name));
+    // hm_NVML_nvmlDeviceGetName (data.hm_nvml, nvmlGPUHandle[i], name, sizeof (name) - 1);
+
+    pGpuCount++;
+  }
+
+  if (pGpuCount == 0)
+  {
+    log_info ("WARN: No NVML adapters found");
+
+    return (0);
+  }
+
+  return (pGpuCount);
+}
+
/*
//
-// does not help at all, since AMD does not assign different bus id, device id when we have multi GPU setups
+// does not help at all, since ADL does not assign different bus id, device id when we have multi GPU setups
//
int hm_get_opencl_device_index (hm_attrs_t *hm_device, uint num_adl_adapters, int bus_num, int dev_num)
if ((FanSpeedInfo.iFlags & ADL_DL_FANCTRL_SUPPORTS_PERCENT_READ) &&
(FanSpeedInfo.iFlags & ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE))
{
- hm_device[opencl_device_index].fan_supported = 1;
+ hm_device[opencl_device_index].fan_get_supported = 1;
}
else
{
- hm_device[opencl_device_index].fan_supported = 0;
+ hm_device[opencl_device_index].fan_get_supported = 0;
}
}
else // od_version == 6
if (faninfo.iSpeedType & ADL_OD6_FANSPEED_TYPE_PERCENT)
{
- hm_device[opencl_device_index].fan_supported = 1;
+ hm_device[opencl_device_index].fan_get_supported = 1;
}
else
{
- hm_device[opencl_device_index].fan_supported = 0;
+ hm_device[opencl_device_index].fan_get_supported = 0;
}
}
}
return 0;
}
-int hm_get_adapter_index_amd (hm_attrs_t *hm_device, u32 *valid_adl_device_list, int num_adl_adapters, LPAdapterInfo lpAdapterInfo)
+int hm_get_adapter_index_adl (hm_attrs_t *hm_device, u32 *valid_adl_device_list, int num_adl_adapters, LPAdapterInfo lpAdapterInfo)
{
for (int i = 0; i < num_adl_adapters; i++)
{
int opencl_device_index = i;
- hm_device[opencl_device_index].adapter_index.amd = info.iAdapterIndex;
+ hm_device[opencl_device_index].adl = info.iAdapterIndex;
}
return num_adl_adapters;
}
-#endif // HAVE_ADL
+
+// Return the temperature (degrees C) at which the GPU starts throttling
+// ("slowdown" threshold), or -1 if unavailable (CPU device, backend missing,
+// or query failure).
+int hm_get_threshold_slowdown_with_device_id (const uint device_id)
+{
+  if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
+  {
+    if (data.hm_adl)
+    {
+      if (data.hm_device[device_id].od_version == 5)
+      {
+        // no Overdrive5 query implemented; falls through to return -1
+
+      }
+      else if (data.hm_device[device_id].od_version == 6)
+      {
+        int CurrentValue = 0;
+        int DefaultValue = 0;
+
+        if (hm_ADL_Overdrive6_TargetTemperatureData_Get (data.hm_adl, data.hm_device[device_id].adl, &CurrentValue, &DefaultValue) != ADL_OK) return -1;
+
+        // the return value has never been tested since hm_ADL_Overdrive6_TargetTemperatureData_Get() never worked on any system. expect problems.
+
+        return DefaultValue;
+      }
+    }
+  }
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
+  {
+    int target = 0;
+
+    if (hm_NVML_nvmlDeviceGetTemperatureThreshold (data.hm_nvml, 1, data.hm_device[device_id].nvml, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN, (unsigned int *) &target) != NVML_SUCCESS) return -1;
+
+    return target;
+  }
+
+  return -1;
+}
+
+// Return the temperature (degrees C) at which the GPU shuts itself down
+// ("shutdown" threshold), or -1 if unavailable.
+// NOTE: both AMD Overdrive branches are empty placeholders — ADL exposes no
+// shutdown-threshold query here, so AMD always falls through to -1.
+int hm_get_threshold_shutdown_with_device_id (const uint device_id)
+{
+  if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
+  {
+    if (data.hm_adl)
+    {
+      if (data.hm_device[device_id].od_version == 5)
+      {
+
+      }
+      else if (data.hm_device[device_id].od_version == 6)
+      {
+
+      }
+    }
+  }
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
+  {
+    int target = 0;
+
+    if (hm_NVML_nvmlDeviceGetTemperatureThreshold (data.hm_nvml, 1, data.hm_device[device_id].nvml, NVML_TEMPERATURE_THRESHOLD_SHUTDOWN, (unsigned int *) &target) != NVML_SUCCESS) return -1;
+
+    return target;
+  }
+
+  return -1;
+}
int hm_get_temperature_with_device_id (const uint device_id)
{
if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
- #ifdef HAVE_ADL
- if (data.devices_param[device_id].vendor_id == VENDOR_ID_AMD)
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
- if (data.hm_amd)
+ if (data.hm_adl)
{
if (data.hm_device[device_id].od_version == 5)
{
Temperature.iSize = sizeof (ADLTemperature);
- if (hm_ADL_Overdrive5_Temperature_Get (data.hm_amd, data.hm_device[device_id].adapter_index.amd, 0, &Temperature) != ADL_OK) return -1;
+ if (hm_ADL_Overdrive5_Temperature_Get (data.hm_adl, data.hm_device[device_id].adl, 0, &Temperature) != ADL_OK) return -1;
return Temperature.iTemperature / 1000;
}
{
int Temperature = 0;
- if (hm_ADL_Overdrive6_Temperature_Get (data.hm_amd, data.hm_device[device_id].adapter_index.amd, &Temperature) != ADL_OK) return -1;
+ if (hm_ADL_Overdrive6_Temperature_Get (data.hm_adl, data.hm_device[device_id].adl, &Temperature) != ADL_OK) return -1;
return Temperature / 1000;
}
}
}
- #endif
- #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
- if (data.devices_param[device_id].vendor_id == VENDOR_ID_NV)
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
{
- #if defined(LINUX) && defined(HAVE_NVML)
int temperature = 0;
- hm_NVML_nvmlDeviceGetTemperature (data.hm_nv, data.hm_device[device_id].adapter_index.nv, NVML_TEMPERATURE_GPU, (uint *) &temperature);
+ if (hm_NVML_nvmlDeviceGetTemperature (data.hm_nvml, 1, data.hm_device[device_id].nvml, NVML_TEMPERATURE_GPU, (uint *) &temperature) != NVML_SUCCESS) return -1;
return temperature;
- #endif
+ }
+
+ return -1;
+}
- #if defined(WIN) && defined(HAVE_NVAPI)
- NV_GPU_THERMAL_SETTINGS pThermalSettings;
+int hm_get_fanpolicy_with_device_id (const uint device_id)
+{
+ if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
+
+ if (data.hm_device[device_id].fan_get_supported == 1)
+ {
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
+ {
+ if (data.hm_adl)
+ {
+ if (data.hm_device[device_id].od_version == 5)
+ {
+ ADLFanSpeedValue lpFanSpeedValue;
- pThermalSettings.version = NV_GPU_THERMAL_SETTINGS_VER;
- pThermalSettings.count = NVAPI_MAX_THERMAL_SENSORS_PER_GPU;
- pThermalSettings.sensor[0].controller = NVAPI_THERMAL_CONTROLLER_UNKNOWN;
- pThermalSettings.sensor[0].target = NVAPI_THERMAL_TARGET_GPU;
+ memset (&lpFanSpeedValue, 0, sizeof (lpFanSpeedValue));
+
+ lpFanSpeedValue.iSize = sizeof (lpFanSpeedValue);
+ lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_PERCENT;
+
+ if (hm_ADL_Overdrive5_FanSpeed_Get (data.hm_adl, data.hm_device[device_id].adl, 0, &lpFanSpeedValue) != ADL_OK) return -1;
- if (hm_NvAPI_GPU_GetThermalSettings (data.hm_nv, data.hm_device[device_id].adapter_index.nv, 0, &pThermalSettings) != NVAPI_OK) return -1;
+ return (lpFanSpeedValue.iFanSpeed & ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED) ? 0 : 1;
+ }
+ else // od_version == 6
+ {
+ return 1;
+ }
+ }
+ }
- return pThermalSettings.sensor[0].currentTemp;
- #endif // WIN && HAVE_NVAPI
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
+ {
+ return 1;
+ }
}
- #endif // HAVE_NVML || HAVE_NVAPI
return -1;
}
int hm_get_fanspeed_with_device_id (const uint device_id)
{
- // we shouldn't really need this extra CL_DEVICE_TYPE_GPU check, because fan_supported should not be set w/ CPUs
if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
- if (data.hm_device[device_id].fan_supported == 1)
+ if (data.hm_device[device_id].fan_get_supported == 1)
{
- #ifdef HAVE_ADL
- if (data.devices_param[device_id].vendor_id == VENDOR_ID_AMD)
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
- if (data.hm_amd)
+ if (data.hm_adl)
{
if (data.hm_device[device_id].od_version == 5)
{
lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_PERCENT;
lpFanSpeedValue.iFlags = ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED;
- if (hm_ADL_Overdrive5_FanSpeed_Get (data.hm_amd, data.hm_device[device_id].adapter_index.amd, 0, &lpFanSpeedValue) != ADL_OK) return -1;
+ if (hm_ADL_Overdrive5_FanSpeed_Get (data.hm_adl, data.hm_device[device_id].adl, 0, &lpFanSpeedValue) != ADL_OK) return -1;
return lpFanSpeedValue.iFanSpeed;
}
memset (&faninfo, 0, sizeof (faninfo));
- if (hm_ADL_Overdrive6_FanSpeed_Get (data.hm_amd, data.hm_device[device_id].adapter_index.amd, &faninfo) != ADL_OK) return -1;
+ if (hm_ADL_Overdrive6_FanSpeed_Get (data.hm_adl, data.hm_device[device_id].adl, &faninfo) != ADL_OK) return -1;
return faninfo.iFanSpeedPercent;
}
}
}
- #endif // HAVE_ADL
- #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
- if (data.devices_param[device_id].vendor_id == VENDOR_ID_NV)
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
{
- #if defined(LINUX) && defined(HAVE_NVML)
int speed = 0;
- hm_NVML_nvmlDeviceGetFanSpeed (data.hm_nv, 1, data.hm_device[device_id].adapter_index.nv, (uint *) &speed);
+ if (hm_NVML_nvmlDeviceGetFanSpeed (data.hm_nvml, 0, data.hm_device[device_id].nvml, (uint *) &speed) != NVML_SUCCESS) return -1;
return speed;
- #endif
+ }
+ }
- #if defined(WIN) && defined(HAVE_NVAPI)
+ return -1;
+}
- NV_GPU_COOLER_SETTINGS pCoolerSettings;
+int hm_get_buslanes_with_device_id (const uint device_id)
+{
+ if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
- pCoolerSettings.Version = GPU_COOLER_SETTINGS_VER | sizeof (NV_GPU_COOLER_SETTINGS);
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
+ {
+ if (data.hm_adl)
+ {
+ ADLPMActivity PMActivity;
- hm_NvAPI_GPU_GetCoolerSettings (data.hm_nv, data.hm_device[device_id].adapter_index.nv, 0, &pCoolerSettings);
+ PMActivity.iSize = sizeof (ADLPMActivity);
- return pCoolerSettings.Cooler[0].CurrentLevel;
- #endif
+ if (hm_ADL_Overdrive_CurrentActivity_Get (data.hm_adl, data.hm_device[device_id].adl, &PMActivity) != ADL_OK) return -1;
+
+ return PMActivity.iCurrentBusLanes;
}
- #endif // HAVE_NVML || HAVE_NVAPI
+ }
+
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
+ {
+ unsigned int currLinkWidth;
+
+ if (hm_NVML_nvmlDeviceGetCurrPcieLinkWidth (data.hm_nvml, 1, data.hm_device[device_id].nvml, &currLinkWidth) != NVML_SUCCESS) return -1;
+
+ return currLinkWidth;
}
return -1;
{
if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
- #ifdef HAVE_ADL
- if (data.devices_param[device_id].vendor_id == VENDOR_ID_AMD)
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
- if (data.hm_amd)
+ if (data.hm_adl)
{
ADLPMActivity PMActivity;
PMActivity.iSize = sizeof (ADLPMActivity);
- if (hm_ADL_Overdrive_CurrentActivity_Get (data.hm_amd, data.hm_device[device_id].adapter_index.amd, &PMActivity) != ADL_OK) return -1;
+ if (hm_ADL_Overdrive_CurrentActivity_Get (data.hm_adl, data.hm_device[device_id].adl, &PMActivity) != ADL_OK) return -1;
return PMActivity.iActivityPercent;
}
}
- #endif // HAVE_ADL
- #if defined(HAVE_NVML) || defined(HAVE_NVAPI)
- if (data.devices_param[device_id].vendor_id == VENDOR_ID_NV)
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
{
- #if defined(LINUX) && defined(HAVE_NVML)
nvmlUtilization_t utilization;
- hm_NVML_nvmlDeviceGetUtilizationRates (data.hm_nv, data.hm_device[device_id].adapter_index.nv, &utilization);
+ if (hm_NVML_nvmlDeviceGetUtilizationRates (data.hm_nvml, 1, data.hm_device[device_id].nvml, &utilization) != NVML_SUCCESS) return -1;
return utilization.gpu;
- #endif
+ }
+
+ return -1;
+}
+
+int hm_get_memoryspeed_with_device_id (const uint device_id)
+{
+ if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
- #if defined(WIN) && defined(HAVE_NVAPI)
- NV_GPU_DYNAMIC_PSTATES_INFO_EX pDynamicPstatesInfoEx;
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
+ {
+ if (data.hm_adl)
+ {
+ ADLPMActivity PMActivity;
- pDynamicPstatesInfoEx.version = NV_GPU_DYNAMIC_PSTATES_INFO_EX_VER;
+ PMActivity.iSize = sizeof (ADLPMActivity);
- if (hm_NvAPI_GPU_GetDynamicPstatesInfoEx (data.hm_nv, data.hm_device[device_id].adapter_index.nv, &pDynamicPstatesInfoEx) != NVAPI_OK) return -1;
+ if (hm_ADL_Overdrive_CurrentActivity_Get (data.hm_adl, data.hm_device[device_id].adl, &PMActivity) != ADL_OK) return -1;
- return pDynamicPstatesInfoEx.utilization[0].percentage;
- #endif
+ return PMActivity.iMemoryClock / 100;
+ }
+ }
+
+ if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
+ {
+ unsigned int clock;
+
+ if (hm_NVML_nvmlDeviceGetClockInfo (data.hm_nvml, 1, data.hm_device[device_id].nvml, NVML_CLOCK_MEM, &clock) != NVML_SUCCESS) return -1;
+
+ return clock;
+ }
+
+ return -1;
+}
+
+// Return the current GPU core (engine/SM) clock in MHz, or -1 if unavailable.
+int hm_get_corespeed_with_device_id (const uint device_id)
+{
+  if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
+  {
+    if (data.hm_adl)
+    {
+      ADLPMActivity PMActivity;
+
+      PMActivity.iSize = sizeof (ADLPMActivity);
+
+      if (hm_ADL_Overdrive_CurrentActivity_Get (data.hm_adl, data.hm_device[device_id].adl, &PMActivity) != ADL_OK) return -1;
+
+      // ADL reports clocks in 10 KHz units, so /100 converts to MHz -- per ADL SDK docs
+      return PMActivity.iEngineClock / 100;
+    }
+  }
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
+  {
+    unsigned int clock;
+
+    // NVML_CLOCK_SM is already in MHz
+    if (hm_NVML_nvmlDeviceGetClockInfo (data.hm_nvml, 1, data.hm_device[device_id].nvml, NVML_CLOCK_SM, &clock) != NVML_SUCCESS) return -1;
+
+    return clock;
+  }
+
+  return -1;
+}
+
+// Return 1 if the GPU is currently throttling its clocks, 0 if not,
+// -1 if the information is unavailable (CPU device, AMD, or NVML error).
+int hm_get_throttle_with_device_id (const uint device_id)
+{
+  if ((data.devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
+  {
+    // no ADL throttle query implemented; falls through to return -1
+
+  }
+
+  if (data.devices_param[device_id].device_vendor_id == VENDOR_ID_NV)
+  {
+    unsigned long long clocksThrottleReasons = 0;
+    unsigned long long supportedThrottleReasons = 0;
+
+    if (hm_NVML_nvmlDeviceGetCurrentClocksThrottleReasons (data.hm_nvml, 1, data.hm_device[device_id].nvml, &clocksThrottleReasons) != NVML_SUCCESS) return -1;
+    if (hm_NVML_nvmlDeviceGetSupportedClocksThrottleReasons (data.hm_nvml, 1, data.hm_device[device_id].nvml, &supportedThrottleReasons) != NVML_SUCCESS) return -1;
+
+    // only count reasons this device actually supports ...
+    clocksThrottleReasons &= supportedThrottleReasons;
+
+    // ... and ignore the "unknown" pseudo-reason so it never reports a false positive
+    clocksThrottleReasons &= ~nvmlClocksThrottleReasonUnknown;
+
+    return (clocksThrottleReasons > 0);
}
- #endif // HAVE_NVML || HAVE_NVAPI
return -1;
}
-#ifdef HAVE_ADL
-int hm_set_fanspeed_with_device_id_amd (const uint device_id, const int fanspeed)
+int hm_set_fanspeed_with_device_id_adl (const uint device_id, const int fanspeed, const int fanpolicy)
{
- if (data.hm_device[device_id].fan_supported == 1)
+ if (data.hm_device[device_id].fan_set_supported == 1)
{
- if (data.hm_amd)
+ if (data.hm_adl)
{
if (data.hm_device[device_id].od_version == 5)
{
lpFanSpeedValue.iSize = sizeof (lpFanSpeedValue);
lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_PERCENT;
- lpFanSpeedValue.iFlags = ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED;
+ lpFanSpeedValue.iFlags = (fanpolicy == 1) ? ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED : 0;
lpFanSpeedValue.iFanSpeed = fanspeed;
- if (hm_ADL_Overdrive5_FanSpeed_Set (data.hm_amd, data.hm_device[device_id].adapter_index.amd, 0, &lpFanSpeedValue) != ADL_OK) return -1;
+ if (hm_ADL_Overdrive5_FanSpeed_Set (data.hm_adl, data.hm_device[device_id].adl, 0, &lpFanSpeedValue) != ADL_OK) return -1;
return 0;
}
fan_speed_value.iSpeedType = ADL_OD6_FANSPEED_TYPE_PERCENT;
fan_speed_value.iFanSpeed = fanspeed;
- if (hm_ADL_Overdrive6_FanSpeed_Set (data.hm_amd, data.hm_device[device_id].adapter_index.amd, &fan_speed_value) != ADL_OK) return -1;
+ if (hm_ADL_Overdrive6_FanSpeed_Set (data.hm_adl, data.hm_device[device_id].adl, &fan_speed_value) != ADL_OK) return -1;
return 0;
}
return -1;
}
-#endif
-
-// helper function for status display
-void hm_device_val_to_str (char *target_buf, int max_buf_size, char *suffix, int value)
+int hm_set_fanspeed_with_device_id_nvapi (const uint device_id, const int fanspeed, const int fanpolicy)
{
- #define VALUE_NOT_AVAILABLE "N/A"
-
- if (value == -1)
+ if (data.hm_device[device_id].fan_set_supported == 1)
{
- snprintf (target_buf, max_buf_size, VALUE_NOT_AVAILABLE);
+ if (data.hm_nvapi)
+ {
+ NV_GPU_COOLER_LEVELS CoolerLevels = { 0 };
+
+ CoolerLevels.Version = GPU_COOLER_LEVELS_VER | sizeof (NV_GPU_COOLER_LEVELS);
+
+ CoolerLevels.Levels[0].Level = fanspeed;
+ CoolerLevels.Levels[0].Policy = fanpolicy;
+
+ if (hm_NvAPI_GPU_SetCoolerLevels (data.hm_nvapi, data.hm_device[device_id].nvapi, 0, &CoolerLevels) != NVAPI_OK) return -1;
+
+ return 0;
+ }
}
- else
+
+ return -1;
+}
+
+int hm_set_fanspeed_with_device_id_xnvctrl (const uint device_id, const int fanspeed)
+{
+ if (data.hm_device[device_id].fan_set_supported == 1)
{
- snprintf (target_buf, max_buf_size, "%2d%s", value, suffix);
+ if (data.hm_xnvctrl)
+ {
+ if (set_fan_speed_target (data.hm_xnvctrl, data.hm_device[device_id].xnvctrl, fanspeed) != 0) return -1;
+
+ return 0;
+ }
}
+
+ return -1;
}
+
#endif // HAVE_HWMON
/**
void set_cpu_affinity (char *cpu_affinity)
{
- #ifdef WIN
+ #ifdef _WIN
DWORD_PTR aff_mask = 0;
#elif _POSIX
cpu_set_t cpuset;
if (cpu_id == 0)
{
- #ifdef WIN
+ #ifdef _WIN
aff_mask = 0;
#elif _POSIX
CPU_ZERO (&cpuset);
exit (-1);
}
- #ifdef WIN
+ #ifdef _WIN
aff_mask |= 1 << (cpu_id - 1);
#elif _POSIX
CPU_SET ((cpu_id - 1), &cpuset);
free (devices);
}
- #ifdef WIN
+ #ifdef _WIN
SetProcessAffinityMask (GetCurrentProcess (), aff_mask);
SetThreadAffinityMask (GetCurrentThread (), aff_mask);
#elif _POSIX
dictstat_t *d1 = (dictstat_t *) s1;
dictstat_t *d2 = (dictstat_t *) s2;
- #ifdef LINUX
+ #ifdef _POSIX
d2->stat.st_atim = d1->stat.st_atim;
#else
d2->stat.st_atime = d1->stat.st_atime;
case 13761: return ((char *) HT_13761); break;
case 13762: return ((char *) HT_13762); break;
case 13763: return ((char *) HT_13763); break;
+ case 13800: return ((char *) HT_13800); break;
}
return ((char *) "Unknown");
{
snprintf (out_buf, len-1, "%s", hashfile);
}
+ else if (hash_mode == 13800)
+ {
+ win8phone_t *esalts = (win8phone_t *) data.esalts_buf;
+
+ win8phone_t *esalt = &esalts[salt_pos];
+
+ char buf[256 + 1] = { 0 };
+
+ for (int i = 0, j = 0; i < 32; i += 1, j += 8)
+ {
+ sprintf (buf + j, "%08x", esalt->salt_buf[i]);
+ }
+
+ snprintf (out_buf, len-1, "%08x%08x%08x%08x%08x%08x%08x%08x:%s",
+ digest_buf[0],
+ digest_buf[1],
+ digest_buf[2],
+ digest_buf[3],
+ digest_buf[4],
+ digest_buf[5],
+ digest_buf[6],
+ digest_buf[7],
+ buf);
+ }
else
{
if (hash_type == HASH_TYPE_MD4)
u32 *digest = (u32 *) hash_buf->digest;
- input_buf +=14;
+ input_buf += 14;
digest[0] = hex_to_u32 ((const u8 *) &input_buf[ 0]);
digest[1] = hex_to_u32 ((const u8 *) &input_buf[ 8]);
digest[2] = hex_to_u32 ((const u8 *) &input_buf[16]);
digest[3] = hex_to_u32 ((const u8 *) &input_buf[24]);
- digest[4] = 0x00000000;
+ digest[4] = 0;
return (PARSER_OK);
}
pstoken->pc_offset = 0;
- for (int i = 0; i < (int) pstoken->salt_len - 64; i += 64)
+ for (int i = 0; i < (int) pstoken->salt_len - 63; i += 64)
{
uint w[16];
return (PARSER_OK);
}
+// Parse a hash-mode 13800 (Windows Phone 8+ PIN/password) line of the form
+//   <64 hex chars digest><separator><256 hex chars salt>
+// digest = 8 big 32-bit words, salt = 32 x 32-bit words stored in the esalt.
+// Returns PARSER_OK or a PARSER_* error code.
+int win8phone_parse_hash (char *input_buf, uint input_len, hash_t *hash_buf)
+{
+  if ((input_len < DISPLAY_LEN_MIN_13800) || (input_len > DISPLAY_LEN_MAX_13800)) return (PARSER_GLOBAL_LENGTH);
+
+  u32 *digest = (u32 *) hash_buf->digest;
+
+  salt_t *salt = hash_buf->salt;
+
+  win8phone_t *esalt = hash_buf->esalt;
+
+  digest[0] = hex_to_u32 ((const u8 *) &input_buf[ 0]);
+  digest[1] = hex_to_u32 ((const u8 *) &input_buf[ 8]);
+  digest[2] = hex_to_u32 ((const u8 *) &input_buf[16]);
+  digest[3] = hex_to_u32 ((const u8 *) &input_buf[24]);
+  digest[4] = hex_to_u32 ((const u8 *) &input_buf[32]);
+  digest[5] = hex_to_u32 ((const u8 *) &input_buf[40]);
+  digest[6] = hex_to_u32 ((const u8 *) &input_buf[48]);
+  digest[7] = hex_to_u32 ((const u8 *) &input_buf[56]);
+
+  if (input_buf[64] != data.separator) return (PARSER_SEPARATOR_UNMATCHED);
+
+  char *salt_buf_ptr = input_buf + 64 + 1;
+
+  u32 *salt_buf = esalt->salt_buf;
+
+  // full 128-byte salt goes into the esalt, 8 hex chars per word
+  for (int i = 0, j = 0; i < 32; i += 1, j += 8)
+  {
+    salt_buf[i] = hex_to_u32 ((const u8 *) &salt_buf_ptr[j]);
+  }
+
+  // only the first 8 words are mirrored into the generic salt record,
+  // presumably enough for salt sorting/dedup -- TODO confirm against callers
+  salt->salt_buf[0] = salt_buf[0];
+  salt->salt_buf[1] = salt_buf[1];
+  salt->salt_buf[2] = salt_buf[2];
+  salt->salt_buf[3] = salt_buf[3];
+  salt->salt_buf[4] = salt_buf[4];
+  salt->salt_buf[5] = salt_buf[5];
+  salt->salt_buf[6] = salt_buf[6];
+  salt->salt_buf[7] = salt_buf[7];
+
+  salt->salt_len = 64;
+
+  return (PARSER_OK);
+}
+
/**
* parallel running threads
*/