From ce5df4a63d9135a34217beda7a09e56cc17d10d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 16:09:12 +0000 Subject: [PATCH] Bump github.com/NVIDIA/go-nvml from 0.12.0-6 to 0.12.9-0 Bumps [github.com/NVIDIA/go-nvml](https://github.com/NVIDIA/go-nvml) from 0.12.0-6 to 0.12.9-0. - [Commits](https://github.com/NVIDIA/go-nvml/compare/v0.12.0-6...v0.12.9-0) --- updated-dependencies: - dependency-name: github.com/NVIDIA/go-nvml dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 8 +- .../NVIDIA/go-nvml/pkg/nvml/const.go | 983 ++- .../NVIDIA/go-nvml/pkg/nvml/device.go | 759 +- .../NVIDIA/go-nvml/pkg/nvml/event_set.go | 31 +- .../github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go | 55 +- .../github.com/NVIDIA/go-nvml/pkg/nvml/lib.go | 5 + .../NVIDIA/go-nvml/pkg/nvml/mock/device.go | 2581 ++++++- .../go-nvml/pkg/nvml/mock/gpuinstance.go | 316 +- .../NVIDIA/go-nvml/pkg/nvml/mock/interface.go | 4267 ++++++++++-- .../go-nvml/pkg/nvml/mock/vgpuinstance.go | 37 + .../go-nvml/pkg/nvml/mock/vgputypeid.go | 125 + .../NVIDIA/go-nvml/pkg/nvml/nvml.go | 1158 ++- .../github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h | 6202 +++++++++++++---- .../NVIDIA/go-nvml/pkg/nvml/system.go | 67 + .../NVIDIA/go-nvml/pkg/nvml/types_gen.go | 874 ++- .../NVIDIA/go-nvml/pkg/nvml/vgpu.go | 29 + .../go-nvml/pkg/nvml/zz_generated.api.go | 809 ++- .../testify/assert/assertion_compare.go | 35 +- .../testify/assert/assertion_format.go | 34 +- .../testify/assert/assertion_forward.go | 68 +- .../testify/assert/assertion_order.go | 10 +- .../stretchr/testify/assert/assertions.go | 157 +- .../testify/assert/yaml/yaml_custom.go | 25 + .../testify/assert/yaml/yaml_default.go | 37 + .../stretchr/testify/assert/yaml/yaml_fail.go | 18 + .../stretchr/testify/require/require.go | 432 +- .../stretchr/testify/require/require.go.tmpl | 2 +- .../testify/require/require_forward.go 
| 68 +- .../stretchr/testify/require/requirements.go | 2 +- vendor/modules.txt | 5 +- 31 files changed, 16009 insertions(+), 3194 deletions(-) create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go diff --git a/go.mod b/go.mod index 6c44c45..71f0c68 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.20 require ( github.com/NVIDIA/go-nvlib v0.5.0 - github.com/NVIDIA/go-nvml v0.12.0-6 - github.com/stretchr/testify v1.9.0 + github.com/NVIDIA/go-nvml v0.12.9-0 + github.com/stretchr/testify v1.10.0 ) require ( diff --git a/go.sum b/go.sum index 6aeaad0..71cea1f 100644 --- a/go.sum +++ b/go.sum @@ -1,15 +1,15 @@ github.com/NVIDIA/go-nvlib v0.5.0 h1:951KGrfr+p3cs89alO9z/ZxPPWKxwht9tx9rxiADoLI= github.com/NVIDIA/go-nvlib v0.5.0/go.mod h1:87z49ULPr4GWPSGfSIp3taU4XENRYN/enIg88MzcL4k= -github.com/NVIDIA/go-nvml v0.12.0-6 h1:FJYc2KrpvX+VOC/8QQvMiQMmZ/nPMRpdJO/Ik4xfcr0= -github.com/NVIDIA/go-nvml v0.12.0-6/go.mod h1:8Llmj+1Rr+9VGGwZuRer5N/aCjxGuR5nPb/9ebBiIEQ= +github.com/NVIDIA/go-nvml v0.12.9-0 h1:e344UK8ZkeMeeLkdQtRhmXRxNf+u532LDZPGMtkdus0= +github.com/NVIDIA/go-nvml v0.12.9-0/go.mod h1:+KNA7c7gIBH7SKSJ1ntlwkfN80zdx8ovl4hrK3LmPt4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go index f4cecfb..09e82fc 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go @@ -31,9 +31,9 @@ const ( // NO_UNVERSIONED_FUNC_DEFS as defined in go-nvml/:24 NO_UNVERSIONED_FUNC_DEFS = 1 // API_VERSION as defined in nvml/nvml.h - API_VERSION = 11 + API_VERSION = 12 // API_VERSION_STR as defined in nvml/nvml.h - API_VERSION_STR = "11" + API_VERSION_STR = "12" // VALUE_NOT_AVAILABLE as defined in nvml/nvml.h VALUE_NOT_AVAILABLE = -1 // DEVICE_PCI_BUS_ID_BUFFER_SIZE as defined in nvml/nvml.h @@ -52,6 +52,10 @@ const ( MAX_PHYSICAL_BRIDGE = 128 // MAX_THERMAL_SENSORS_PER_GPU as defined in nvml/nvml.h MAX_THERMAL_SENSORS_PER_GPU = 3 + // DEVICE_UUID_ASCII_LEN as defined in nvml/nvml.h + DEVICE_UUID_ASCII_LEN = 41 + // DEVICE_UUID_BINARY_LEN as defined in nvml/nvml.h + DEVICE_UUID_BINARY_LEN = 16 // FlagDefault as defined in nvml/nvml.h FlagDefault = 0 // FlagForce as defined in nvml/nvml.h @@ -62,46 +66,8 @@ const ( DOUBLE_BIT_ECC = 0 // MAX_GPU_PERF_PSTATES as defined in nvml/nvml.h MAX_GPU_PERF_PSTATES = 16 - // GRID_LICENSE_EXPIRY_NOT_AVAILABLE as defined in nvml/nvml.h - GRID_LICENSE_EXPIRY_NOT_AVAILABLE = 0 - // GRID_LICENSE_EXPIRY_INVALID as defined in nvml/nvml.h - GRID_LICENSE_EXPIRY_INVALID = 1 - // GRID_LICENSE_EXPIRY_VALID as defined in nvml/nvml.h - GRID_LICENSE_EXPIRY_VALID = 2 - 
// GRID_LICENSE_EXPIRY_NOT_APPLICABLE as defined in nvml/nvml.h - GRID_LICENSE_EXPIRY_NOT_APPLICABLE = 3 - // GRID_LICENSE_EXPIRY_PERMANENT as defined in nvml/nvml.h - GRID_LICENSE_EXPIRY_PERMANENT = 4 - // GRID_LICENSE_BUFFER_SIZE as defined in nvml/nvml.h - GRID_LICENSE_BUFFER_SIZE = 128 - // VGPU_NAME_BUFFER_SIZE as defined in nvml/nvml.h - VGPU_NAME_BUFFER_SIZE = 64 - // GRID_LICENSE_FEATURE_MAX_COUNT as defined in nvml/nvml.h - GRID_LICENSE_FEATURE_MAX_COUNT = 3 - // VGPU_SCHEDULER_POLICY_UNKNOWN as defined in nvml/nvml.h - VGPU_SCHEDULER_POLICY_UNKNOWN = 0 - // VGPU_SCHEDULER_POLICY_BEST_EFFORT as defined in nvml/nvml.h - VGPU_SCHEDULER_POLICY_BEST_EFFORT = 1 - // VGPU_SCHEDULER_POLICY_EQUAL_SHARE as defined in nvml/nvml.h - VGPU_SCHEDULER_POLICY_EQUAL_SHARE = 2 - // VGPU_SCHEDULER_POLICY_FIXED_SHARE as defined in nvml/nvml.h - VGPU_SCHEDULER_POLICY_FIXED_SHARE = 3 - // SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT as defined in nvml/nvml.h - SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT = 3 - // SCHEDULER_SW_MAX_LOG_ENTRIES as defined in nvml/nvml.h - SCHEDULER_SW_MAX_LOG_ENTRIES = 200 - // GRID_LICENSE_STATE_UNKNOWN as defined in nvml/nvml.h - GRID_LICENSE_STATE_UNKNOWN = 0 - // GRID_LICENSE_STATE_UNINITIALIZED as defined in nvml/nvml.h - GRID_LICENSE_STATE_UNINITIALIZED = 1 - // GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED as defined in nvml/nvml.h - GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED = 2 - // GRID_LICENSE_STATE_UNLICENSED_RESTRICTED as defined in nvml/nvml.h - GRID_LICENSE_STATE_UNLICENSED_RESTRICTED = 3 - // GRID_LICENSE_STATE_UNLICENSED as defined in nvml/nvml.h - GRID_LICENSE_STATE_UNLICENSED = 4 - // GRID_LICENSE_STATE_LICENSED as defined in nvml/nvml.h - GRID_LICENSE_STATE_LICENSED = 5 + // PERF_MODES_BUFFER_SIZE as defined in nvml/nvml.h + PERF_MODES_BUFFER_SIZE = 2048 // GSP_FIRMWARE_VERSION_BUF_SIZE as defined in nvml/nvml.h GSP_FIRMWARE_VERSION_BUF_SIZE = 64 // DEVICE_ARCH_KEPLER as defined in nvml/nvml.h @@ -120,6 +86,10 @@ const ( DEVICE_ARCH_ADA = 8 // 
DEVICE_ARCH_HOPPER as defined in nvml/nvml.h DEVICE_ARCH_HOPPER = 9 + // DEVICE_ARCH_BLACKWELL as defined in nvml/nvml.h + DEVICE_ARCH_BLACKWELL = 10 + // DEVICE_ARCH_T23X as defined in nvml/nvml.h + DEVICE_ARCH_T23X = 11 // DEVICE_ARCH_UNKNOWN as defined in nvml/nvml.h DEVICE_ARCH_UNKNOWN = 4294967295 // BUS_TYPE_UNKNOWN as defined in nvml/nvml.h @@ -140,6 +110,8 @@ const ( POWER_SOURCE_AC = 0 // POWER_SOURCE_BATTERY as defined in nvml/nvml.h POWER_SOURCE_BATTERY = 1 + // POWER_SOURCE_UNDERSIZED as defined in nvml/nvml.h + POWER_SOURCE_UNDERSIZED = 2 // PCIE_LINK_MAX_SPEED_INVALID as defined in nvml/nvml.h PCIE_LINK_MAX_SPEED_INVALID = 0 // PCIE_LINK_MAX_SPEED_2500MBPS as defined in nvml/nvml.h @@ -160,6 +132,82 @@ const ( ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED = 1 // MAX_GPU_UTILIZATIONS as defined in nvml/nvml.h MAX_GPU_UTILIZATIONS = 8 + // PCIE_ATOMICS_CAP_FETCHADD32 as defined in nvml/nvml.h + PCIE_ATOMICS_CAP_FETCHADD32 = 1 + // PCIE_ATOMICS_CAP_FETCHADD64 as defined in nvml/nvml.h + PCIE_ATOMICS_CAP_FETCHADD64 = 2 + // PCIE_ATOMICS_CAP_SWAP32 as defined in nvml/nvml.h + PCIE_ATOMICS_CAP_SWAP32 = 4 + // PCIE_ATOMICS_CAP_SWAP64 as defined in nvml/nvml.h + PCIE_ATOMICS_CAP_SWAP64 = 8 + // PCIE_ATOMICS_CAP_CAS32 as defined in nvml/nvml.h + PCIE_ATOMICS_CAP_CAS32 = 16 + // PCIE_ATOMICS_CAP_CAS64 as defined in nvml/nvml.h + PCIE_ATOMICS_CAP_CAS64 = 32 + // PCIE_ATOMICS_CAP_CAS128 as defined in nvml/nvml.h + PCIE_ATOMICS_CAP_CAS128 = 64 + // PCIE_ATOMICS_OPS_MAX as defined in nvml/nvml.h + PCIE_ATOMICS_OPS_MAX = 7 + // POWER_SCOPE_GPU as defined in nvml/nvml.h + POWER_SCOPE_GPU = 0 + // POWER_SCOPE_MODULE as defined in nvml/nvml.h + POWER_SCOPE_MODULE = 1 + // POWER_SCOPE_MEMORY as defined in nvml/nvml.h + POWER_SCOPE_MEMORY = 2 + // GRID_LICENSE_EXPIRY_NOT_AVAILABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_AVAILABLE = 0 + // GRID_LICENSE_EXPIRY_INVALID as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_INVALID = 1 + // GRID_LICENSE_EXPIRY_VALID as defined 
in nvml/nvml.h + GRID_LICENSE_EXPIRY_VALID = 2 + // GRID_LICENSE_EXPIRY_NOT_APPLICABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_APPLICABLE = 3 + // GRID_LICENSE_EXPIRY_PERMANENT as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_PERMANENT = 4 + // GRID_LICENSE_BUFFER_SIZE as defined in nvml/nvml.h + GRID_LICENSE_BUFFER_SIZE = 128 + // VGPU_NAME_BUFFER_SIZE as defined in nvml/nvml.h + VGPU_NAME_BUFFER_SIZE = 64 + // GRID_LICENSE_FEATURE_MAX_COUNT as defined in nvml/nvml.h + GRID_LICENSE_FEATURE_MAX_COUNT = 3 + // INVALID_VGPU_PLACEMENT_ID as defined in nvml/nvml.h + INVALID_VGPU_PLACEMENT_ID = 65535 + // VGPU_PGPU_HETEROGENEOUS_MODE as defined in nvml/nvml.h + VGPU_PGPU_HETEROGENEOUS_MODE = 0 + // VGPU_PGPU_HOMOGENEOUS_MODE as defined in nvml/nvml.h + VGPU_PGPU_HOMOGENEOUS_MODE = 1 + // VGPU_SCHEDULER_POLICY_UNKNOWN as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_UNKNOWN = 0 + // VGPU_SCHEDULER_POLICY_BEST_EFFORT as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_BEST_EFFORT = 1 + // VGPU_SCHEDULER_POLICY_EQUAL_SHARE as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_EQUAL_SHARE = 2 + // VGPU_SCHEDULER_POLICY_FIXED_SHARE as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_FIXED_SHARE = 3 + // SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT as defined in nvml/nvml.h + SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT = 3 + // SCHEDULER_SW_MAX_LOG_ENTRIES as defined in nvml/nvml.h + SCHEDULER_SW_MAX_LOG_ENTRIES = 200 + // VGPU_SCHEDULER_ARR_DEFAULT as defined in nvml/nvml.h + VGPU_SCHEDULER_ARR_DEFAULT = 0 + // VGPU_SCHEDULER_ARR_DISABLE as defined in nvml/nvml.h + VGPU_SCHEDULER_ARR_DISABLE = 1 + // VGPU_SCHEDULER_ARR_ENABLE as defined in nvml/nvml.h + VGPU_SCHEDULER_ARR_ENABLE = 2 + // VGPU_SCHEDULER_ENGINE_TYPE_GRAPHICS as defined in nvml/nvml.h + VGPU_SCHEDULER_ENGINE_TYPE_GRAPHICS = 1 + // GRID_LICENSE_STATE_UNKNOWN as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNKNOWN = 0 + // GRID_LICENSE_STATE_UNINITIALIZED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNINITIALIZED = 1 + 
// GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED = 2 + // GRID_LICENSE_STATE_UNLICENSED_RESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_RESTRICTED = 3 + // GRID_LICENSE_STATE_UNLICENSED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED = 4 + // GRID_LICENSE_STATE_LICENSED as defined in nvml/nvml.h + GRID_LICENSE_STATE_LICENSED = 5 // FI_DEV_ECC_CURRENT as defined in nvml/nvml.h FI_DEV_ECC_CURRENT = 1 // FI_DEV_ECC_PENDING as defined in nvml/nvml.h @@ -498,8 +546,242 @@ const ( FI_DEV_NVLINK_GET_POWER_THRESHOLD = 168 // FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER as defined in nvml/nvml.h FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER = 169 + // FI_DEV_C2C_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_C2C_LINK_COUNT = 170 + // FI_DEV_C2C_LINK_GET_STATUS as defined in nvml/nvml.h + FI_DEV_C2C_LINK_GET_STATUS = 171 + // FI_DEV_C2C_LINK_GET_MAX_BW as defined in nvml/nvml.h + FI_DEV_C2C_LINK_GET_MAX_BW = 172 + // FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS = 173 + // FI_DEV_PCIE_COUNT_NAKS_RECEIVED as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_NAKS_RECEIVED = 174 + // FI_DEV_PCIE_COUNT_RECEIVER_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_RECEIVER_ERROR = 175 + // FI_DEV_PCIE_COUNT_BAD_TLP as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_BAD_TLP = 176 + // FI_DEV_PCIE_COUNT_NAKS_SENT as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_NAKS_SENT = 177 + // FI_DEV_PCIE_COUNT_BAD_DLLP as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_BAD_DLLP = 178 + // FI_DEV_PCIE_COUNT_NON_FATAL_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_NON_FATAL_ERROR = 179 + // FI_DEV_PCIE_COUNT_FATAL_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_FATAL_ERROR = 180 + // FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ = 181 + // FI_DEV_PCIE_COUNT_LCRC_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_LCRC_ERROR = 
182 + // FI_DEV_PCIE_COUNT_LANE_ERROR as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_LANE_ERROR = 183 + // FI_DEV_IS_RESETLESS_MIG_SUPPORTED as defined in nvml/nvml.h + FI_DEV_IS_RESETLESS_MIG_SUPPORTED = 184 + // FI_DEV_POWER_AVERAGE as defined in nvml/nvml.h + FI_DEV_POWER_AVERAGE = 185 + // FI_DEV_POWER_INSTANT as defined in nvml/nvml.h + FI_DEV_POWER_INSTANT = 186 + // FI_DEV_POWER_MIN_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_MIN_LIMIT = 187 + // FI_DEV_POWER_MAX_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_MAX_LIMIT = 188 + // FI_DEV_POWER_DEFAULT_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_DEFAULT_LIMIT = 189 + // FI_DEV_POWER_CURRENT_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_CURRENT_LIMIT = 190 + // FI_DEV_ENERGY as defined in nvml/nvml.h + FI_DEV_ENERGY = 191 + // FI_DEV_POWER_REQUESTED_LIMIT as defined in nvml/nvml.h + FI_DEV_POWER_REQUESTED_LIMIT = 192 + // FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT = 193 + // FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT = 194 + // FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT = 195 + // FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT as defined in nvml/nvml.h + FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT = 196 + // FI_DEV_PCIE_COUNT_TX_BYTES as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_TX_BYTES = 197 + // FI_DEV_PCIE_COUNT_RX_BYTES as defined in nvml/nvml.h + FI_DEV_PCIE_COUNT_RX_BYTES = 198 + // FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE as defined in nvml/nvml.h + FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE = 199 + // FI_DEV_NVLINK_GET_POWER_THRESHOLD_MAX as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_THRESHOLD_MAX = 200 + // FI_DEV_NVLINK_COUNT_XMIT_PACKETS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_XMIT_PACKETS = 201 + // FI_DEV_NVLINK_COUNT_XMIT_BYTES as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_XMIT_BYTES = 202 + // 
FI_DEV_NVLINK_COUNT_RCV_PACKETS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RCV_PACKETS = 203 + // FI_DEV_NVLINK_COUNT_RCV_BYTES as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RCV_BYTES = 204 + // FI_DEV_NVLINK_COUNT_VL15_DROPPED as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_VL15_DROPPED = 205 + // FI_DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS = 206 + // FI_DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS = 207 + // FI_DEV_NVLINK_COUNT_RCV_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RCV_ERRORS = 208 + // FI_DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS = 209 + // FI_DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS = 210 + // FI_DEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORS = 211 + // FI_DEV_NVLINK_COUNT_XMIT_DISCARDS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_XMIT_DISCARDS = 212 + // FI_DEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTS = 213 + // FI_DEV_NVLINK_COUNT_LINK_RECOVERY_FAILED_EVENTS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_LINK_RECOVERY_FAILED_EVENTS = 214 + // FI_DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTS = 215 + // FI_DEV_NVLINK_COUNT_RAW_BER_LANE0 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RAW_BER_LANE0 = 216 + // FI_DEV_NVLINK_COUNT_RAW_BER_LANE1 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RAW_BER_LANE1 = 217 + // FI_DEV_NVLINK_COUNT_RAW_BER as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_RAW_BER = 218 + // FI_DEV_NVLINK_COUNT_EFFECTIVE_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_EFFECTIVE_ERRORS = 219 + // FI_DEV_NVLINK_COUNT_EFFECTIVE_BER as defined 
in nvml/nvml.h + FI_DEV_NVLINK_COUNT_EFFECTIVE_BER = 220 + // FI_DEV_NVLINK_COUNT_SYMBOL_ERRORS as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_SYMBOL_ERRORS = 221 + // FI_DEV_NVLINK_COUNT_SYMBOL_BER as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_SYMBOL_BER = 222 + // FI_DEV_NVLINK_GET_POWER_THRESHOLD_MIN as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_THRESHOLD_MIN = 223 + // FI_DEV_NVLINK_GET_POWER_THRESHOLD_UNITS as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_THRESHOLD_UNITS = 224 + // FI_DEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTED as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTED = 225 + // FI_DEV_RESET_STATUS as defined in nvml/nvml.h + FI_DEV_RESET_STATUS = 226 + // FI_DEV_DRAIN_AND_RESET_STATUS as defined in nvml/nvml.h + FI_DEV_DRAIN_AND_RESET_STATUS = 227 + // FI_DEV_PCIE_OUTBOUND_ATOMICS_MASK as defined in nvml/nvml.h + FI_DEV_PCIE_OUTBOUND_ATOMICS_MASK = 228 + // FI_DEV_PCIE_INBOUND_ATOMICS_MASK as defined in nvml/nvml.h + FI_DEV_PCIE_INBOUND_ATOMICS_MASK = 229 + // FI_DEV_GET_GPU_RECOVERY_ACTION as defined in nvml/nvml.h + FI_DEV_GET_GPU_RECOVERY_ACTION = 230 + // FI_DEV_C2C_LINK_ERROR_INTR as defined in nvml/nvml.h + FI_DEV_C2C_LINK_ERROR_INTR = 231 + // FI_DEV_C2C_LINK_ERROR_REPLAY as defined in nvml/nvml.h + FI_DEV_C2C_LINK_ERROR_REPLAY = 232 + // FI_DEV_C2C_LINK_ERROR_REPLAY_B2B as defined in nvml/nvml.h + FI_DEV_C2C_LINK_ERROR_REPLAY_B2B = 233 + // FI_DEV_C2C_LINK_POWER_STATE as defined in nvml/nvml.h + FI_DEV_C2C_LINK_POWER_STATE = 234 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_0 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_0 = 235 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_1 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_1 = 236 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_2 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_2 = 237 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_3 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_3 = 238 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_4 as defined in nvml/nvml.h 
+ FI_DEV_NVLINK_COUNT_FEC_HISTORY_4 = 239 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_5 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_5 = 240 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_6 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_6 = 241 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_7 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_7 = 242 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_8 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_8 = 243 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_9 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_9 = 244 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_10 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_10 = 245 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_11 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_11 = 246 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_12 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_12 = 247 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_13 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_13 = 248 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_14 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_14 = 249 + // FI_DEV_NVLINK_COUNT_FEC_HISTORY_15 as defined in nvml/nvml.h + FI_DEV_NVLINK_COUNT_FEC_HISTORY_15 = 250 + // FI_DEV_CLOCKS_EVENT_REASON_SW_POWER_CAP as defined in nvml/nvml.h + FI_DEV_CLOCKS_EVENT_REASON_SW_POWER_CAP = 74 + // FI_DEV_CLOCKS_EVENT_REASON_SYNC_BOOST as defined in nvml/nvml.h + FI_DEV_CLOCKS_EVENT_REASON_SYNC_BOOST = 76 + // FI_DEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWN as defined in nvml/nvml.h + FI_DEV_CLOCKS_EVENT_REASON_SW_THERM_SLOWDOWN = 251 + // FI_DEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWN as defined in nvml/nvml.h + FI_DEV_CLOCKS_EVENT_REASON_HW_THERM_SLOWDOWN = 252 + // FI_DEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWN as defined in nvml/nvml.h + FI_DEV_CLOCKS_EVENT_REASON_HW_POWER_BRAKE_SLOWDOWN = 253 + // FI_DEV_POWER_SYNC_BALANCING_FREQ as defined in nvml/nvml.h + FI_DEV_POWER_SYNC_BALANCING_FREQ = 254 + // 
FI_DEV_POWER_SYNC_BALANCING_AF as defined in nvml/nvml.h + FI_DEV_POWER_SYNC_BALANCING_AF = 255 + // FI_PWR_SMOOTHING_ENABLED as defined in nvml/nvml.h + FI_PWR_SMOOTHING_ENABLED = 256 + // FI_PWR_SMOOTHING_PRIV_LVL as defined in nvml/nvml.h + FI_PWR_SMOOTHING_PRIV_LVL = 257 + // FI_PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED as defined in nvml/nvml.h + FI_PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED = 258 + // FI_PWR_SMOOTHING_APPLIED_TMP_CEIL as defined in nvml/nvml.h + FI_PWR_SMOOTHING_APPLIED_TMP_CEIL = 259 + // FI_PWR_SMOOTHING_APPLIED_TMP_FLOOR as defined in nvml/nvml.h + FI_PWR_SMOOTHING_APPLIED_TMP_FLOOR = 260 + // FI_PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING as defined in nvml/nvml.h + FI_PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING = 261 + // FI_PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING as defined in nvml/nvml.h + FI_PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING = 262 + // FI_PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING as defined in nvml/nvml.h + FI_PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING = 263 + // FI_PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES as defined in nvml/nvml.h + FI_PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES = 264 + // FI_PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR as defined in nvml/nvml.h + FI_PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR = 265 + // FI_PWR_SMOOTHING_PROFILE_RAMP_UP_RATE as defined in nvml/nvml.h + FI_PWR_SMOOTHING_PROFILE_RAMP_UP_RATE = 266 + // FI_PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE as defined in nvml/nvml.h + FI_PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE = 267 + // FI_PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL as defined in nvml/nvml.h + FI_PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL = 268 + // FI_PWR_SMOOTHING_ACTIVE_PRESET_PROFILE as defined in nvml/nvml.h + FI_PWR_SMOOTHING_ACTIVE_PRESET_PROFILE = 269 + // FI_PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR as defined in nvml/nvml.h + FI_PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR = 270 + // FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE as defined in nvml/nvml.h + 
FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE = 271 + // FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATE as defined in nvml/nvml.h + FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATE = 272 + // FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_HYST_VAL as defined in nvml/nvml.h + FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_HYST_VAL = 273 // FI_MAX as defined in nvml/nvml.h - FI_MAX = 170 + FI_MAX = 274 + // NVLINK_LOW_POWER_THRESHOLD_UNIT_100US as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_UNIT_100US = 0 + // NVLINK_LOW_POWER_THRESHOLD_UNIT_50US as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_UNIT_50US = 1 + // NVLINK_POWER_STATE_HIGH_SPEED as defined in nvml/nvml.h + NVLINK_POWER_STATE_HIGH_SPEED = 0 + // NVLINK_POWER_STATE_LOW as defined in nvml/nvml.h + NVLINK_POWER_STATE_LOW = 1 + // NVLINK_LOW_POWER_THRESHOLD_MIN as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_MIN = 1 + // NVLINK_LOW_POWER_THRESHOLD_MAX as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_MAX = 8191 + // NVLINK_LOW_POWER_THRESHOLD_RESET as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_RESET = 4294967295 + // NVLINK_LOW_POWER_THRESHOLD_DEFAULT as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_DEFAULT = 4294967295 + // C2C_POWER_STATE_FULL_POWER as defined in nvml/nvml.h + C2C_POWER_STATE_FULL_POWER = 0 + // C2C_POWER_STATE_LOW_POWER as defined in nvml/nvml.h + C2C_POWER_STATE_LOW_POWER = 1 + // EventTypeNone as defined in nvml/nvml.h + EventTypeNone = 0 // EventTypeSingleBitEccError as defined in nvml/nvml.h EventTypeSingleBitEccError = 1 // EventTypeDoubleBitEccError as defined in nvml/nvml.h @@ -514,28 +796,62 @@ const ( EventTypePowerSourceChange = 128 // EventMigConfigChange as defined in nvml/nvml.h EventMigConfigChange = 256 - // EventTypeNone as defined in nvml/nvml.h - EventTypeNone = 0 + // EventTypeSingleBitEccErrorStorm as defined in nvml/nvml.h + EventTypeSingleBitEccErrorStorm = 512 + // EventTypeDramRetirementEvent as defined in nvml/nvml.h + 
EventTypeDramRetirementEvent = 1024 + // EventTypeDramRetirementFailure as defined in nvml/nvml.h + EventTypeDramRetirementFailure = 2048 + // EventTypeNonFatalPoisonError as defined in nvml/nvml.h + EventTypeNonFatalPoisonError = 4096 + // EventTypeFatalPoisonError as defined in nvml/nvml.h + EventTypeFatalPoisonError = 8192 + // EventTypeGpuUnavailableError as defined in nvml/nvml.h + EventTypeGpuUnavailableError = 16384 + // EventTypeGpuRecoveryAction as defined in nvml/nvml.h + EventTypeGpuRecoveryAction = 32768 // EventTypeAll as defined in nvml/nvml.h - EventTypeAll = 415 - // ClocksThrottleReasonGpuIdle as defined in nvml/nvml.h - ClocksThrottleReasonGpuIdle = 1 - // ClocksThrottleReasonApplicationsClocksSetting as defined in nvml/nvml.h - ClocksThrottleReasonApplicationsClocksSetting = 2 + EventTypeAll = 65439 + // SystemEventTypeGpuDriverUnbind as defined in nvml/nvml.h + SystemEventTypeGpuDriverUnbind = 1 + // SystemEventTypeGpuDriverBind as defined in nvml/nvml.h + SystemEventTypeGpuDriverBind = 2 + // SystemEventTypeCount as defined in nvml/nvml.h + SystemEventTypeCount = 2 + // ClocksEventReasonGpuIdle as defined in nvml/nvml.h + ClocksEventReasonGpuIdle = 1 + // ClocksEventReasonApplicationsClocksSetting as defined in nvml/nvml.h + ClocksEventReasonApplicationsClocksSetting = 2 // ClocksThrottleReasonUserDefinedClocks as defined in nvml/nvml.h ClocksThrottleReasonUserDefinedClocks = 2 - // ClocksThrottleReasonSwPowerCap as defined in nvml/nvml.h - ClocksThrottleReasonSwPowerCap = 4 + // ClocksEventReasonSwPowerCap as defined in nvml/nvml.h + ClocksEventReasonSwPowerCap = 4 // ClocksThrottleReasonHwSlowdown as defined in nvml/nvml.h ClocksThrottleReasonHwSlowdown = 8 - // ClocksThrottleReasonSyncBoost as defined in nvml/nvml.h - ClocksThrottleReasonSyncBoost = 16 - // ClocksThrottleReasonSwThermalSlowdown as defined in nvml/nvml.h - ClocksThrottleReasonSwThermalSlowdown = 32 + // ClocksEventReasonSyncBoost as defined in nvml/nvml.h + 
ClocksEventReasonSyncBoost = 16 + // ClocksEventReasonSwThermalSlowdown as defined in nvml/nvml.h + ClocksEventReasonSwThermalSlowdown = 32 // ClocksThrottleReasonHwThermalSlowdown as defined in nvml/nvml.h ClocksThrottleReasonHwThermalSlowdown = 64 // ClocksThrottleReasonHwPowerBrakeSlowdown as defined in nvml/nvml.h ClocksThrottleReasonHwPowerBrakeSlowdown = 128 + // ClocksEventReasonDisplayClockSetting as defined in nvml/nvml.h + ClocksEventReasonDisplayClockSetting = 256 + // ClocksEventReasonNone as defined in nvml/nvml.h + ClocksEventReasonNone = 0 + // ClocksEventReasonAll as defined in nvml/nvml.h + ClocksEventReasonAll = 511 + // ClocksThrottleReasonGpuIdle as defined in nvml/nvml.h + ClocksThrottleReasonGpuIdle = 1 + // ClocksThrottleReasonApplicationsClocksSetting as defined in nvml/nvml.h + ClocksThrottleReasonApplicationsClocksSetting = 2 + // ClocksThrottleReasonSyncBoost as defined in nvml/nvml.h + ClocksThrottleReasonSyncBoost = 16 + // ClocksThrottleReasonSwPowerCap as defined in nvml/nvml.h + ClocksThrottleReasonSwPowerCap = 4 + // ClocksThrottleReasonSwThermalSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonSwThermalSlowdown = 32 // ClocksThrottleReasonDisplayClockSetting as defined in nvml/nvml.h ClocksThrottleReasonDisplayClockSetting = 256 // ClocksThrottleReasonNone as defined in nvml/nvml.h @@ -552,6 +868,60 @@ const ( NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE = 8 // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT as defined in nvml/nvml.h NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT = 16 + // CC_SYSTEM_CPU_CAPS_NONE as defined in nvml/nvml.h + CC_SYSTEM_CPU_CAPS_NONE = 0 + // CC_SYSTEM_CPU_CAPS_AMD_SEV as defined in nvml/nvml.h + CC_SYSTEM_CPU_CAPS_AMD_SEV = 1 + // CC_SYSTEM_CPU_CAPS_INTEL_TDX as defined in nvml/nvml.h + CC_SYSTEM_CPU_CAPS_INTEL_TDX = 2 + // CC_SYSTEM_CPU_CAPS_AMD_SEV_SNP as defined in nvml/nvml.h + CC_SYSTEM_CPU_CAPS_AMD_SEV_SNP = 3 + // CC_SYSTEM_CPU_CAPS_AMD_SNP_VTOM as defined in nvml/nvml.h + 
CC_SYSTEM_CPU_CAPS_AMD_SNP_VTOM = 4 + // CC_SYSTEM_GPUS_CC_NOT_CAPABLE as defined in nvml/nvml.h + CC_SYSTEM_GPUS_CC_NOT_CAPABLE = 0 + // CC_SYSTEM_GPUS_CC_CAPABLE as defined in nvml/nvml.h + CC_SYSTEM_GPUS_CC_CAPABLE = 1 + // CC_SYSTEM_DEVTOOLS_MODE_OFF as defined in nvml/nvml.h + CC_SYSTEM_DEVTOOLS_MODE_OFF = 0 + // CC_SYSTEM_DEVTOOLS_MODE_ON as defined in nvml/nvml.h + CC_SYSTEM_DEVTOOLS_MODE_ON = 1 + // CC_SYSTEM_ENVIRONMENT_UNAVAILABLE as defined in nvml/nvml.h + CC_SYSTEM_ENVIRONMENT_UNAVAILABLE = 0 + // CC_SYSTEM_ENVIRONMENT_SIM as defined in nvml/nvml.h + CC_SYSTEM_ENVIRONMENT_SIM = 1 + // CC_SYSTEM_ENVIRONMENT_PROD as defined in nvml/nvml.h + CC_SYSTEM_ENVIRONMENT_PROD = 2 + // CC_SYSTEM_FEATURE_DISABLED as defined in nvml/nvml.h + CC_SYSTEM_FEATURE_DISABLED = 0 + // CC_SYSTEM_FEATURE_ENABLED as defined in nvml/nvml.h + CC_SYSTEM_FEATURE_ENABLED = 1 + // CC_SYSTEM_MULTIGPU_NONE as defined in nvml/nvml.h + CC_SYSTEM_MULTIGPU_NONE = 0 + // CC_SYSTEM_MULTIGPU_PROTECTED_PCIE as defined in nvml/nvml.h + CC_SYSTEM_MULTIGPU_PROTECTED_PCIE = 1 + // CC_ACCEPTING_CLIENT_REQUESTS_FALSE as defined in nvml/nvml.h + CC_ACCEPTING_CLIENT_REQUESTS_FALSE = 0 + // CC_ACCEPTING_CLIENT_REQUESTS_TRUE as defined in nvml/nvml.h + CC_ACCEPTING_CLIENT_REQUESTS_TRUE = 1 + // GPU_CERT_CHAIN_SIZE as defined in nvml/nvml.h + GPU_CERT_CHAIN_SIZE = 4096 + // GPU_ATTESTATION_CERT_CHAIN_SIZE as defined in nvml/nvml.h + GPU_ATTESTATION_CERT_CHAIN_SIZE = 5120 + // CC_GPU_CEC_NONCE_SIZE as defined in nvml/nvml.h + CC_GPU_CEC_NONCE_SIZE = 32 + // CC_GPU_ATTESTATION_REPORT_SIZE as defined in nvml/nvml.h + CC_GPU_ATTESTATION_REPORT_SIZE = 8192 + // CC_GPU_CEC_ATTESTATION_REPORT_SIZE as defined in nvml/nvml.h + CC_GPU_CEC_ATTESTATION_REPORT_SIZE = 4096 + // CC_CEC_ATTESTATION_REPORT_NOT_PRESENT as defined in nvml/nvml.h + CC_CEC_ATTESTATION_REPORT_NOT_PRESENT = 0 + // CC_CEC_ATTESTATION_REPORT_PRESENT as defined in nvml/nvml.h + CC_CEC_ATTESTATION_REPORT_PRESENT = 1 + // 
CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN as defined in nvml/nvml.h + CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN = 50 + // CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX as defined in nvml/nvml.h + CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX = 65 // GPU_FABRIC_UUID_LEN as defined in nvml/nvml.h GPU_FABRIC_UUID_LEN = 16 // GPU_FABRIC_STATE_NOT_SUPPORTED as defined in nvml/nvml.h @@ -562,6 +932,46 @@ const ( GPU_FABRIC_STATE_IN_PROGRESS = 2 // GPU_FABRIC_STATE_COMPLETED as defined in nvml/nvml.h GPU_FABRIC_STATE_COMPLETED = 3 + // GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED = 0 + // GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE = 1 + // GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE = 2 + // GPU_FABRIC_HEALTH_MASK_SHIFT_DEGRADED_BW as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_SHIFT_DEGRADED_BW = 0 + // GPU_FABRIC_HEALTH_MASK_WIDTH_DEGRADED_BW as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_WIDTH_DEGRADED_BW = 3 + // GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_NOT_SUPPORTED as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_NOT_SUPPORTED = 0 + // GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_TRUE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_TRUE = 1 + // GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_FALSE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_FALSE = 2 + // GPU_FABRIC_HEALTH_MASK_SHIFT_ROUTE_RECOVERY as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_SHIFT_ROUTE_RECOVERY = 2 + // GPU_FABRIC_HEALTH_MASK_WIDTH_ROUTE_RECOVERY as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_WIDTH_ROUTE_RECOVERY = 3 + // GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_NOT_SUPPORTED as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_NOT_SUPPORTED = 0 + // GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_TRUE as 
defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_TRUE = 1 + // GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_FALSE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_FALSE = 2 + // GPU_FABRIC_HEALTH_MASK_SHIFT_ROUTE_UNHEALTHY as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_SHIFT_ROUTE_UNHEALTHY = 4 + // GPU_FABRIC_HEALTH_MASK_WIDTH_ROUTE_UNHEALTHY as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_WIDTH_ROUTE_UNHEALTHY = 3 + // GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_NOT_SUPPORTED as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_NOT_SUPPORTED = 0 + // GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_TRUE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_TRUE = 1 + // GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_FALSE as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_FALSE = 2 + // GPU_FABRIC_HEALTH_MASK_SHIFT_ACCESS_TIMEOUT_RECOVERY as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_SHIFT_ACCESS_TIMEOUT_RECOVERY = 6 + // GPU_FABRIC_HEALTH_MASK_WIDTH_ACCESS_TIMEOUT_RECOVERY as defined in nvml/nvml.h + GPU_FABRIC_HEALTH_MASK_WIDTH_ACCESS_TIMEOUT_RECOVERY = 3 // INIT_FLAG_NO_GPUS as defined in nvml/nvml.h INIT_FLAG_NO_GPUS = 1 // INIT_FLAG_NO_ATTACH as defined in nvml/nvml.h @@ -590,6 +1000,22 @@ const ( AFFINITY_SCOPE_NODE = 0 // AFFINITY_SCOPE_SOCKET as defined in nvml/nvml.h AFFINITY_SCOPE_SOCKET = 1 + // NVLINK_BER_MANTISSA_SHIFT as defined in nvml/nvml.h + NVLINK_BER_MANTISSA_SHIFT = 8 + // NVLINK_BER_MANTISSA_WIDTH as defined in nvml/nvml.h + NVLINK_BER_MANTISSA_WIDTH = 15 + // NVLINK_BER_EXP_SHIFT as defined in nvml/nvml.h + NVLINK_BER_EXP_SHIFT = 0 + // NVLINK_BER_EXP_WIDTH as defined in nvml/nvml.h + NVLINK_BER_EXP_WIDTH = 255 + // NVLINK_STATE_INACTIVE as defined in nvml/nvml.h + NVLINK_STATE_INACTIVE = 0 + // NVLINK_STATE_ACTIVE as defined in nvml/nvml.h + NVLINK_STATE_ACTIVE = 1 + // NVLINK_STATE_SLEEP as defined in nvml/nvml.h + NVLINK_STATE_SLEEP 
= 2 + // NVLINK_TOTAL_SUPPORTED_BW_MODES as defined in nvml/nvml.h + NVLINK_TOTAL_SUPPORTED_BW_MODES = 23 // DEVICE_MIG_DISABLE as defined in nvml/nvml.h DEVICE_MIG_DISABLE = 0 // DEVICE_MIG_ENABLE as defined in nvml/nvml.h @@ -614,8 +1040,30 @@ const ( GPU_INSTANCE_PROFILE_2_SLICE_REV1 = 8 // GPU_INSTANCE_PROFILE_1_SLICE_REV2 as defined in nvml/nvml.h GPU_INSTANCE_PROFILE_1_SLICE_REV2 = 9 + // GPU_INSTANCE_PROFILE_1_SLICE_GFX as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_GFX = 10 + // GPU_INSTANCE_PROFILE_2_SLICE_GFX as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE_GFX = 11 + // GPU_INSTANCE_PROFILE_4_SLICE_GFX as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_4_SLICE_GFX = 12 + // GPU_INSTANCE_PROFILE_1_SLICE_NO_ME as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_NO_ME = 13 + // GPU_INSTANCE_PROFILE_2_SLICE_NO_ME as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE_NO_ME = 14 + // GPU_INSTANCE_PROFILE_1_SLICE_ALL_ME as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_ALL_ME = 15 + // GPU_INSTANCE_PROFILE_2_SLICE_ALL_ME as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE_ALL_ME = 16 // GPU_INSTANCE_PROFILE_COUNT as defined in nvml/nvml.h - GPU_INSTANCE_PROFILE_COUNT = 10 + GPU_INSTANCE_PROFILE_COUNT = 17 + // GPU_INSTANCE_PROFILE_CAPS_P2P as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_CAPS_P2P = 1 + // GPU_INTSTANCE_PROFILE_CAPS_P2P as defined in nvml/nvml.h + GPU_INTSTANCE_PROFILE_CAPS_P2P = 1 + // GPU_INSTANCE_PROFILE_CAPS_GFX as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_CAPS_GFX = 2 + // COMPUTE_INSTANCE_PROFILE_CAPS_GFX as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_CAPS_GFX = 1 // COMPUTE_INSTANCE_PROFILE_1_SLICE as defined in nvml/nvml.h COMPUTE_INSTANCE_PROFILE_1_SLICE = 0 // COMPUTE_INSTANCE_PROFILE_2_SLICE as defined in nvml/nvml.h @@ -642,20 +1090,24 @@ const ( GPM_METRICS_GET_VERSION = 1 // GPM_SUPPORT_VERSION as defined in nvml/nvml.h GPM_SUPPORT_VERSION = 1 - // 
COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE as defined in nvml/nvml.h - COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE = 0 - // COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE as defined in nvml/nvml.h - COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE = 1 - // NVLINK_POWER_STATE_HIGH_SPEED as defined in nvml/nvml.h - NVLINK_POWER_STATE_HIGH_SPEED = 0 - // NVLINK_POWER_STATE_LOW as defined in nvml/nvml.h - NVLINK_POWER_STATE_LOW = 1 - // NVLINK_LOW_POWER_THRESHOLD_MIN as defined in nvml/nvml.h - NVLINK_LOW_POWER_THRESHOLD_MIN = 1 - // NVLINK_LOW_POWER_THRESHOLD_MAX as defined in nvml/nvml.h - NVLINK_LOW_POWER_THRESHOLD_MAX = 8191 - // NVLINK_LOW_POWER_THRESHOLD_RESET as defined in nvml/nvml.h - NVLINK_LOW_POWER_THRESHOLD_RESET = 4294967295 + // DEV_CAP_EGM as defined in nvml/nvml.h + DEV_CAP_EGM = 1 + // WORKLOAD_POWER_MAX_PROFILES as defined in nvml/nvml.h + WORKLOAD_POWER_MAX_PROFILES = 255 + // POWER_SMOOTHING_MAX_NUM_PROFILES as defined in nvml/nvml.h + POWER_SMOOTHING_MAX_NUM_PROFILES = 5 + // POWER_SMOOTHING_NUM_PROFILE_PARAMS as defined in nvml/nvml.h + POWER_SMOOTHING_NUM_PROFILE_PARAMS = 4 + // POWER_SMOOTHING_ADMIN_OVERRIDE_NOT_SET as defined in nvml/nvml.h + POWER_SMOOTHING_ADMIN_OVERRIDE_NOT_SET = 4294967295 + // POWER_SMOOTHING_PROFILE_PARAM_PERCENT_TMP_FLOOR as defined in nvml/nvml.h + POWER_SMOOTHING_PROFILE_PARAM_PERCENT_TMP_FLOOR = 0 + // POWER_SMOOTHING_PROFILE_PARAM_RAMP_UP_RATE as defined in nvml/nvml.h + POWER_SMOOTHING_PROFILE_PARAM_RAMP_UP_RATE = 1 + // POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_RATE as defined in nvml/nvml.h + POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_RATE = 2 + // POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_HYSTERESIS as defined in nvml/nvml.h + POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_HYSTERESIS = 3 ) // BridgeChipType as declared in nvml/nvml.h @@ -753,6 +1205,7 @@ type GpuP2PStatus int32 const ( P2P_STATUS_OK GpuP2PStatus = iota P2P_STATUS_CHIPSET_NOT_SUPPORED GpuP2PStatus = 1 + P2P_STATUS_CHIPSET_NOT_SUPPORTED GpuP2PStatus = 1 
P2P_STATUS_GPU_NOT_SUPPORTED GpuP2PStatus = 2 P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED GpuP2PStatus = 3 P2P_STATUS_DISABLED_BY_REGKEY GpuP2PStatus = 4 @@ -769,6 +1222,7 @@ const ( P2P_CAPS_INDEX_WRITE GpuP2PCapsIndex = 1 P2P_CAPS_INDEX_NVLINK GpuP2PCapsIndex = 2 P2P_CAPS_INDEX_ATOMICS GpuP2PCapsIndex = 3 + P2P_CAPS_INDEX_PCI GpuP2PCapsIndex = 4 P2P_CAPS_INDEX_PROP GpuP2PCapsIndex = 4 P2P_CAPS_INDEX_UNKNOWN GpuP2PCapsIndex = 5 ) @@ -785,7 +1239,10 @@ const ( DEC_UTILIZATION_SAMPLES SamplingType = 4 PROCESSOR_CLK_SAMPLES SamplingType = 5 MEMORY_CLK_SAMPLES SamplingType = 6 - SAMPLINGTYPE_COUNT SamplingType = 7 + MODULE_POWER_SAMPLES SamplingType = 7 + JPG_UTILIZATION_SAMPLES SamplingType = 8 + OFA_UTILIZATION_SAMPLES SamplingType = 9 + SAMPLINGTYPE_COUNT SamplingType = 10 ) // PcieUtilCounter as declared in nvml/nvml.h @@ -808,7 +1265,9 @@ const ( VALUE_TYPE_UNSIGNED_LONG ValueType = 2 VALUE_TYPE_UNSIGNED_LONG_LONG ValueType = 3 VALUE_TYPE_SIGNED_LONG_LONG ValueType = 4 - VALUE_TYPE_COUNT ValueType = 5 + VALUE_TYPE_SIGNED_INT ValueType = 5 + VALUE_TYPE_UNSIGNED_SHORT ValueType = 6 + VALUE_TYPE_COUNT ValueType = 7 ) // PerfPolicyType as declared in nvml/nvml.h @@ -827,6 +1286,29 @@ const ( PERF_POLICY_COUNT PerfPolicyType = 12 ) +// CoolerControl as declared in nvml/nvml.h +type CoolerControl int32 + +// CoolerControl enumeration from nvml/nvml.h +const ( + THERMAL_COOLER_SIGNAL_NONE CoolerControl = iota + THERMAL_COOLER_SIGNAL_TOGGLE CoolerControl = 1 + THERMAL_COOLER_SIGNAL_VARIABLE CoolerControl = 2 + THERMAL_COOLER_SIGNAL_COUNT CoolerControl = 3 +) + +// CoolerTarget as declared in nvml/nvml.h +type CoolerTarget int32 + +// CoolerTarget enumeration from nvml/nvml.h +const ( + THERMAL_COOLER_TARGET_NONE CoolerTarget = 1 + THERMAL_COOLER_TARGET_GPU CoolerTarget = 2 + THERMAL_COOLER_TARGET_MEMORY CoolerTarget = 4 + THERMAL_COOLER_TARGET_POWER_SUPPLY CoolerTarget = 8 + THERMAL_COOLER_TARGET_GPU_RELATED CoolerTarget = 14 +) + // EnableState as declared in nvml/nvml.h type 
EnableState int32 @@ -874,7 +1356,8 @@ const ( TEMPERATURE_THRESHOLD_ACOUSTIC_MIN TemperatureThresholds = 4 TEMPERATURE_THRESHOLD_ACOUSTIC_CURR TemperatureThresholds = 5 TEMPERATURE_THRESHOLD_ACOUSTIC_MAX TemperatureThresholds = 6 - TEMPERATURE_THRESHOLD_COUNT TemperatureThresholds = 7 + TEMPERATURE_THRESHOLD_GPS_CURR TemperatureThresholds = 7 + TEMPERATURE_THRESHOLD_COUNT TemperatureThresholds = 8 ) // TemperatureSensors as declared in nvml/nvml.h @@ -908,6 +1391,21 @@ const ( MEMORY_ERROR_TYPE_COUNT MemoryErrorType = 2 ) +// NvlinkVersion as declared in nvml/nvml.h +type NvlinkVersion int32 + +// NvlinkVersion enumeration from nvml/nvml.h +const ( + NVLINK_VERSION_INVALID NvlinkVersion = iota + NVLINK_VERSION_1_0 NvlinkVersion = 1 + NVLINK_VERSION_2_0 NvlinkVersion = 2 + NVLINK_VERSION_2_2 NvlinkVersion = 3 + NVLINK_VERSION_3_0 NvlinkVersion = 4 + NVLINK_VERSION_3_1 NvlinkVersion = 5 + NVLINK_VERSION_4_0 NvlinkVersion = 6 + NVLINK_VERSION_5_0 NvlinkVersion = 7 +) + // EccCounterType as declared in nvml/nvml.h type EccCounterType int32 @@ -949,6 +1447,7 @@ type DriverModel int32 const ( DRIVER_WDDM DriverModel = iota DRIVER_WDM DriverModel = 1 + DRIVER_MCDM DriverModel = 2 ) // Pstates as declared in nvml/nvml.h @@ -993,7 +1492,8 @@ const ( INFOROM_OEM InforomObject = iota INFOROM_ECC InforomObject = 1 INFOROM_POWER InforomObject = 2 - INFOROM_COUNT InforomObject = 3 + INFOROM_DEN InforomObject = 3 + INFOROM_COUNT InforomObject = 4 ) // Return as declared in nvml/nvml.h @@ -1028,6 +1528,9 @@ const ( ERROR_FREQ_NOT_SUPPORTED Return = 24 ERROR_ARGUMENT_VERSION_MISMATCH Return = 25 ERROR_DEPRECATED Return = 26 + ERROR_NOT_READY Return = 27 + ERROR_GPU_NOT_FOUND Return = 28 + ERROR_INVALID_STATE Return = 29 ERROR_UNKNOWN Return = 999 ) @@ -1068,6 +1571,17 @@ const ( RESTRICTED_API_COUNT RestrictedAPI = 2 ) +// GpuUtilizationDomainId as declared in nvml/nvml.h +type GpuUtilizationDomainId int32 + +// GpuUtilizationDomainId enumeration from nvml/nvml.h +const ( + 
GPU_UTILIZATION_DOMAIN_GPU GpuUtilizationDomainId = iota + GPU_UTILIZATION_DOMAIN_FB GpuUtilizationDomainId = 1 + GPU_UTILIZATION_DOMAIN_VID GpuUtilizationDomainId = 2 + GPU_UTILIZATION_DOMAIN_BUS GpuUtilizationDomainId = 3 +) + // GpuVirtualizationMode as declared in nvml/nvml.h type GpuVirtualizationMode int32 @@ -1126,7 +1640,8 @@ type VgpuDriverCapability int32 // VgpuDriverCapability enumeration from nvml/nvml.h const ( VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU VgpuDriverCapability = iota - VGPU_DRIVER_CAP_COUNT VgpuDriverCapability = 1 + VGPU_DRIVER_CAP_WARM_UPDATE VgpuDriverCapability = 1 + VGPU_DRIVER_CAP_COUNT VgpuDriverCapability = 2 ) // DeviceVgpuCapability as declared in nvml/nvml.h @@ -1137,18 +1652,28 @@ const ( DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU DeviceVgpuCapability = iota DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES DeviceVgpuCapability = 1 DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES DeviceVgpuCapability = 2 - DEVICE_VGPU_CAP_COUNT DeviceVgpuCapability = 3 + DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW DeviceVgpuCapability = 3 + DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW DeviceVgpuCapability = 4 + DEVICE_VGPU_CAP_DEVICE_STREAMING DeviceVgpuCapability = 5 + DEVICE_VGPU_CAP_MINI_QUARTER_GPU DeviceVgpuCapability = 6 + DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU DeviceVgpuCapability = 7 + DEVICE_VGPU_CAP_WARM_UPDATE DeviceVgpuCapability = 8 + DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS DeviceVgpuCapability = 9 + DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED DeviceVgpuCapability = 10 + DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED DeviceVgpuCapability = 11 + DEVICE_VGPU_CAP_COUNT DeviceVgpuCapability = 12 ) -// GpuUtilizationDomainId as declared in nvml/nvml.h -type GpuUtilizationDomainId int32 +// DeviceGpuRecoveryAction as declared in nvml/nvml.h +type DeviceGpuRecoveryAction int32 -// GpuUtilizationDomainId enumeration from nvml/nvml.h +// DeviceGpuRecoveryAction enumeration from nvml/nvml.h const ( - GPU_UTILIZATION_DOMAIN_GPU GpuUtilizationDomainId = iota - 
GPU_UTILIZATION_DOMAIN_FB GpuUtilizationDomainId = 1 - GPU_UTILIZATION_DOMAIN_VID GpuUtilizationDomainId = 2 - GPU_UTILIZATION_DOMAIN_BUS GpuUtilizationDomainId = 3 + GPU_RECOVERY_ACTION_NONE DeviceGpuRecoveryAction = iota + GPU_RECOVERY_ACTION_GPU_RESET DeviceGpuRecoveryAction = 1 + GPU_RECOVERY_ACTION_NODE_REBOOT DeviceGpuRecoveryAction = 2 + GPU_RECOVERY_ACTION_DRAIN_P2P DeviceGpuRecoveryAction = 3 + GPU_RECOVERY_ACTION_DRAIN_AND_RESET DeviceGpuRecoveryAction = 4 ) // FanState as declared in nvml/nvml.h @@ -1174,8 +1699,10 @@ type EncoderType int32 // EncoderType enumeration from nvml/nvml.h const ( - ENCODER_QUERY_H264 EncoderType = iota - ENCODER_QUERY_HEVC EncoderType = 1 + ENCODER_QUERY_H264 EncoderType = iota + ENCODER_QUERY_HEVC EncoderType = 1 + ENCODER_QUERY_AV1 EncoderType = 2 + ENCODER_QUERY_UNKNOWN EncoderType = 255 ) // FBCSessionType as declared in nvml/nvml.h @@ -1285,6 +1812,16 @@ const ( THERMAL_CONTROLLER_UNKNOWN ThermalController = -1 ) +// UUIDType as declared in nvml/nvml.h +type UUIDType int32 + +// UUIDType enumeration from nvml/nvml.h +const ( + UUID_TYPE_NONE UUIDType = iota + UUID_TYPE_ASCII UUIDType = 1 + UUID_TYPE_BINARY UUIDType = 2 +) + // GridLicenseFeatureCode as declared in nvml/nvml.h type GridLicenseFeatureCode int32 @@ -1303,74 +1840,208 @@ type GpmMetricId int32 // GpmMetricId enumeration from nvml/nvml.h const ( - GPM_METRIC_GRAPHICS_UTIL GpmMetricId = 1 - GPM_METRIC_SM_UTIL GpmMetricId = 2 - GPM_METRIC_SM_OCCUPANCY GpmMetricId = 3 - GPM_METRIC_INTEGER_UTIL GpmMetricId = 4 - GPM_METRIC_ANY_TENSOR_UTIL GpmMetricId = 5 - GPM_METRIC_DFMA_TENSOR_UTIL GpmMetricId = 6 - GPM_METRIC_HMMA_TENSOR_UTIL GpmMetricId = 7 - GPM_METRIC_IMMA_TENSOR_UTIL GpmMetricId = 9 - GPM_METRIC_DRAM_BW_UTIL GpmMetricId = 10 - GPM_METRIC_FP64_UTIL GpmMetricId = 11 - GPM_METRIC_FP32_UTIL GpmMetricId = 12 - GPM_METRIC_FP16_UTIL GpmMetricId = 13 - GPM_METRIC_PCIE_TX_PER_SEC GpmMetricId = 20 - GPM_METRIC_PCIE_RX_PER_SEC GpmMetricId = 21 - 
GPM_METRIC_NVDEC_0_UTIL GpmMetricId = 30 - GPM_METRIC_NVDEC_1_UTIL GpmMetricId = 31 - GPM_METRIC_NVDEC_2_UTIL GpmMetricId = 32 - GPM_METRIC_NVDEC_3_UTIL GpmMetricId = 33 - GPM_METRIC_NVDEC_4_UTIL GpmMetricId = 34 - GPM_METRIC_NVDEC_5_UTIL GpmMetricId = 35 - GPM_METRIC_NVDEC_6_UTIL GpmMetricId = 36 - GPM_METRIC_NVDEC_7_UTIL GpmMetricId = 37 - GPM_METRIC_NVJPG_0_UTIL GpmMetricId = 40 - GPM_METRIC_NVJPG_1_UTIL GpmMetricId = 41 - GPM_METRIC_NVJPG_2_UTIL GpmMetricId = 42 - GPM_METRIC_NVJPG_3_UTIL GpmMetricId = 43 - GPM_METRIC_NVJPG_4_UTIL GpmMetricId = 44 - GPM_METRIC_NVJPG_5_UTIL GpmMetricId = 45 - GPM_METRIC_NVJPG_6_UTIL GpmMetricId = 46 - GPM_METRIC_NVJPG_7_UTIL GpmMetricId = 47 - GPM_METRIC_NVOFA_0_UTIL GpmMetricId = 50 - GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC GpmMetricId = 60 - GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC GpmMetricId = 61 - GPM_METRIC_NVLINK_L0_RX_PER_SEC GpmMetricId = 62 - GPM_METRIC_NVLINK_L0_TX_PER_SEC GpmMetricId = 63 - GPM_METRIC_NVLINK_L1_RX_PER_SEC GpmMetricId = 64 - GPM_METRIC_NVLINK_L1_TX_PER_SEC GpmMetricId = 65 - GPM_METRIC_NVLINK_L2_RX_PER_SEC GpmMetricId = 66 - GPM_METRIC_NVLINK_L2_TX_PER_SEC GpmMetricId = 67 - GPM_METRIC_NVLINK_L3_RX_PER_SEC GpmMetricId = 68 - GPM_METRIC_NVLINK_L3_TX_PER_SEC GpmMetricId = 69 - GPM_METRIC_NVLINK_L4_RX_PER_SEC GpmMetricId = 70 - GPM_METRIC_NVLINK_L4_TX_PER_SEC GpmMetricId = 71 - GPM_METRIC_NVLINK_L5_RX_PER_SEC GpmMetricId = 72 - GPM_METRIC_NVLINK_L5_TX_PER_SEC GpmMetricId = 73 - GPM_METRIC_NVLINK_L6_RX_PER_SEC GpmMetricId = 74 - GPM_METRIC_NVLINK_L6_TX_PER_SEC GpmMetricId = 75 - GPM_METRIC_NVLINK_L7_RX_PER_SEC GpmMetricId = 76 - GPM_METRIC_NVLINK_L7_TX_PER_SEC GpmMetricId = 77 - GPM_METRIC_NVLINK_L8_RX_PER_SEC GpmMetricId = 78 - GPM_METRIC_NVLINK_L8_TX_PER_SEC GpmMetricId = 79 - GPM_METRIC_NVLINK_L9_RX_PER_SEC GpmMetricId = 80 - GPM_METRIC_NVLINK_L9_TX_PER_SEC GpmMetricId = 81 - GPM_METRIC_NVLINK_L10_RX_PER_SEC GpmMetricId = 82 - GPM_METRIC_NVLINK_L10_TX_PER_SEC GpmMetricId = 83 - GPM_METRIC_NVLINK_L11_RX_PER_SEC 
GpmMetricId = 84 - GPM_METRIC_NVLINK_L11_TX_PER_SEC GpmMetricId = 85 - GPM_METRIC_NVLINK_L12_RX_PER_SEC GpmMetricId = 86 - GPM_METRIC_NVLINK_L12_TX_PER_SEC GpmMetricId = 87 - GPM_METRIC_NVLINK_L13_RX_PER_SEC GpmMetricId = 88 - GPM_METRIC_NVLINK_L13_TX_PER_SEC GpmMetricId = 89 - GPM_METRIC_NVLINK_L14_RX_PER_SEC GpmMetricId = 90 - GPM_METRIC_NVLINK_L14_TX_PER_SEC GpmMetricId = 91 - GPM_METRIC_NVLINK_L15_RX_PER_SEC GpmMetricId = 92 - GPM_METRIC_NVLINK_L15_TX_PER_SEC GpmMetricId = 93 - GPM_METRIC_NVLINK_L16_RX_PER_SEC GpmMetricId = 94 - GPM_METRIC_NVLINK_L16_TX_PER_SEC GpmMetricId = 95 - GPM_METRIC_NVLINK_L17_RX_PER_SEC GpmMetricId = 96 - GPM_METRIC_NVLINK_L17_TX_PER_SEC GpmMetricId = 97 - GPM_METRIC_MAX GpmMetricId = 98 + GPM_METRIC_GRAPHICS_UTIL GpmMetricId = 1 + GPM_METRIC_SM_UTIL GpmMetricId = 2 + GPM_METRIC_SM_OCCUPANCY GpmMetricId = 3 + GPM_METRIC_INTEGER_UTIL GpmMetricId = 4 + GPM_METRIC_ANY_TENSOR_UTIL GpmMetricId = 5 + GPM_METRIC_DFMA_TENSOR_UTIL GpmMetricId = 6 + GPM_METRIC_HMMA_TENSOR_UTIL GpmMetricId = 7 + GPM_METRIC_IMMA_TENSOR_UTIL GpmMetricId = 9 + GPM_METRIC_DRAM_BW_UTIL GpmMetricId = 10 + GPM_METRIC_FP64_UTIL GpmMetricId = 11 + GPM_METRIC_FP32_UTIL GpmMetricId = 12 + GPM_METRIC_FP16_UTIL GpmMetricId = 13 + GPM_METRIC_PCIE_TX_PER_SEC GpmMetricId = 20 + GPM_METRIC_PCIE_RX_PER_SEC GpmMetricId = 21 + GPM_METRIC_NVDEC_0_UTIL GpmMetricId = 30 + GPM_METRIC_NVDEC_1_UTIL GpmMetricId = 31 + GPM_METRIC_NVDEC_2_UTIL GpmMetricId = 32 + GPM_METRIC_NVDEC_3_UTIL GpmMetricId = 33 + GPM_METRIC_NVDEC_4_UTIL GpmMetricId = 34 + GPM_METRIC_NVDEC_5_UTIL GpmMetricId = 35 + GPM_METRIC_NVDEC_6_UTIL GpmMetricId = 36 + GPM_METRIC_NVDEC_7_UTIL GpmMetricId = 37 + GPM_METRIC_NVJPG_0_UTIL GpmMetricId = 40 + GPM_METRIC_NVJPG_1_UTIL GpmMetricId = 41 + GPM_METRIC_NVJPG_2_UTIL GpmMetricId = 42 + GPM_METRIC_NVJPG_3_UTIL GpmMetricId = 43 + GPM_METRIC_NVJPG_4_UTIL GpmMetricId = 44 + GPM_METRIC_NVJPG_5_UTIL GpmMetricId = 45 + GPM_METRIC_NVJPG_6_UTIL GpmMetricId = 46 + GPM_METRIC_NVJPG_7_UTIL 
GpmMetricId = 47 + GPM_METRIC_NVOFA_0_UTIL GpmMetricId = 50 + GPM_METRIC_NVOFA_1_UTIL GpmMetricId = 51 + GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC GpmMetricId = 60 + GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC GpmMetricId = 61 + GPM_METRIC_NVLINK_L0_RX_PER_SEC GpmMetricId = 62 + GPM_METRIC_NVLINK_L0_TX_PER_SEC GpmMetricId = 63 + GPM_METRIC_NVLINK_L1_RX_PER_SEC GpmMetricId = 64 + GPM_METRIC_NVLINK_L1_TX_PER_SEC GpmMetricId = 65 + GPM_METRIC_NVLINK_L2_RX_PER_SEC GpmMetricId = 66 + GPM_METRIC_NVLINK_L2_TX_PER_SEC GpmMetricId = 67 + GPM_METRIC_NVLINK_L3_RX_PER_SEC GpmMetricId = 68 + GPM_METRIC_NVLINK_L3_TX_PER_SEC GpmMetricId = 69 + GPM_METRIC_NVLINK_L4_RX_PER_SEC GpmMetricId = 70 + GPM_METRIC_NVLINK_L4_TX_PER_SEC GpmMetricId = 71 + GPM_METRIC_NVLINK_L5_RX_PER_SEC GpmMetricId = 72 + GPM_METRIC_NVLINK_L5_TX_PER_SEC GpmMetricId = 73 + GPM_METRIC_NVLINK_L6_RX_PER_SEC GpmMetricId = 74 + GPM_METRIC_NVLINK_L6_TX_PER_SEC GpmMetricId = 75 + GPM_METRIC_NVLINK_L7_RX_PER_SEC GpmMetricId = 76 + GPM_METRIC_NVLINK_L7_TX_PER_SEC GpmMetricId = 77 + GPM_METRIC_NVLINK_L8_RX_PER_SEC GpmMetricId = 78 + GPM_METRIC_NVLINK_L8_TX_PER_SEC GpmMetricId = 79 + GPM_METRIC_NVLINK_L9_RX_PER_SEC GpmMetricId = 80 + GPM_METRIC_NVLINK_L9_TX_PER_SEC GpmMetricId = 81 + GPM_METRIC_NVLINK_L10_RX_PER_SEC GpmMetricId = 82 + GPM_METRIC_NVLINK_L10_TX_PER_SEC GpmMetricId = 83 + GPM_METRIC_NVLINK_L11_RX_PER_SEC GpmMetricId = 84 + GPM_METRIC_NVLINK_L11_TX_PER_SEC GpmMetricId = 85 + GPM_METRIC_NVLINK_L12_RX_PER_SEC GpmMetricId = 86 + GPM_METRIC_NVLINK_L12_TX_PER_SEC GpmMetricId = 87 + GPM_METRIC_NVLINK_L13_RX_PER_SEC GpmMetricId = 88 + GPM_METRIC_NVLINK_L13_TX_PER_SEC GpmMetricId = 89 + GPM_METRIC_NVLINK_L14_RX_PER_SEC GpmMetricId = 90 + GPM_METRIC_NVLINK_L14_TX_PER_SEC GpmMetricId = 91 + GPM_METRIC_NVLINK_L15_RX_PER_SEC GpmMetricId = 92 + GPM_METRIC_NVLINK_L15_TX_PER_SEC GpmMetricId = 93 + GPM_METRIC_NVLINK_L16_RX_PER_SEC GpmMetricId = 94 + GPM_METRIC_NVLINK_L16_TX_PER_SEC GpmMetricId = 95 + GPM_METRIC_NVLINK_L17_RX_PER_SEC 
GpmMetricId = 96 + GPM_METRIC_NVLINK_L17_TX_PER_SEC GpmMetricId = 97 + GPM_METRIC_C2C_TOTAL_TX_PER_SEC GpmMetricId = 100 + GPM_METRIC_C2C_TOTAL_RX_PER_SEC GpmMetricId = 101 + GPM_METRIC_C2C_DATA_TX_PER_SEC GpmMetricId = 102 + GPM_METRIC_C2C_DATA_RX_PER_SEC GpmMetricId = 103 + GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC GpmMetricId = 104 + GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC GpmMetricId = 105 + GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC GpmMetricId = 106 + GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC GpmMetricId = 107 + GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC GpmMetricId = 108 + GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC GpmMetricId = 109 + GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC GpmMetricId = 110 + GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC GpmMetricId = 111 + GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC GpmMetricId = 112 + GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC GpmMetricId = 113 + GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC GpmMetricId = 114 + GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC GpmMetricId = 115 + GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC GpmMetricId = 116 + GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC GpmMetricId = 117 + GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC GpmMetricId = 118 + GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC GpmMetricId = 119 + GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC GpmMetricId = 120 + GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC GpmMetricId = 121 + GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC GpmMetricId = 122 + GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC GpmMetricId = 123 + GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC GpmMetricId = 124 + GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC GpmMetricId = 125 + GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC GpmMetricId = 126 + GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC GpmMetricId = 127 + GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC GpmMetricId = 128 + GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC GpmMetricId = 129 + GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC GpmMetricId = 130 + GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC GpmMetricId = 131 + GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC GpmMetricId = 132 + GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC GpmMetricId = 
133 + GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC GpmMetricId = 134 + GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC GpmMetricId = 135 + GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC GpmMetricId = 136 + GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC GpmMetricId = 137 + GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC GpmMetricId = 138 + GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC GpmMetricId = 139 + GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC GpmMetricId = 140 + GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC GpmMetricId = 141 + GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC GpmMetricId = 142 + GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC GpmMetricId = 143 + GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC GpmMetricId = 144 + GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC GpmMetricId = 145 + GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC GpmMetricId = 146 + GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC GpmMetricId = 147 + GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC GpmMetricId = 148 + GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC GpmMetricId = 149 + GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC GpmMetricId = 150 + GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC GpmMetricId = 151 + GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC GpmMetricId = 152 + GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC GpmMetricId = 153 + GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC GpmMetricId = 154 + GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC GpmMetricId = 155 + GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC GpmMetricId = 156 + GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC GpmMetricId = 157 + GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC GpmMetricId = 158 + GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC GpmMetricId = 159 + GPM_METRIC_HOSTMEM_CACHE_HIT GpmMetricId = 160 + GPM_METRIC_HOSTMEM_CACHE_MISS GpmMetricId = 161 + GPM_METRIC_PEERMEM_CACHE_HIT GpmMetricId = 162 + GPM_METRIC_PEERMEM_CACHE_MISS GpmMetricId = 163 + GPM_METRIC_DRAM_CACHE_HIT GpmMetricId = 164 + GPM_METRIC_DRAM_CACHE_MISS GpmMetricId = 165 + GPM_METRIC_NVENC_0_UTIL GpmMetricId = 166 + GPM_METRIC_NVENC_1_UTIL GpmMetricId = 167 + GPM_METRIC_NVENC_2_UTIL GpmMetricId = 168 + GPM_METRIC_NVENC_3_UTIL GpmMetricId = 169 + 
GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED GpmMetricId = 170 + GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE GpmMetricId = 171 + GPM_METRIC_GR0_CTXSW_REQUESTS GpmMetricId = 172 + GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ GpmMetricId = 173 + GPM_METRIC_GR0_CTXSW_ACTIVE_PCT GpmMetricId = 174 + GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED GpmMetricId = 175 + GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE GpmMetricId = 176 + GPM_METRIC_GR1_CTXSW_REQUESTS GpmMetricId = 177 + GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ GpmMetricId = 178 + GPM_METRIC_GR1_CTXSW_ACTIVE_PCT GpmMetricId = 179 + GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED GpmMetricId = 180 + GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE GpmMetricId = 181 + GPM_METRIC_GR2_CTXSW_REQUESTS GpmMetricId = 182 + GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ GpmMetricId = 183 + GPM_METRIC_GR2_CTXSW_ACTIVE_PCT GpmMetricId = 184 + GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED GpmMetricId = 185 + GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE GpmMetricId = 186 + GPM_METRIC_GR3_CTXSW_REQUESTS GpmMetricId = 187 + GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ GpmMetricId = 188 + GPM_METRIC_GR3_CTXSW_ACTIVE_PCT GpmMetricId = 189 + GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED GpmMetricId = 190 + GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE GpmMetricId = 191 + GPM_METRIC_GR4_CTXSW_REQUESTS GpmMetricId = 192 + GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ GpmMetricId = 193 + GPM_METRIC_GR4_CTXSW_ACTIVE_PCT GpmMetricId = 194 + GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED GpmMetricId = 195 + GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE GpmMetricId = 196 + GPM_METRIC_GR5_CTXSW_REQUESTS GpmMetricId = 197 + GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ GpmMetricId = 198 + GPM_METRIC_GR5_CTXSW_ACTIVE_PCT GpmMetricId = 199 + GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED GpmMetricId = 200 + GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE GpmMetricId = 201 + GPM_METRIC_GR6_CTXSW_REQUESTS GpmMetricId = 202 + GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ GpmMetricId = 203 + GPM_METRIC_GR6_CTXSW_ACTIVE_PCT GpmMetricId = 204 + GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED GpmMetricId = 205 + GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE GpmMetricId = 206 + 
GPM_METRIC_GR7_CTXSW_REQUESTS GpmMetricId = 207 + GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ GpmMetricId = 208 + GPM_METRIC_GR7_CTXSW_ACTIVE_PCT GpmMetricId = 209 + GPM_METRIC_MAX GpmMetricId = 210 +) + +// PowerProfileType as declared in nvml/nvml.h +type PowerProfileType int32 + +// PowerProfileType enumeration from nvml/nvml.h +const ( + POWER_PROFILE_MAX_P PowerProfileType = iota + POWER_PROFILE_MAX_Q PowerProfileType = 1 + POWER_PROFILE_COMPUTE PowerProfileType = 2 + POWER_PROFILE_MEMORY_BOUND PowerProfileType = 3 + POWER_PROFILE_NETWORK PowerProfileType = 4 + POWER_PROFILE_BALANCED PowerProfileType = 5 + POWER_PROFILE_LLM_INFERENCE PowerProfileType = 6 + POWER_PROFILE_LLM_TRAINING PowerProfileType = 7 + POWER_PROFILE_RBM PowerProfileType = 8 + POWER_PROFILE_DCPCIE PowerProfileType = 9 + POWER_PROFILE_HMMA_SPARSE PowerProfileType = 10 + POWER_PROFILE_HMMA_DENSE PowerProfileType = 11 + POWER_PROFILE_SYNC_BALANCED PowerProfileType = 12 + POWER_PROFILE_HPC PowerProfileType = 13 + POWER_PROFILE_MIG PowerProfileType = 14 + POWER_PROFILE_MAX PowerProfileType = 15 ) diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go index 7604d39..4784cd4 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go @@ -68,16 +68,6 @@ type GpuInstanceInfo struct { Placement GpuInstancePlacement } -func (g GpuInstanceInfo) convert() nvmlGpuInstanceInfo { - out := nvmlGpuInstanceInfo{ - Device: g.Device.(nvmlDevice), - Id: g.Id, - ProfileId: g.ProfileId, - Placement: g.Placement, - } - return out -} - func (g nvmlGpuInstanceInfo) convert() GpuInstanceInfo { out := GpuInstanceInfo{ Device: g.Device, @@ -97,17 +87,6 @@ type ComputeInstanceInfo struct { Placement ComputeInstancePlacement } -func (c ComputeInstanceInfo) convert() nvmlComputeInstanceInfo { - out := nvmlComputeInstanceInfo{ - Device: c.Device.(nvmlDevice), - GpuInstance: c.GpuInstance.(nvmlGpuInstance), - 
Id: c.Id, - ProfileId: c.ProfileId, - Placement: c.Placement, - } - return out -} - func (c nvmlComputeInstanceInfo) convert() ComputeInstanceInfo { out := ComputeInstanceInfo{ Device: c.Device, @@ -147,6 +126,13 @@ func (l *library) DeviceGetHandleByUUID(uuid string) (Device, Return) { return device, ret } +// nvml.DeviceGetHandleByUUIDV() +func (l *library) DeviceGetHandleByUUIDV(uuid *UUID) (Device, Return) { + var device nvmlDevice + ret := nvmlDeviceGetHandleByUUIDV(uuid, &device) + return device, ret +} + // nvml.DeviceGetHandleByPciBusId() func (l *library) DeviceGetHandleByPciBusId(pciBusId string) (Device, Return) { var device nvmlDevice @@ -2085,28 +2071,35 @@ func (device nvmlDevice) GetGpuInstanceProfileInfo(profile int) (GpuInstanceProf } // nvml.DeviceGetGpuInstanceProfileInfoV() -type GpuInstanceProfileInfoV struct { +type GpuInstanceProfileInfoHandler struct { device nvmlDevice profile int } -func (infoV GpuInstanceProfileInfoV) V1() (GpuInstanceProfileInfo, Return) { - return DeviceGetGpuInstanceProfileInfo(infoV.device, infoV.profile) +func (handler GpuInstanceProfileInfoHandler) V1() (GpuInstanceProfileInfo, Return) { + return DeviceGetGpuInstanceProfileInfo(handler.device, handler.profile) } -func (infoV GpuInstanceProfileInfoV) V2() (GpuInstanceProfileInfo_v2, Return) { +func (handler GpuInstanceProfileInfoHandler) V2() (GpuInstanceProfileInfo_v2, Return) { var info GpuInstanceProfileInfo_v2 info.Version = STRUCT_VERSION(info, 2) - ret := nvmlDeviceGetGpuInstanceProfileInfoV(infoV.device, uint32(infoV.profile), &info) + ret := nvmlDeviceGetGpuInstanceProfileInfoV(handler.device, uint32(handler.profile), &info) return info, ret } -func (l *library) DeviceGetGpuInstanceProfileInfoV(device Device, profile int) GpuInstanceProfileInfoV { +func (handler GpuInstanceProfileInfoHandler) V3() (GpuInstanceProfileInfo_v3, Return) { + var info GpuInstanceProfileInfo_v3 + info.Version = STRUCT_VERSION(info, 3) + ret := 
nvmlDeviceGetGpuInstanceProfileInfoV(handler.device, uint32(handler.profile), (*GpuInstanceProfileInfo_v2)(unsafe.Pointer(&info))) + return info, ret +} + +func (l *library) DeviceGetGpuInstanceProfileInfoV(device Device, profile int) GpuInstanceProfileInfoHandler { return device.GetGpuInstanceProfileInfoV(profile) } -func (device nvmlDevice) GetGpuInstanceProfileInfoV(profile int) GpuInstanceProfileInfoV { - return GpuInstanceProfileInfoV{device, profile} +func (device nvmlDevice) GetGpuInstanceProfileInfoV(profile int) GpuInstanceProfileInfoHandler { + return GpuInstanceProfileInfoHandler{device, profile} } // nvml.DeviceGetGpuInstancePossiblePlacements() @@ -2191,7 +2184,7 @@ func (device nvmlDevice) GetGpuInstances(info *GpuInstanceProfileInfo) ([]GpuIns if info == nil { return nil, ERROR_INVALID_ARGUMENT } - var count uint32 = info.InstanceCount + var count = info.InstanceCount gpuInstances := make([]nvmlGpuInstance, count) ret := nvmlDeviceGetGpuInstances(device, info.Id, &gpuInstances[0], &count) return convertSlice[nvmlGpuInstance, GpuInstance](gpuInstances[:count]), ret @@ -2231,29 +2224,36 @@ func (gpuInstance nvmlGpuInstance) GetComputeInstanceProfileInfo(profile int, en } // nvml.GpuInstanceGetComputeInstanceProfileInfoV() -type ComputeInstanceProfileInfoV struct { +type ComputeInstanceProfileInfoHandler struct { gpuInstance nvmlGpuInstance profile int engProfile int } -func (infoV ComputeInstanceProfileInfoV) V1() (ComputeInstanceProfileInfo, Return) { - return GpuInstanceGetComputeInstanceProfileInfo(infoV.gpuInstance, infoV.profile, infoV.engProfile) +func (handler ComputeInstanceProfileInfoHandler) V1() (ComputeInstanceProfileInfo, Return) { + return GpuInstanceGetComputeInstanceProfileInfo(handler.gpuInstance, handler.profile, handler.engProfile) } -func (infoV ComputeInstanceProfileInfoV) V2() (ComputeInstanceProfileInfo_v2, Return) { +func (handler ComputeInstanceProfileInfoHandler) V2() (ComputeInstanceProfileInfo_v2, Return) { var info 
ComputeInstanceProfileInfo_v2 info.Version = STRUCT_VERSION(info, 2) - ret := nvmlGpuInstanceGetComputeInstanceProfileInfoV(infoV.gpuInstance, uint32(infoV.profile), uint32(infoV.engProfile), &info) + ret := nvmlGpuInstanceGetComputeInstanceProfileInfoV(handler.gpuInstance, uint32(handler.profile), uint32(handler.engProfile), &info) return info, ret } -func (l *library) GpuInstanceGetComputeInstanceProfileInfoV(gpuInstance GpuInstance, profile int, engProfile int) ComputeInstanceProfileInfoV { +func (handler ComputeInstanceProfileInfoHandler) V3() (ComputeInstanceProfileInfo_v3, Return) { + var info ComputeInstanceProfileInfo_v3 + info.Version = STRUCT_VERSION(info, 3) + ret := nvmlGpuInstanceGetComputeInstanceProfileInfoV(handler.gpuInstance, uint32(handler.profile), uint32(handler.engProfile), (*ComputeInstanceProfileInfo_v2)(unsafe.Pointer(&info))) + return info, ret +} + +func (l *library) GpuInstanceGetComputeInstanceProfileInfoV(gpuInstance GpuInstance, profile int, engProfile int) ComputeInstanceProfileInfoHandler { return gpuInstance.GetComputeInstanceProfileInfoV(profile, engProfile) } -func (gpuInstance nvmlGpuInstance) GetComputeInstanceProfileInfoV(profile int, engProfile int) ComputeInstanceProfileInfoV { - return ComputeInstanceProfileInfoV{gpuInstance, profile, engProfile} +func (gpuInstance nvmlGpuInstance) GetComputeInstanceProfileInfoV(profile int, engProfile int) ComputeInstanceProfileInfoHandler { + return ComputeInstanceProfileInfoHandler{gpuInstance, profile, engProfile} } // nvml.GpuInstanceGetComputeInstanceRemainingCapacity() @@ -2302,7 +2302,7 @@ func (gpuInstance nvmlGpuInstance) GetComputeInstances(info *ComputeInstanceProf if info == nil { return nil, ERROR_INVALID_ARGUMENT } - var count uint32 = info.InstanceCount + var count = info.InstanceCount computeInstances := make([]nvmlComputeInstance, count) ret := nvmlGpuInstanceGetComputeInstances(gpuInstance, info.Id, &computeInstances[0], &count) return convertSlice[nvmlComputeInstance, 
ComputeInstance](computeInstances[:count]), ret @@ -2737,31 +2737,678 @@ func (device nvmlDevice) GetGpuFabricInfo() (GpuFabricInfo, Return) { return gpuFabricInfo, ret } -// nvml.DeviceCcuGetStreamState() -func (l *library) DeviceCcuGetStreamState(device Device) (int, Return) { - return device.CcuGetStreamState() +// nvml.DeviceSetNvLinkDeviceLowPowerThreshold() +func (l *library) DeviceSetNvLinkDeviceLowPowerThreshold(device Device, info *NvLinkPowerThres) Return { + return device.SetNvLinkDeviceLowPowerThreshold(info) +} + +func (device nvmlDevice) SetNvLinkDeviceLowPowerThreshold(info *NvLinkPowerThres) Return { + return nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info) } -func (device nvmlDevice) CcuGetStreamState() (int, Return) { - var state uint32 - ret := nvmlDeviceCcuGetStreamState(device, &state) - return int(state), ret +// nvml.DeviceGetModuleId() +func (l *library) DeviceGetModuleId(device Device) (int, Return) { + return device.GetModuleId() } -// nvml.DeviceCcuSetStreamState() -func (l *library) DeviceCcuSetStreamState(device Device, state int) Return { - return device.CcuSetStreamState(state) +func (device nvmlDevice) GetModuleId() (int, Return) { + var moduleID uint32 + ret := nvmlDeviceGetModuleId(device, &moduleID) + return int(moduleID), ret } -func (device nvmlDevice) CcuSetStreamState(state int) Return { - return nvmlDeviceCcuSetStreamState(device, uint32(state)) +// nvml.DeviceGetCurrentClocksEventReasons() +func (l *library) DeviceGetCurrentClocksEventReasons(device Device) (uint64, Return) { + return device.GetCurrentClocksEventReasons() } -// nvml.DeviceSetNvLinkDeviceLowPowerThreshold() -func (l *library) DeviceSetNvLinkDeviceLowPowerThreshold(device Device, info *NvLinkPowerThres) Return { - return device.SetNvLinkDeviceLowPowerThreshold(info) +func (device nvmlDevice) GetCurrentClocksEventReasons() (uint64, Return) { + var clocksEventReasons uint64 + ret := nvmlDeviceGetCurrentClocksEventReasons(device, &clocksEventReasons) + 
return clocksEventReasons, ret } -func (device nvmlDevice) SetNvLinkDeviceLowPowerThreshold(info *NvLinkPowerThres) Return { - return nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info) +// nvml.DeviceGetSupportedClocksEventReasons() +func (l *library) DeviceGetSupportedClocksEventReasons(device Device) (uint64, Return) { + return device.GetSupportedClocksEventReasons() +} + +func (device nvmlDevice) GetSupportedClocksEventReasons() (uint64, Return) { + var supportedClocksEventReasons uint64 + ret := nvmlDeviceGetSupportedClocksEventReasons(device, &supportedClocksEventReasons) + return supportedClocksEventReasons, ret +} + +// nvml.DeviceGetJpgUtilization() +func (l *library) DeviceGetJpgUtilization(device Device) (uint32, uint32, Return) { + return device.GetJpgUtilization() +} + +func (device nvmlDevice) GetJpgUtilization() (uint32, uint32, Return) { + var utilization, samplingPeriodUs uint32 + ret := nvmlDeviceGetJpgUtilization(device, &utilization, &samplingPeriodUs) + return utilization, samplingPeriodUs, ret +} + +// nvml.DeviceGetOfaUtilization() +func (l *library) DeviceGetOfaUtilization(device Device) (uint32, uint32, Return) { + return device.GetOfaUtilization() +} + +func (device nvmlDevice) GetOfaUtilization() (uint32, uint32, Return) { + var utilization, samplingPeriodUs uint32 + ret := nvmlDeviceGetOfaUtilization(device, &utilization, &samplingPeriodUs) + return utilization, samplingPeriodUs, ret +} + +// nvml.DeviceGetRunningProcessDetailList() +func (l *library) DeviceGetRunningProcessDetailList(device Device) (ProcessDetailList, Return) { + return device.GetRunningProcessDetailList() +} + +func (device nvmlDevice) GetRunningProcessDetailList() (ProcessDetailList, Return) { + var plist ProcessDetailList + plist.Version = STRUCT_VERSION(plist, 1) + ret := nvmlDeviceGetRunningProcessDetailList(device, &plist) + return plist, ret +} + +// nvml.DeviceGetConfComputeMemSizeInfo() +func (l *library) DeviceGetConfComputeMemSizeInfo(device Device) 
(ConfComputeMemSizeInfo, Return) { + return device.GetConfComputeMemSizeInfo() +} + +func (device nvmlDevice) GetConfComputeMemSizeInfo() (ConfComputeMemSizeInfo, Return) { + var memInfo ConfComputeMemSizeInfo + ret := nvmlDeviceGetConfComputeMemSizeInfo(device, &memInfo) + return memInfo, ret +} + +// nvml.DeviceGetConfComputeProtectedMemoryUsage() +func (l *library) DeviceGetConfComputeProtectedMemoryUsage(device Device) (Memory, Return) { + return device.GetConfComputeProtectedMemoryUsage() +} + +func (device nvmlDevice) GetConfComputeProtectedMemoryUsage() (Memory, Return) { + var memory Memory + ret := nvmlDeviceGetConfComputeProtectedMemoryUsage(device, &memory) + return memory, ret +} + +// nvml.DeviceGetConfComputeGpuCertificate() +func (l *library) DeviceGetConfComputeGpuCertificate(device Device) (ConfComputeGpuCertificate, Return) { + return device.GetConfComputeGpuCertificate() +} + +func (device nvmlDevice) GetConfComputeGpuCertificate() (ConfComputeGpuCertificate, Return) { + var gpuCert ConfComputeGpuCertificate + ret := nvmlDeviceGetConfComputeGpuCertificate(device, &gpuCert) + return gpuCert, ret +} + +// nvml.DeviceGetConfComputeGpuAttestationReport() +func (l *library) DeviceGetConfComputeGpuAttestationReport(device Device) (ConfComputeGpuAttestationReport, Return) { + return device.GetConfComputeGpuAttestationReport() +} + +func (device nvmlDevice) GetConfComputeGpuAttestationReport() (ConfComputeGpuAttestationReport, Return) { + var gpuAtstReport ConfComputeGpuAttestationReport + ret := nvmlDeviceGetConfComputeGpuAttestationReport(device, &gpuAtstReport) + return gpuAtstReport, ret +} + +// nvml.DeviceSetConfComputeUnprotectedMemSize() +func (l *library) DeviceSetConfComputeUnprotectedMemSize(device Device, sizeKiB uint64) Return { + return device.SetConfComputeUnprotectedMemSize(sizeKiB) +} + +func (device nvmlDevice) SetConfComputeUnprotectedMemSize(sizeKiB uint64) Return { + return nvmlDeviceSetConfComputeUnprotectedMemSize(device, sizeKiB) 
+} + +// nvml.DeviceSetPowerManagementLimit_v2() +func (l *library) DeviceSetPowerManagementLimit_v2(device Device, powerValue *PowerValue_v2) Return { + return device.SetPowerManagementLimit_v2(powerValue) +} + +func (device nvmlDevice) SetPowerManagementLimit_v2(powerValue *PowerValue_v2) Return { + return nvmlDeviceSetPowerManagementLimit_v2(device, powerValue) +} + +// nvml.DeviceGetC2cModeInfoV() +type C2cModeInfoHandler struct { + device nvmlDevice +} + +func (handler C2cModeInfoHandler) V1() (C2cModeInfo_v1, Return) { + var c2cModeInfo C2cModeInfo_v1 + ret := nvmlDeviceGetC2cModeInfoV(handler.device, &c2cModeInfo) + return c2cModeInfo, ret +} + +func (l *library) DeviceGetC2cModeInfoV(device Device) C2cModeInfoHandler { + return device.GetC2cModeInfoV() +} + +func (device nvmlDevice) GetC2cModeInfoV() C2cModeInfoHandler { + return C2cModeInfoHandler{device} +} + +// nvml.DeviceGetLastBBXFlushTime() +func (l *library) DeviceGetLastBBXFlushTime(device Device) (uint64, uint, Return) { + return device.GetLastBBXFlushTime() +} + +func (device nvmlDevice) GetLastBBXFlushTime() (uint64, uint, Return) { + var timestamp uint64 + var durationUs uint + ret := nvmlDeviceGetLastBBXFlushTime(device, ×tamp, &durationUs) + return timestamp, durationUs, ret +} + +// nvml.DeviceGetNumaNodeId() +func (l *library) DeviceGetNumaNodeId(device Device) (int, Return) { + return device.GetNumaNodeId() +} + +func (device nvmlDevice) GetNumaNodeId() (int, Return) { + var node uint32 + ret := nvmlDeviceGetNumaNodeId(device, &node) + return int(node), ret +} + +// nvml.DeviceGetPciInfoExt() +func (l *library) DeviceGetPciInfoExt(device Device) (PciInfoExt, Return) { + return device.GetPciInfoExt() +} + +func (device nvmlDevice) GetPciInfoExt() (PciInfoExt, Return) { + var pciInfo PciInfoExt + pciInfo.Version = STRUCT_VERSION(pciInfo, 1) + ret := nvmlDeviceGetPciInfoExt(device, &pciInfo) + return pciInfo, ret +} + +// nvml.DeviceGetGpuFabricInfoV() +type GpuFabricInfoHandler struct { + 
device nvmlDevice +} + +func (handler GpuFabricInfoHandler) V1() (GpuFabricInfo, Return) { + return handler.device.GetGpuFabricInfo() +} + +func (handler GpuFabricInfoHandler) V2() (GpuFabricInfo_v2, Return) { + var info GpuFabricInfoV + info.Version = STRUCT_VERSION(info, 2) + ret := nvmlDeviceGetGpuFabricInfoV(handler.device, &info) + return GpuFabricInfo_v2(info), ret +} + +func (l *library) DeviceGetGpuFabricInfoV(device Device) GpuFabricInfoHandler { + return device.GetGpuFabricInfoV() +} + +func (device nvmlDevice) GetGpuFabricInfoV() GpuFabricInfoHandler { + return GpuFabricInfoHandler{device} +} + +// nvml.DeviceGetProcessesUtilizationInfo() +func (l *library) DeviceGetProcessesUtilizationInfo(device Device) (ProcessesUtilizationInfo, Return) { + return device.GetProcessesUtilizationInfo() +} + +func (device nvmlDevice) GetProcessesUtilizationInfo() (ProcessesUtilizationInfo, Return) { + var processesUtilInfo ProcessesUtilizationInfo + ret := nvmlDeviceGetProcessesUtilizationInfo(device, &processesUtilInfo) + return processesUtilInfo, ret +} + +// nvml.DeviceGetVgpuHeterogeneousMode() +func (l *library) DeviceGetVgpuHeterogeneousMode(device Device) (VgpuHeterogeneousMode, Return) { + return device.GetVgpuHeterogeneousMode() +} + +func (device nvmlDevice) GetVgpuHeterogeneousMode() (VgpuHeterogeneousMode, Return) { + var heterogeneousMode VgpuHeterogeneousMode + heterogeneousMode.Version = STRUCT_VERSION(heterogeneousMode, 1) + ret := nvmlDeviceGetVgpuHeterogeneousMode(device, &heterogeneousMode) + return heterogeneousMode, ret +} + +// nvml.DeviceSetVgpuHeterogeneousMode() +func (l *library) DeviceSetVgpuHeterogeneousMode(device Device, heterogeneousMode VgpuHeterogeneousMode) Return { + return device.SetVgpuHeterogeneousMode(heterogeneousMode) +} + +func (device nvmlDevice) SetVgpuHeterogeneousMode(heterogeneousMode VgpuHeterogeneousMode) Return { + ret := nvmlDeviceSetVgpuHeterogeneousMode(device, &heterogeneousMode) + return ret +} + +// 
nvml.DeviceGetVgpuTypeSupportedPlacements() +func (l *library) DeviceGetVgpuTypeSupportedPlacements(device Device, vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return device.GetVgpuTypeSupportedPlacements(vgpuTypeId) +} + +func (device nvmlDevice) GetVgpuTypeSupportedPlacements(vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return vgpuTypeId.GetSupportedPlacements(device) +} + +func (vgpuTypeId nvmlVgpuTypeId) GetSupportedPlacements(device Device) (VgpuPlacementList, Return) { + var placementList VgpuPlacementList + placementList.Version = STRUCT_VERSION(placementList, 1) + ret := nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDeviceHandle(device), vgpuTypeId, &placementList) + return placementList, ret +} + +// nvml.DeviceGetVgpuTypeCreatablePlacements() +func (l *library) DeviceGetVgpuTypeCreatablePlacements(device Device, vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return device.GetVgpuTypeCreatablePlacements(vgpuTypeId) +} + +func (device nvmlDevice) GetVgpuTypeCreatablePlacements(vgpuTypeId VgpuTypeId) (VgpuPlacementList, Return) { + return vgpuTypeId.GetCreatablePlacements(device) +} + +func (vgpuTypeId nvmlVgpuTypeId) GetCreatablePlacements(device Device) (VgpuPlacementList, Return) { + var placementList VgpuPlacementList + placementList.Version = STRUCT_VERSION(placementList, 1) + ret := nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDeviceHandle(device), vgpuTypeId, &placementList) + return placementList, ret +} + +// nvml.DeviceSetVgpuCapabilities() +func (l *library) DeviceSetVgpuCapabilities(device Device, capability DeviceVgpuCapability, state EnableState) Return { + return device.SetVgpuCapabilities(capability, state) +} + +func (device nvmlDevice) SetVgpuCapabilities(capability DeviceVgpuCapability, state EnableState) Return { + ret := nvmlDeviceSetVgpuCapabilities(device, capability, state) + return ret +} + +// nvml.DeviceGetVgpuInstancesUtilizationInfo() +func (l *library) DeviceGetVgpuInstancesUtilizationInfo(device 
Device) (VgpuInstancesUtilizationInfo, Return) { + return device.GetVgpuInstancesUtilizationInfo() +} + +func (device nvmlDevice) GetVgpuInstancesUtilizationInfo() (VgpuInstancesUtilizationInfo, Return) { + var vgpuUtilInfo VgpuInstancesUtilizationInfo + ret := nvmlDeviceGetVgpuInstancesUtilizationInfo(device, &vgpuUtilInfo) + return vgpuUtilInfo, ret +} + +// nvml.DeviceGetVgpuProcessesUtilizationInfo() +func (l *library) DeviceGetVgpuProcessesUtilizationInfo(device Device) (VgpuProcessesUtilizationInfo, Return) { + return device.GetVgpuProcessesUtilizationInfo() +} + +func (device nvmlDevice) GetVgpuProcessesUtilizationInfo() (VgpuProcessesUtilizationInfo, Return) { + var vgpuProcUtilInfo VgpuProcessesUtilizationInfo + vgpuProcUtilInfo.Version = STRUCT_VERSION(vgpuProcUtilInfo, 1) + ret := nvmlDeviceGetVgpuProcessesUtilizationInfo(device, &vgpuProcUtilInfo) + return vgpuProcUtilInfo, ret +} + +// nvml.DeviceGetSramEccErrorStatus() +func (l *library) DeviceGetSramEccErrorStatus(device Device) (EccSramErrorStatus, Return) { + return device.GetSramEccErrorStatus() +} + +func (device nvmlDevice) GetSramEccErrorStatus() (EccSramErrorStatus, Return) { + var status EccSramErrorStatus + status.Version = STRUCT_VERSION(status, 1) + ret := nvmlDeviceGetSramEccErrorStatus(device, &status) + return status, ret +} + +// nvml.DeviceGetClockOffsets() +func (l *library) DeviceGetClockOffsets(device Device) (ClockOffset, Return) { + return device.GetClockOffsets() +} + +func (device nvmlDevice) GetClockOffsets() (ClockOffset, Return) { + var info ClockOffset + info.Version = STRUCT_VERSION(info, 1) + ret := nvmlDeviceGetClockOffsets(device, &info) + return info, ret +} + +// nvml.DeviceSetClockOffsets() +func (l *library) DeviceSetClockOffsets(device Device, info ClockOffset) Return { + return device.SetClockOffsets(info) +} + +func (device nvmlDevice) SetClockOffsets(info ClockOffset) Return { + return nvmlDeviceSetClockOffsets(device, &info) +} + +// 
nvml.DeviceGetDriverModel_v2() +func (l *library) DeviceGetDriverModel_v2(device Device) (DriverModel, DriverModel, Return) { + return device.GetDriverModel_v2() +} + +func (device nvmlDevice) GetDriverModel_v2() (DriverModel, DriverModel, Return) { + var current, pending DriverModel + ret := nvmlDeviceGetDriverModel_v2(device, ¤t, &pending) + return current, pending, ret +} + +// nvml.DeviceGetCapabilities() +func (l *library) DeviceGetCapabilities(device Device) (DeviceCapabilities, Return) { + return device.GetCapabilities() +} + +func (device nvmlDevice) GetCapabilities() (DeviceCapabilities, Return) { + var caps DeviceCapabilities + caps.Version = STRUCT_VERSION(caps, 1) + ret := nvmlDeviceGetCapabilities(device, &caps) + return caps, ret +} + +// nvml.DeviceGetFanSpeedRPM() +func (l *library) DeviceGetFanSpeedRPM(device Device) (FanSpeedInfo, Return) { + return device.GetFanSpeedRPM() +} + +func (device nvmlDevice) GetFanSpeedRPM() (FanSpeedInfo, Return) { + var fanSpeed FanSpeedInfo + fanSpeed.Version = STRUCT_VERSION(fanSpeed, 1) + ret := nvmlDeviceGetFanSpeedRPM(device, &fanSpeed) + return fanSpeed, ret +} + +// nvml.DeviceGetCoolerInfo() +func (l *library) DeviceGetCoolerInfo(device Device) (CoolerInfo, Return) { + return device.GetCoolerInfo() +} + +func (device nvmlDevice) GetCoolerInfo() (CoolerInfo, Return) { + var coolerInfo CoolerInfo + coolerInfo.Version = STRUCT_VERSION(coolerInfo, 1) + ret := nvmlDeviceGetCoolerInfo(device, &coolerInfo) + return coolerInfo, ret +} + +// nvml.DeviceGetTemperatureV() +type TemperatureHandler struct { + device nvmlDevice +} + +func (handler TemperatureHandler) V1() (Temperature, Return) { + var temperature Temperature + temperature.Version = STRUCT_VERSION(temperature, 1) + ret := nvmlDeviceGetTemperatureV(handler.device, &temperature) + return temperature, ret +} + +func (l *library) DeviceGetTemperatureV(device Device) TemperatureHandler { + return device.GetTemperatureV() +} + +func (device nvmlDevice) 
GetTemperatureV() TemperatureHandler { + return TemperatureHandler{device} +} + +// nvml.DeviceGetMarginTemperature() +func (l *library) DeviceGetMarginTemperature(device Device) (MarginTemperature, Return) { + return device.GetMarginTemperature() +} + +func (device nvmlDevice) GetMarginTemperature() (MarginTemperature, Return) { + var marginTemp MarginTemperature + marginTemp.Version = STRUCT_VERSION(marginTemp, 1) + ret := nvmlDeviceGetMarginTemperature(device, &marginTemp) + return marginTemp, ret +} + +// nvml.DeviceGetPerformanceModes() +func (l *library) DeviceGetPerformanceModes(device Device) (DevicePerfModes, Return) { + return device.GetPerformanceModes() +} + +func (device nvmlDevice) GetPerformanceModes() (DevicePerfModes, Return) { + var perfModes DevicePerfModes + perfModes.Version = STRUCT_VERSION(perfModes, 1) + ret := nvmlDeviceGetPerformanceModes(device, &perfModes) + return perfModes, ret +} + +// nvml.DeviceGetCurrentClockFreqs() +func (l *library) DeviceGetCurrentClockFreqs(device Device) (DeviceCurrentClockFreqs, Return) { + return device.GetCurrentClockFreqs() +} + +func (device nvmlDevice) GetCurrentClockFreqs() (DeviceCurrentClockFreqs, Return) { + var currentClockFreqs DeviceCurrentClockFreqs + currentClockFreqs.Version = STRUCT_VERSION(currentClockFreqs, 1) + ret := nvmlDeviceGetCurrentClockFreqs(device, ¤tClockFreqs) + return currentClockFreqs, ret +} + +// nvml.DeviceGetDramEncryptionMode() +func (l *library) DeviceGetDramEncryptionMode(device Device) (DramEncryptionInfo, DramEncryptionInfo, Return) { + return device.GetDramEncryptionMode() +} + +func (device nvmlDevice) GetDramEncryptionMode() (DramEncryptionInfo, DramEncryptionInfo, Return) { + var current, pending DramEncryptionInfo + current.Version = STRUCT_VERSION(current, 1) + pending.Version = STRUCT_VERSION(pending, 1) + ret := nvmlDeviceGetDramEncryptionMode(device, ¤t, &pending) + return current, pending, ret +} + +// nvml.DeviceSetDramEncryptionMode() +func (l *library) 
DeviceSetDramEncryptionMode(device Device, dramEncryption *DramEncryptionInfo) Return { + return device.SetDramEncryptionMode(dramEncryption) +} + +func (device nvmlDevice) SetDramEncryptionMode(dramEncryption *DramEncryptionInfo) Return { + return nvmlDeviceSetDramEncryptionMode(device, dramEncryption) +} + +// nvml.DeviceGetPlatformInfo() +func (l *library) DeviceGetPlatformInfo(device Device) (PlatformInfo, Return) { + return device.GetPlatformInfo() +} + +func (device nvmlDevice) GetPlatformInfo() (PlatformInfo, Return) { + var platformInfo PlatformInfo + platformInfo.Version = STRUCT_VERSION(platformInfo, 1) + ret := nvmlDeviceGetPlatformInfo(device, &platformInfo) + return platformInfo, ret +} + +// nvml.DeviceGetNvlinkSupportedBwModes() +func (l *library) DeviceGetNvlinkSupportedBwModes(device Device) (NvlinkSupportedBwModes, Return) { + return device.GetNvlinkSupportedBwModes() +} + +func (device nvmlDevice) GetNvlinkSupportedBwModes() (NvlinkSupportedBwModes, Return) { + var supportedBwMode NvlinkSupportedBwModes + supportedBwMode.Version = STRUCT_VERSION(supportedBwMode, 1) + ret := nvmlDeviceGetNvlinkSupportedBwModes(device, &supportedBwMode) + return supportedBwMode, ret +} + +// nvml.DeviceGetNvlinkBwMode() +func (l *library) DeviceGetNvlinkBwMode(device Device) (NvlinkGetBwMode, Return) { + return device.GetNvlinkBwMode() +} + +func (device nvmlDevice) GetNvlinkBwMode() (NvlinkGetBwMode, Return) { + var getBwMode NvlinkGetBwMode + getBwMode.Version = STRUCT_VERSION(getBwMode, 1) + ret := nvmlDeviceGetNvlinkBwMode(device, &getBwMode) + return getBwMode, ret +} + +// nvml.DeviceSetNvlinkBwMode() +func (l *library) DeviceSetNvlinkBwMode(device Device, setBwMode *NvlinkSetBwMode) Return { + return device.SetNvlinkBwMode(setBwMode) +} + +func (device nvmlDevice) SetNvlinkBwMode(setBwMode *NvlinkSetBwMode) Return { + return nvmlDeviceSetNvlinkBwMode(device, setBwMode) +} + +// nvml.DeviceWorkloadPowerProfileGetProfilesInfo() +func (l *library) 
DeviceWorkloadPowerProfileGetProfilesInfo(device Device) (WorkloadPowerProfileProfilesInfo, Return) { + return device.WorkloadPowerProfileGetProfilesInfo() +} + +func (device nvmlDevice) WorkloadPowerProfileGetProfilesInfo() (WorkloadPowerProfileProfilesInfo, Return) { + var profilesInfo WorkloadPowerProfileProfilesInfo + profilesInfo.Version = STRUCT_VERSION(profilesInfo, 1) + ret := nvmlDeviceWorkloadPowerProfileGetProfilesInfo(device, &profilesInfo) + return profilesInfo, ret +} + +// nvml.DeviceWorkloadPowerProfileGetCurrentProfiles() +func (l *library) DeviceWorkloadPowerProfileGetCurrentProfiles(device Device) (WorkloadPowerProfileCurrentProfiles, Return) { + return device.WorkloadPowerProfileGetCurrentProfiles() +} + +func (device nvmlDevice) WorkloadPowerProfileGetCurrentProfiles() (WorkloadPowerProfileCurrentProfiles, Return) { + var currentProfiles WorkloadPowerProfileCurrentProfiles + currentProfiles.Version = STRUCT_VERSION(currentProfiles, 1) + ret := nvmlDeviceWorkloadPowerProfileGetCurrentProfiles(device, ¤tProfiles) + return currentProfiles, ret +} + +// nvml.DeviceWorkloadPowerProfileSetRequestedProfiles() +func (l *library) DeviceWorkloadPowerProfileSetRequestedProfiles(device Device, requestedProfiles *WorkloadPowerProfileRequestedProfiles) Return { + return device.WorkloadPowerProfileSetRequestedProfiles(requestedProfiles) +} + +func (device nvmlDevice) WorkloadPowerProfileSetRequestedProfiles(requestedProfiles *WorkloadPowerProfileRequestedProfiles) Return { + return nvmlDeviceWorkloadPowerProfileSetRequestedProfiles(device, requestedProfiles) +} + +// nvml.DeviceWorkloadPowerProfileClearRequestedProfiles() +func (l *library) DeviceWorkloadPowerProfileClearRequestedProfiles(device Device, requestedProfiles *WorkloadPowerProfileRequestedProfiles) Return { + return device.WorkloadPowerProfileClearRequestedProfiles(requestedProfiles) +} + +func (device nvmlDevice) WorkloadPowerProfileClearRequestedProfiles(requestedProfiles 
*WorkloadPowerProfileRequestedProfiles) Return { + return nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requestedProfiles) +} + +// nvml.DevicePowerSmoothingActivatePresetProfile() +func (l *library) DevicePowerSmoothingActivatePresetProfile(device Device, profile *PowerSmoothingProfile) Return { + return device.PowerSmoothingActivatePresetProfile(profile) +} + +func (device nvmlDevice) PowerSmoothingActivatePresetProfile(profile *PowerSmoothingProfile) Return { + return nvmlDevicePowerSmoothingActivatePresetProfile(device, profile) +} + +// nvml.DevicePowerSmoothingUpdatePresetProfileParam() +func (l *library) DevicePowerSmoothingUpdatePresetProfileParam(device Device, profile *PowerSmoothingProfile) Return { + return device.PowerSmoothingUpdatePresetProfileParam(profile) +} + +func (device nvmlDevice) PowerSmoothingUpdatePresetProfileParam(profile *PowerSmoothingProfile) Return { + return nvmlDevicePowerSmoothingUpdatePresetProfileParam(device, profile) +} + +// nvml.DevicePowerSmoothingSetState() +func (l *library) DevicePowerSmoothingSetState(device Device, state *PowerSmoothingState) Return { + return device.PowerSmoothingSetState(state) +} + +func (device nvmlDevice) PowerSmoothingSetState(state *PowerSmoothingState) Return { + return nvmlDevicePowerSmoothingSetState(device, state) +} + +// nvml.GpuInstanceGetCreatableVgpus() +func (l *library) GpuInstanceGetCreatableVgpus(gpuInstance GpuInstance) (VgpuTypeIdInfo, Return) { + return gpuInstance.GetCreatableVgpus() +} + +func (gpuInstance nvmlGpuInstance) GetCreatableVgpus() (VgpuTypeIdInfo, Return) { + var vgpuTypeIdInfo VgpuTypeIdInfo + vgpuTypeIdInfo.Version = STRUCT_VERSION(vgpuTypeIdInfo, 1) + ret := nvmlGpuInstanceGetCreatableVgpus(gpuInstance, &vgpuTypeIdInfo) + return vgpuTypeIdInfo, ret +} + +// nvml.GpuInstanceGetActiveVgpus() +func (l *library) GpuInstanceGetActiveVgpus(gpuInstance GpuInstance) (ActiveVgpuInstanceInfo, Return) { + return gpuInstance.GetActiveVgpus() +} + +func 
(gpuInstance nvmlGpuInstance) GetActiveVgpus() (ActiveVgpuInstanceInfo, Return) { + var activeVgpuInstanceInfo ActiveVgpuInstanceInfo + activeVgpuInstanceInfo.Version = STRUCT_VERSION(activeVgpuInstanceInfo, 1) + ret := nvmlGpuInstanceGetActiveVgpus(gpuInstance, &activeVgpuInstanceInfo) + return activeVgpuInstanceInfo, ret +} + +// nvml.GpuInstanceSetVgpuSchedulerState() +func (l *library) GpuInstanceSetVgpuSchedulerState(gpuInstance GpuInstance, scheduler *VgpuSchedulerState) Return { + return gpuInstance.SetVgpuSchedulerState(scheduler) +} + +func (gpuInstance nvmlGpuInstance) SetVgpuSchedulerState(scheduler *VgpuSchedulerState) Return { + return nvmlGpuInstanceSetVgpuSchedulerState(gpuInstance, scheduler) +} + +// nvml.GpuInstanceGetVgpuSchedulerState() +func (l *library) GpuInstanceGetVgpuSchedulerState(gpuInstance GpuInstance) (VgpuSchedulerStateInfo, Return) { + return gpuInstance.GetVgpuSchedulerState() +} + +func (gpuInstance nvmlGpuInstance) GetVgpuSchedulerState() (VgpuSchedulerStateInfo, Return) { + var schedulerStateInfo VgpuSchedulerStateInfo + schedulerStateInfo.Version = STRUCT_VERSION(schedulerStateInfo, 1) + ret := nvmlGpuInstanceGetVgpuSchedulerState(gpuInstance, &schedulerStateInfo) + return schedulerStateInfo, ret +} + +// nvml.GpuInstanceGetVgpuSchedulerLog() +func (l *library) GpuInstanceGetVgpuSchedulerLog(gpuInstance GpuInstance) (VgpuSchedulerLogInfo, Return) { + return gpuInstance.GetVgpuSchedulerLog() +} + +func (gpuInstance nvmlGpuInstance) GetVgpuSchedulerLog() (VgpuSchedulerLogInfo, Return) { + var schedulerLogInfo VgpuSchedulerLogInfo + schedulerLogInfo.Version = STRUCT_VERSION(schedulerLogInfo, 1) + ret := nvmlGpuInstanceGetVgpuSchedulerLog(gpuInstance, &schedulerLogInfo) + return schedulerLogInfo, ret +} + +// nvml.GpuInstanceGetVgpuTypeCreatablePlacements() +func (l *library) GpuInstanceGetVgpuTypeCreatablePlacements(gpuInstance GpuInstance) (VgpuCreatablePlacementInfo, Return) { + return 
gpuInstance.GetVgpuTypeCreatablePlacements() +} + +func (gpuInstance nvmlGpuInstance) GetVgpuTypeCreatablePlacements() (VgpuCreatablePlacementInfo, Return) { + var creatablePlacementInfo VgpuCreatablePlacementInfo + creatablePlacementInfo.Version = STRUCT_VERSION(creatablePlacementInfo, 1) + ret := nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpuInstance, &creatablePlacementInfo) + return creatablePlacementInfo, ret +} + +// nvml.GpuInstanceGetVgpuHeterogeneousMode() +func (l *library) GpuInstanceGetVgpuHeterogeneousMode(gpuInstance GpuInstance) (VgpuHeterogeneousMode, Return) { + return gpuInstance.GetVgpuHeterogeneousMode() +} + +func (gpuInstance nvmlGpuInstance) GetVgpuHeterogeneousMode() (VgpuHeterogeneousMode, Return) { + var heterogeneousMode VgpuHeterogeneousMode + heterogeneousMode.Version = STRUCT_VERSION(heterogeneousMode, 1) + ret := nvmlGpuInstanceGetVgpuHeterogeneousMode(gpuInstance, &heterogeneousMode) + return heterogeneousMode, ret +} + +// nvml.GpuInstanceSetVgpuHeterogeneousMode() +func (l *library) GpuInstanceSetVgpuHeterogeneousMode(gpuInstance GpuInstance, heterogeneousMode *VgpuHeterogeneousMode) Return { + return gpuInstance.SetVgpuHeterogeneousMode(heterogeneousMode) +} + +func (gpuInstance nvmlGpuInstance) SetVgpuHeterogeneousMode(heterogeneousMode *VgpuHeterogeneousMode) Return { + return nvmlGpuInstanceSetVgpuHeterogeneousMode(gpuInstance, heterogeneousMode) } diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go index 933b4de..b772d57 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go @@ -23,17 +23,6 @@ type EventData struct { ComputeInstanceId uint32 } -func (e EventData) convert() nvmlEventData { - out := nvmlEventData{ - Device: e.Device.(nvmlDevice), - EventType: e.EventType, - EventData: e.EventData, - GpuInstanceId: e.GpuInstanceId, - ComputeInstanceId: e.ComputeInstanceId, - } - 
return out -} - func (e nvmlEventData) convert() EventData { out := EventData{ Device: e.Device, @@ -71,3 +60,23 @@ func (l *library) EventSetFree(set EventSet) Return { func (set nvmlEventSet) Free() Return { return nvmlEventSetFree(set) } + +// nvml.SystemEventSetCreate() +func (l *library) SystemEventSetCreate(request *SystemEventSetCreateRequest) Return { + return nvmlSystemEventSetCreate(request) +} + +// nvml.SystemEventSetFree() +func (l *library) SystemEventSetFree(request *SystemEventSetFreeRequest) Return { + return nvmlSystemEventSetFree(request) +} + +// nvml.SystemRegisterEvents() +func (l *library) SystemRegisterEvents(request *SystemRegisterEventRequest) Return { + return nvmlSystemRegisterEvents(request) +} + +// nvml.SystemEventSetWait() +func (l *library) SystemEventSetWait(request *SystemEventSetWaitRequest) Return { + return nvmlSystemEventSetWait(request) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go index acdb2e0..563bc59 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/gpm.go @@ -20,7 +20,7 @@ type GpmMetricsGetType struct { NumMetrics uint32 Sample1 GpmSample Sample2 GpmSample - Metrics [98]GpmMetric + Metrics [210]GpmMetric } func (g *GpmMetricsGetType) convert() *nvmlGpmMetricsGetType { @@ -30,9 +30,8 @@ func (g *GpmMetricsGetType) convert() *nvmlGpmMetricsGetType { Sample1: g.Sample1.(nvmlGpmSample), Sample2: g.Sample2.(nvmlGpmSample), } - for i := range g.Metrics { - out.Metrics[i] = g.Metrics[i] - } + copy(out.Metrics[:], g.Metrics[:]) + return out } @@ -43,28 +42,38 @@ func (g *nvmlGpmMetricsGetType) convert() *GpmMetricsGetType { Sample1: g.Sample1, Sample2: g.Sample2, } - for i := range g.Metrics { - out.Metrics[i] = g.Metrics[i] - } + copy(out.Metrics[:], g.Metrics[:]) + return out } // nvml.GpmMetricsGet() type GpmMetricsGetVType struct { - metricsGet *nvmlGpmMetricsGetType + metricsGet 
*GpmMetricsGetType } func (l *library) GpmMetricsGetV(metricsGet *GpmMetricsGetType) GpmMetricsGetVType { - return GpmMetricsGetVType{metricsGet.convert()} + return GpmMetricsGetVType{metricsGet} } + +// nvmlGpmMetricsGetStub is a stub function that can be overridden for testing. +var nvmlGpmMetricsGetStub = nvmlGpmMetricsGet + func (metricsGetV GpmMetricsGetVType) V1() Return { metricsGetV.metricsGet.Version = 1 - return nvmlGpmMetricsGet(metricsGetV.metricsGet) + return gpmMetricsGet(metricsGetV.metricsGet) } func (l *library) GpmMetricsGet(metricsGet *GpmMetricsGetType) Return { metricsGet.Version = GPM_METRICS_GET_VERSION - return nvmlGpmMetricsGet(metricsGet.convert()) + return gpmMetricsGet(metricsGet) +} + +func gpmMetricsGet(metricsGet *GpmMetricsGetType) Return { + nvmlMetricsGet := metricsGet.convert() + ret := nvmlGpmMetricsGetStub(nvmlMetricsGet) + *metricsGet = *nvmlMetricsGet.convert() + return ret } // nvml.GpmSampleFree() @@ -111,7 +120,7 @@ func (device nvmlDevice) GpmQueryDeviceSupportV() GpmSupportV { func (gpmSupportV GpmSupportV) V1() (GpmSupport, Return) { var gpmSupport GpmSupport - gpmSupport.Version = 1 + gpmSupport.Version = STRUCT_VERSION(gpmSupport, 1) ret := nvmlGpmQueryDeviceSupport(gpmSupportV.device, &gpmSupport) return gpmSupport, ret } @@ -122,7 +131,7 @@ func (l *library) GpmQueryDeviceSupport(device Device) (GpmSupport, Return) { func (device nvmlDevice) GpmQueryDeviceSupport() (GpmSupport, Return) { var gpmSupport GpmSupport - gpmSupport.Version = GPM_SUPPORT_VERSION + gpmSupport.Version = STRUCT_VERSION(gpmSupport, GPM_SUPPORT_VERSION) ret := nvmlGpmQueryDeviceSupport(device, &gpmSupport) return gpmSupport, ret } @@ -139,3 +148,23 @@ func (device nvmlDevice) GpmMigSampleGet(gpuInstanceId int, gpmSample GpmSample) func (gpmSample nvmlGpmSample) MigGet(device Device, gpuInstanceId int) Return { return nvmlGpmMigSampleGet(nvmlDeviceHandle(device), uint32(gpuInstanceId), gpmSample) } + +// nvml.GpmQueryIfStreamingEnabled() +func (l 
*library) GpmQueryIfStreamingEnabled(device Device) (uint32, Return) { + return device.GpmQueryIfStreamingEnabled() +} + +func (device nvmlDevice) GpmQueryIfStreamingEnabled() (uint32, Return) { + var state uint32 + ret := nvmlGpmQueryIfStreamingEnabled(device, &state) + return state, ret +} + +// nvml.GpmSetStreamingEnabled() +func (l *library) GpmSetStreamingEnabled(device Device, state uint32) Return { + return device.GpmSetStreamingEnabled(state) +} + +func (device nvmlDevice) GpmSetStreamingEnabled(state uint32) Return { + return nvmlGpmSetStreamingEnabled(device, state) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go index bc4c3de..5a7e688 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/lib.go @@ -163,6 +163,7 @@ var GetBlacklistDeviceCount = GetExcludedDeviceCount var GetBlacklistDeviceInfoByIndex = GetExcludedDeviceInfoByIndex var nvmlDeviceGetGpuInstancePossiblePlacements = nvmlDeviceGetGpuInstancePossiblePlacements_v1 var nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v1 +var nvmlDeviceGetDriverModel = nvmlDeviceGetDriverModel_v1 // BlacklistDeviceInfo was replaced by ExcludedDeviceInfo type BlacklistDeviceInfo = ExcludedDeviceInfo @@ -288,4 +289,8 @@ func (l *library) updateVersionedSymbols() { if err == nil { nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v2 } + err = l.dl.Lookup("nvmlDeviceGetDriverModel_v2") + if err == nil { + nvmlDeviceGetDriverModel = nvmlDeviceGetDriverModel_v2 + } } diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/device.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/device.go index cb8f0cf..27f22fb 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/device.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/device.go @@ -18,12 +18,6 @@ var _ nvml.Device = &Device{} // // // make and configure a mocked nvml.Device // mockedDevice := &Device{ 
-// CcuGetStreamStateFunc: func() (int, nvml.Return) { -// panic("mock out the CcuGetStreamState method") -// }, -// CcuSetStreamStateFunc: func(n int) nvml.Return { -// panic("mock out the CcuSetStreamState method") -// }, // ClearAccountingPidsFunc: func() nvml.Return { // panic("mock out the ClearAccountingPids method") // }, @@ -96,6 +90,12 @@ var _ nvml.Device = &Device{} // GetBusTypeFunc: func() (nvml.BusType, nvml.Return) { // panic("mock out the GetBusType method") // }, +// GetC2cModeInfoVFunc: func() nvml.C2cModeInfoHandler { +// panic("mock out the GetC2cModeInfoV method") +// }, +// GetCapabilitiesFunc: func() (nvml.DeviceCapabilities, nvml.Return) { +// panic("mock out the GetCapabilities method") +// }, // GetClkMonStatusFunc: func() (nvml.ClkMonStatus, nvml.Return) { // panic("mock out the GetClkMonStatus method") // }, @@ -105,6 +105,9 @@ var _ nvml.Device = &Device{} // GetClockInfoFunc: func(clockType nvml.ClockType) (uint32, nvml.Return) { // panic("mock out the GetClockInfo method") // }, +// GetClockOffsetsFunc: func() (nvml.ClockOffset, nvml.Return) { +// panic("mock out the GetClockOffsets method") +// }, // GetComputeInstanceIdFunc: func() (int, nvml.Return) { // panic("mock out the GetComputeInstanceId method") // }, @@ -114,6 +117,21 @@ var _ nvml.Device = &Device{} // GetComputeRunningProcessesFunc: func() ([]nvml.ProcessInfo, nvml.Return) { // panic("mock out the GetComputeRunningProcesses method") // }, +// GetConfComputeGpuAttestationReportFunc: func() (nvml.ConfComputeGpuAttestationReport, nvml.Return) { +// panic("mock out the GetConfComputeGpuAttestationReport method") +// }, +// GetConfComputeGpuCertificateFunc: func() (nvml.ConfComputeGpuCertificate, nvml.Return) { +// panic("mock out the GetConfComputeGpuCertificate method") +// }, +// GetConfComputeMemSizeInfoFunc: func() (nvml.ConfComputeMemSizeInfo, nvml.Return) { +// panic("mock out the GetConfComputeMemSizeInfo method") +// }, +// GetConfComputeProtectedMemoryUsageFunc: 
func() (nvml.Memory, nvml.Return) { +// panic("mock out the GetConfComputeProtectedMemoryUsage method") +// }, +// GetCoolerInfoFunc: func() (nvml.CoolerInfo, nvml.Return) { +// panic("mock out the GetCoolerInfo method") +// }, // GetCpuAffinityFunc: func(n int) ([]uint, nvml.Return) { // panic("mock out the GetCpuAffinity method") // }, @@ -132,6 +150,12 @@ var _ nvml.Device = &Device{} // GetCurrPcieLinkWidthFunc: func() (int, nvml.Return) { // panic("mock out the GetCurrPcieLinkWidth method") // }, +// GetCurrentClockFreqsFunc: func() (nvml.DeviceCurrentClockFreqs, nvml.Return) { +// panic("mock out the GetCurrentClockFreqs method") +// }, +// GetCurrentClocksEventReasonsFunc: func() (uint64, nvml.Return) { +// panic("mock out the GetCurrentClocksEventReasons method") +// }, // GetCurrentClocksThrottleReasonsFunc: func() (uint64, nvml.Return) { // panic("mock out the GetCurrentClocksThrottleReasons method") // }, @@ -156,9 +180,15 @@ var _ nvml.Device = &Device{} // GetDisplayModeFunc: func() (nvml.EnableState, nvml.Return) { // panic("mock out the GetDisplayMode method") // }, +// GetDramEncryptionModeFunc: func() (nvml.DramEncryptionInfo, nvml.DramEncryptionInfo, nvml.Return) { +// panic("mock out the GetDramEncryptionMode method") +// }, // GetDriverModelFunc: func() (nvml.DriverModel, nvml.DriverModel, nvml.Return) { // panic("mock out the GetDriverModel method") // }, +// GetDriverModel_v2Func: func() (nvml.DriverModel, nvml.DriverModel, nvml.Return) { +// panic("mock out the GetDriverModel_v2 method") +// }, // GetDynamicPstatesInfoFunc: func() (nvml.GpuDynamicPstatesInfo, nvml.Return) { // panic("mock out the GetDynamicPstatesInfo method") // }, @@ -192,6 +222,9 @@ var _ nvml.Device = &Device{} // GetFanSpeedFunc: func() (uint32, nvml.Return) { // panic("mock out the GetFanSpeed method") // }, +// GetFanSpeedRPMFunc: func() (nvml.FanSpeedInfo, nvml.Return) { +// panic("mock out the GetFanSpeedRPM method") +// }, // GetFanSpeed_v2Func: func(n int) (uint32, 
nvml.Return) { // panic("mock out the GetFanSpeed_v2 method") // }, @@ -207,6 +240,9 @@ var _ nvml.Device = &Device{} // GetGpuFabricInfoFunc: func() (nvml.GpuFabricInfo, nvml.Return) { // panic("mock out the GetGpuFabricInfo method") // }, +// GetGpuFabricInfoVFunc: func() nvml.GpuFabricInfoHandler { +// panic("mock out the GetGpuFabricInfoV method") +// }, // GetGpuInstanceByIdFunc: func(n int) (nvml.GpuInstance, nvml.Return) { // panic("mock out the GetGpuInstanceById method") // }, @@ -219,7 +255,7 @@ var _ nvml.Device = &Device{} // GetGpuInstanceProfileInfoFunc: func(n int) (nvml.GpuInstanceProfileInfo, nvml.Return) { // panic("mock out the GetGpuInstanceProfileInfo method") // }, -// GetGpuInstanceProfileInfoVFunc: func(n int) nvml.GpuInstanceProfileInfoV { +// GetGpuInstanceProfileInfoVFunc: func(n int) nvml.GpuInstanceProfileInfoHandler { // panic("mock out the GetGpuInstanceProfileInfoV method") // }, // GetGpuInstanceRemainingCapacityFunc: func(gpuInstanceProfileInfo *nvml.GpuInstanceProfileInfo) (int, nvml.Return) { @@ -264,9 +300,18 @@ var _ nvml.Device = &Device{} // GetIrqNumFunc: func() (int, nvml.Return) { // panic("mock out the GetIrqNum method") // }, +// GetJpgUtilizationFunc: func() (uint32, uint32, nvml.Return) { +// panic("mock out the GetJpgUtilization method") +// }, +// GetLastBBXFlushTimeFunc: func() (uint64, uint, nvml.Return) { +// panic("mock out the GetLastBBXFlushTime method") +// }, // GetMPSComputeRunningProcessesFunc: func() ([]nvml.ProcessInfo, nvml.Return) { // panic("mock out the GetMPSComputeRunningProcesses method") // }, +// GetMarginTemperatureFunc: func() (nvml.MarginTemperature, nvml.Return) { +// panic("mock out the GetMarginTemperature method") +// }, // GetMaxClockInfoFunc: func(clockType nvml.ClockType) (uint32, nvml.Return) { // panic("mock out the GetMaxClockInfo method") // }, @@ -318,6 +363,9 @@ var _ nvml.Device = &Device{} // GetMinorNumberFunc: func() (int, nvml.Return) { // panic("mock out the GetMinorNumber 
method") // }, +// GetModuleIdFunc: func() (int, nvml.Return) { +// panic("mock out the GetModuleId method") +// }, // GetMultiGpuBoardFunc: func() (int, nvml.Return) { // panic("mock out the GetMultiGpuBoard method") // }, @@ -330,6 +378,9 @@ var _ nvml.Device = &Device{} // GetNumGpuCoresFunc: func() (int, nvml.Return) { // panic("mock out the GetNumGpuCores method") // }, +// GetNumaNodeIdFunc: func() (int, nvml.Return) { +// panic("mock out the GetNumaNodeId method") +// }, // GetNvLinkCapabilityFunc: func(n int, nvLinkCapability nvml.NvLinkCapability) (uint32, nvml.Return) { // panic("mock out the GetNvLinkCapability method") // }, @@ -354,12 +405,24 @@ var _ nvml.Device = &Device{} // GetNvLinkVersionFunc: func(n int) (uint32, nvml.Return) { // panic("mock out the GetNvLinkVersion method") // }, +// GetNvlinkBwModeFunc: func() (nvml.NvlinkGetBwMode, nvml.Return) { +// panic("mock out the GetNvlinkBwMode method") +// }, +// GetNvlinkSupportedBwModesFunc: func() (nvml.NvlinkSupportedBwModes, nvml.Return) { +// panic("mock out the GetNvlinkSupportedBwModes method") +// }, +// GetOfaUtilizationFunc: func() (uint32, uint32, nvml.Return) { +// panic("mock out the GetOfaUtilization method") +// }, // GetP2PStatusFunc: func(device nvml.Device, gpuP2PCapsIndex nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) { // panic("mock out the GetP2PStatus method") // }, // GetPciInfoFunc: func() (nvml.PciInfo, nvml.Return) { // panic("mock out the GetPciInfo method") // }, +// GetPciInfoExtFunc: func() (nvml.PciInfoExt, nvml.Return) { +// panic("mock out the GetPciInfoExt method") +// }, // GetPcieLinkMaxSpeedFunc: func() (uint32, nvml.Return) { // panic("mock out the GetPcieLinkMaxSpeed method") // }, @@ -372,6 +435,9 @@ var _ nvml.Device = &Device{} // GetPcieThroughputFunc: func(pcieUtilCounter nvml.PcieUtilCounter) (uint32, nvml.Return) { // panic("mock out the GetPcieThroughput method") // }, +// GetPerformanceModesFunc: func() (nvml.DevicePerfModes, nvml.Return) { 
+// panic("mock out the GetPerformanceModes method") +// }, // GetPerformanceStateFunc: func() (nvml.Pstates, nvml.Return) { // panic("mock out the GetPerformanceState method") // }, @@ -381,6 +447,9 @@ var _ nvml.Device = &Device{} // GetPgpuMetadataStringFunc: func() (string, nvml.Return) { // panic("mock out the GetPgpuMetadataString method") // }, +// GetPlatformInfoFunc: func() (nvml.PlatformInfo, nvml.Return) { +// panic("mock out the GetPlatformInfo method") +// }, // GetPowerManagementDefaultLimitFunc: func() (uint32, nvml.Return) { // panic("mock out the GetPowerManagementDefaultLimit method") // }, @@ -405,6 +474,9 @@ var _ nvml.Device = &Device{} // GetProcessUtilizationFunc: func(v uint64) ([]nvml.ProcessUtilizationSample, nvml.Return) { // panic("mock out the GetProcessUtilization method") // }, +// GetProcessesUtilizationInfoFunc: func() (nvml.ProcessesUtilizationInfo, nvml.Return) { +// panic("mock out the GetProcessesUtilizationInfo method") +// }, // GetRemappedRowsFunc: func() (int, int, bool, bool, nvml.Return) { // panic("mock out the GetRemappedRows method") // }, @@ -420,12 +492,21 @@ var _ nvml.Device = &Device{} // GetRowRemapperHistogramFunc: func() (nvml.RowRemapperHistogramValues, nvml.Return) { // panic("mock out the GetRowRemapperHistogram method") // }, +// GetRunningProcessDetailListFunc: func() (nvml.ProcessDetailList, nvml.Return) { +// panic("mock out the GetRunningProcessDetailList method") +// }, // GetSamplesFunc: func(samplingType nvml.SamplingType, v uint64) (nvml.ValueType, []nvml.Sample, nvml.Return) { // panic("mock out the GetSamples method") // }, // GetSerialFunc: func() (string, nvml.Return) { // panic("mock out the GetSerial method") // }, +// GetSramEccErrorStatusFunc: func() (nvml.EccSramErrorStatus, nvml.Return) { +// panic("mock out the GetSramEccErrorStatus method") +// }, +// GetSupportedClocksEventReasonsFunc: func() (uint64, nvml.Return) { +// panic("mock out the GetSupportedClocksEventReasons method") +// }, 
// GetSupportedClocksThrottleReasonsFunc: func() (uint64, nvml.Return) { // panic("mock out the GetSupportedClocksThrottleReasons method") // }, @@ -453,6 +534,9 @@ var _ nvml.Device = &Device{} // GetTemperatureThresholdFunc: func(temperatureThresholds nvml.TemperatureThresholds) (uint32, nvml.Return) { // panic("mock out the GetTemperatureThreshold method") // }, +// GetTemperatureVFunc: func() nvml.TemperatureHandler { +// panic("mock out the GetTemperatureV method") +// }, // GetThermalSettingsFunc: func(v uint32) (nvml.GpuThermalSettings, nvml.Return) { // panic("mock out the GetThermalSettings method") // }, @@ -480,12 +564,21 @@ var _ nvml.Device = &Device{} // GetVgpuCapabilitiesFunc: func(deviceVgpuCapability nvml.DeviceVgpuCapability) (bool, nvml.Return) { // panic("mock out the GetVgpuCapabilities method") // }, +// GetVgpuHeterogeneousModeFunc: func() (nvml.VgpuHeterogeneousMode, nvml.Return) { +// panic("mock out the GetVgpuHeterogeneousMode method") +// }, +// GetVgpuInstancesUtilizationInfoFunc: func() (nvml.VgpuInstancesUtilizationInfo, nvml.Return) { +// panic("mock out the GetVgpuInstancesUtilizationInfo method") +// }, // GetVgpuMetadataFunc: func() (nvml.VgpuPgpuMetadata, nvml.Return) { // panic("mock out the GetVgpuMetadata method") // }, // GetVgpuProcessUtilizationFunc: func(v uint64) ([]nvml.VgpuProcessUtilizationSample, nvml.Return) { // panic("mock out the GetVgpuProcessUtilization method") // }, +// GetVgpuProcessesUtilizationInfoFunc: func() (nvml.VgpuProcessesUtilizationInfo, nvml.Return) { +// panic("mock out the GetVgpuProcessesUtilizationInfo method") +// }, // GetVgpuSchedulerCapabilitiesFunc: func() (nvml.VgpuSchedulerCapabilities, nvml.Return) { // panic("mock out the GetVgpuSchedulerCapabilities method") // }, @@ -495,6 +588,12 @@ var _ nvml.Device = &Device{} // GetVgpuSchedulerStateFunc: func() (nvml.VgpuSchedulerGetState, nvml.Return) { // panic("mock out the GetVgpuSchedulerState method") // }, +// 
GetVgpuTypeCreatablePlacementsFunc: func(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { +// panic("mock out the GetVgpuTypeCreatablePlacements method") +// }, +// GetVgpuTypeSupportedPlacementsFunc: func(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { +// panic("mock out the GetVgpuTypeSupportedPlacements method") +// }, // GetVgpuUtilizationFunc: func(v uint64) (nvml.ValueType, []nvml.VgpuInstanceUtilizationSample, nvml.Return) { // panic("mock out the GetVgpuUtilization method") // }, @@ -513,15 +612,30 @@ var _ nvml.Device = &Device{} // GpmQueryDeviceSupportVFunc: func() nvml.GpmSupportV { // panic("mock out the GpmQueryDeviceSupportV method") // }, +// GpmQueryIfStreamingEnabledFunc: func() (uint32, nvml.Return) { +// panic("mock out the GpmQueryIfStreamingEnabled method") +// }, // GpmSampleGetFunc: func(gpmSample nvml.GpmSample) nvml.Return { // panic("mock out the GpmSampleGet method") // }, +// GpmSetStreamingEnabledFunc: func(v uint32) nvml.Return { +// panic("mock out the GpmSetStreamingEnabled method") +// }, // IsMigDeviceHandleFunc: func() (bool, nvml.Return) { // panic("mock out the IsMigDeviceHandle method") // }, // OnSameBoardFunc: func(device nvml.Device) (int, nvml.Return) { // panic("mock out the OnSameBoard method") // }, +// PowerSmoothingActivatePresetProfileFunc: func(powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { +// panic("mock out the PowerSmoothingActivatePresetProfile method") +// }, +// PowerSmoothingSetStateFunc: func(powerSmoothingState *nvml.PowerSmoothingState) nvml.Return { +// panic("mock out the PowerSmoothingSetState method") +// }, +// PowerSmoothingUpdatePresetProfileParamFunc: func(powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { +// panic("mock out the PowerSmoothingUpdatePresetProfileParam method") +// }, // RegisterEventsFunc: func(v uint64, eventSet nvml.EventSet) nvml.Return { // panic("mock out the RegisterEvents method") // }, @@ -552,9 
+666,15 @@ var _ nvml.Device = &Device{} // SetAutoBoostedClocksEnabledFunc: func(enableState nvml.EnableState) nvml.Return { // panic("mock out the SetAutoBoostedClocksEnabled method") // }, +// SetClockOffsetsFunc: func(clockOffset nvml.ClockOffset) nvml.Return { +// panic("mock out the SetClockOffsets method") +// }, // SetComputeModeFunc: func(computeMode nvml.ComputeMode) nvml.Return { // panic("mock out the SetComputeMode method") // }, +// SetConfComputeUnprotectedMemSizeFunc: func(v uint64) nvml.Return { +// panic("mock out the SetConfComputeUnprotectedMemSize method") +// }, // SetCpuAffinityFunc: func() nvml.Return { // panic("mock out the SetCpuAffinity method") // }, @@ -564,6 +684,9 @@ var _ nvml.Device = &Device{} // SetDefaultFanSpeed_v2Func: func(n int) nvml.Return { // panic("mock out the SetDefaultFanSpeed_v2 method") // }, +// SetDramEncryptionModeFunc: func(dramEncryptionInfo *nvml.DramEncryptionInfo) nvml.Return { +// panic("mock out the SetDramEncryptionMode method") +// }, // SetDriverModelFunc: func(driverModel nvml.DriverModel, v uint32) nvml.Return { // panic("mock out the SetDriverModel method") // }, @@ -600,15 +723,27 @@ var _ nvml.Device = &Device{} // SetNvLinkUtilizationControlFunc: func(n1 int, n2 int, nvLinkUtilizationControl *nvml.NvLinkUtilizationControl, b bool) nvml.Return { // panic("mock out the SetNvLinkUtilizationControl method") // }, +// SetNvlinkBwModeFunc: func(nvlinkSetBwMode *nvml.NvlinkSetBwMode) nvml.Return { +// panic("mock out the SetNvlinkBwMode method") +// }, // SetPersistenceModeFunc: func(enableState nvml.EnableState) nvml.Return { // panic("mock out the SetPersistenceMode method") // }, // SetPowerManagementLimitFunc: func(v uint32) nvml.Return { // panic("mock out the SetPowerManagementLimit method") // }, +// SetPowerManagementLimit_v2Func: func(powerValue_v2 *nvml.PowerValue_v2) nvml.Return { +// panic("mock out the SetPowerManagementLimit_v2 method") +// }, // SetTemperatureThresholdFunc: 
func(temperatureThresholds nvml.TemperatureThresholds, n int) nvml.Return { // panic("mock out the SetTemperatureThreshold method") // }, +// SetVgpuCapabilitiesFunc: func(deviceVgpuCapability nvml.DeviceVgpuCapability, enableState nvml.EnableState) nvml.Return { +// panic("mock out the SetVgpuCapabilities method") +// }, +// SetVgpuHeterogeneousModeFunc: func(vgpuHeterogeneousMode nvml.VgpuHeterogeneousMode) nvml.Return { +// panic("mock out the SetVgpuHeterogeneousMode method") +// }, // SetVgpuSchedulerStateFunc: func(vgpuSchedulerSetState *nvml.VgpuSchedulerSetState) nvml.Return { // panic("mock out the SetVgpuSchedulerState method") // }, @@ -621,6 +756,18 @@ var _ nvml.Device = &Device{} // VgpuTypeGetMaxInstancesFunc: func(vgpuTypeId nvml.VgpuTypeId) (int, nvml.Return) { // panic("mock out the VgpuTypeGetMaxInstances method") // }, +// WorkloadPowerProfileClearRequestedProfilesFunc: func(workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { +// panic("mock out the WorkloadPowerProfileClearRequestedProfiles method") +// }, +// WorkloadPowerProfileGetCurrentProfilesFunc: func() (nvml.WorkloadPowerProfileCurrentProfiles, nvml.Return) { +// panic("mock out the WorkloadPowerProfileGetCurrentProfiles method") +// }, +// WorkloadPowerProfileGetProfilesInfoFunc: func() (nvml.WorkloadPowerProfileProfilesInfo, nvml.Return) { +// panic("mock out the WorkloadPowerProfileGetProfilesInfo method") +// }, +// WorkloadPowerProfileSetRequestedProfilesFunc: func(workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { +// panic("mock out the WorkloadPowerProfileSetRequestedProfiles method") +// }, // } // // // use mockedDevice in code that requires nvml.Device @@ -628,12 +775,6 @@ var _ nvml.Device = &Device{} // // } type Device struct { - // CcuGetStreamStateFunc mocks the CcuGetStreamState method. 
- CcuGetStreamStateFunc func() (int, nvml.Return) - - // CcuSetStreamStateFunc mocks the CcuSetStreamState method. - CcuSetStreamStateFunc func(n int) nvml.Return - // ClearAccountingPidsFunc mocks the ClearAccountingPids method. ClearAccountingPidsFunc func() nvml.Return @@ -706,6 +847,12 @@ type Device struct { // GetBusTypeFunc mocks the GetBusType method. GetBusTypeFunc func() (nvml.BusType, nvml.Return) + // GetC2cModeInfoVFunc mocks the GetC2cModeInfoV method. + GetC2cModeInfoVFunc func() nvml.C2cModeInfoHandler + + // GetCapabilitiesFunc mocks the GetCapabilities method. + GetCapabilitiesFunc func() (nvml.DeviceCapabilities, nvml.Return) + // GetClkMonStatusFunc mocks the GetClkMonStatus method. GetClkMonStatusFunc func() (nvml.ClkMonStatus, nvml.Return) @@ -715,6 +862,9 @@ type Device struct { // GetClockInfoFunc mocks the GetClockInfo method. GetClockInfoFunc func(clockType nvml.ClockType) (uint32, nvml.Return) + // GetClockOffsetsFunc mocks the GetClockOffsets method. + GetClockOffsetsFunc func() (nvml.ClockOffset, nvml.Return) + // GetComputeInstanceIdFunc mocks the GetComputeInstanceId method. GetComputeInstanceIdFunc func() (int, nvml.Return) @@ -724,6 +874,21 @@ type Device struct { // GetComputeRunningProcessesFunc mocks the GetComputeRunningProcesses method. GetComputeRunningProcessesFunc func() ([]nvml.ProcessInfo, nvml.Return) + // GetConfComputeGpuAttestationReportFunc mocks the GetConfComputeGpuAttestationReport method. + GetConfComputeGpuAttestationReportFunc func() (nvml.ConfComputeGpuAttestationReport, nvml.Return) + + // GetConfComputeGpuCertificateFunc mocks the GetConfComputeGpuCertificate method. + GetConfComputeGpuCertificateFunc func() (nvml.ConfComputeGpuCertificate, nvml.Return) + + // GetConfComputeMemSizeInfoFunc mocks the GetConfComputeMemSizeInfo method. 
+ GetConfComputeMemSizeInfoFunc func() (nvml.ConfComputeMemSizeInfo, nvml.Return) + + // GetConfComputeProtectedMemoryUsageFunc mocks the GetConfComputeProtectedMemoryUsage method. + GetConfComputeProtectedMemoryUsageFunc func() (nvml.Memory, nvml.Return) + + // GetCoolerInfoFunc mocks the GetCoolerInfo method. + GetCoolerInfoFunc func() (nvml.CoolerInfo, nvml.Return) + // GetCpuAffinityFunc mocks the GetCpuAffinity method. GetCpuAffinityFunc func(n int) ([]uint, nvml.Return) @@ -742,6 +907,12 @@ type Device struct { // GetCurrPcieLinkWidthFunc mocks the GetCurrPcieLinkWidth method. GetCurrPcieLinkWidthFunc func() (int, nvml.Return) + // GetCurrentClockFreqsFunc mocks the GetCurrentClockFreqs method. + GetCurrentClockFreqsFunc func() (nvml.DeviceCurrentClockFreqs, nvml.Return) + + // GetCurrentClocksEventReasonsFunc mocks the GetCurrentClocksEventReasons method. + GetCurrentClocksEventReasonsFunc func() (uint64, nvml.Return) + // GetCurrentClocksThrottleReasonsFunc mocks the GetCurrentClocksThrottleReasons method. GetCurrentClocksThrottleReasonsFunc func() (uint64, nvml.Return) @@ -766,9 +937,15 @@ type Device struct { // GetDisplayModeFunc mocks the GetDisplayMode method. GetDisplayModeFunc func() (nvml.EnableState, nvml.Return) + // GetDramEncryptionModeFunc mocks the GetDramEncryptionMode method. + GetDramEncryptionModeFunc func() (nvml.DramEncryptionInfo, nvml.DramEncryptionInfo, nvml.Return) + // GetDriverModelFunc mocks the GetDriverModel method. GetDriverModelFunc func() (nvml.DriverModel, nvml.DriverModel, nvml.Return) + // GetDriverModel_v2Func mocks the GetDriverModel_v2 method. + GetDriverModel_v2Func func() (nvml.DriverModel, nvml.DriverModel, nvml.Return) + // GetDynamicPstatesInfoFunc mocks the GetDynamicPstatesInfo method. GetDynamicPstatesInfoFunc func() (nvml.GpuDynamicPstatesInfo, nvml.Return) @@ -802,6 +979,9 @@ type Device struct { // GetFanSpeedFunc mocks the GetFanSpeed method. 
GetFanSpeedFunc func() (uint32, nvml.Return) + // GetFanSpeedRPMFunc mocks the GetFanSpeedRPM method. + GetFanSpeedRPMFunc func() (nvml.FanSpeedInfo, nvml.Return) + // GetFanSpeed_v2Func mocks the GetFanSpeed_v2 method. GetFanSpeed_v2Func func(n int) (uint32, nvml.Return) @@ -817,6 +997,9 @@ type Device struct { // GetGpuFabricInfoFunc mocks the GetGpuFabricInfo method. GetGpuFabricInfoFunc func() (nvml.GpuFabricInfo, nvml.Return) + // GetGpuFabricInfoVFunc mocks the GetGpuFabricInfoV method. + GetGpuFabricInfoVFunc func() nvml.GpuFabricInfoHandler + // GetGpuInstanceByIdFunc mocks the GetGpuInstanceById method. GetGpuInstanceByIdFunc func(n int) (nvml.GpuInstance, nvml.Return) @@ -830,7 +1013,7 @@ type Device struct { GetGpuInstanceProfileInfoFunc func(n int) (nvml.GpuInstanceProfileInfo, nvml.Return) // GetGpuInstanceProfileInfoVFunc mocks the GetGpuInstanceProfileInfoV method. - GetGpuInstanceProfileInfoVFunc func(n int) nvml.GpuInstanceProfileInfoV + GetGpuInstanceProfileInfoVFunc func(n int) nvml.GpuInstanceProfileInfoHandler // GetGpuInstanceRemainingCapacityFunc mocks the GetGpuInstanceRemainingCapacity method. GetGpuInstanceRemainingCapacityFunc func(gpuInstanceProfileInfo *nvml.GpuInstanceProfileInfo) (int, nvml.Return) @@ -874,9 +1057,18 @@ type Device struct { // GetIrqNumFunc mocks the GetIrqNum method. GetIrqNumFunc func() (int, nvml.Return) + // GetJpgUtilizationFunc mocks the GetJpgUtilization method. + GetJpgUtilizationFunc func() (uint32, uint32, nvml.Return) + + // GetLastBBXFlushTimeFunc mocks the GetLastBBXFlushTime method. + GetLastBBXFlushTimeFunc func() (uint64, uint, nvml.Return) + // GetMPSComputeRunningProcessesFunc mocks the GetMPSComputeRunningProcesses method. GetMPSComputeRunningProcessesFunc func() ([]nvml.ProcessInfo, nvml.Return) + // GetMarginTemperatureFunc mocks the GetMarginTemperature method. + GetMarginTemperatureFunc func() (nvml.MarginTemperature, nvml.Return) + // GetMaxClockInfoFunc mocks the GetMaxClockInfo method. 
GetMaxClockInfoFunc func(clockType nvml.ClockType) (uint32, nvml.Return) @@ -928,6 +1120,9 @@ type Device struct { // GetMinorNumberFunc mocks the GetMinorNumber method. GetMinorNumberFunc func() (int, nvml.Return) + // GetModuleIdFunc mocks the GetModuleId method. + GetModuleIdFunc func() (int, nvml.Return) + // GetMultiGpuBoardFunc mocks the GetMultiGpuBoard method. GetMultiGpuBoardFunc func() (int, nvml.Return) @@ -940,6 +1135,9 @@ type Device struct { // GetNumGpuCoresFunc mocks the GetNumGpuCores method. GetNumGpuCoresFunc func() (int, nvml.Return) + // GetNumaNodeIdFunc mocks the GetNumaNodeId method. + GetNumaNodeIdFunc func() (int, nvml.Return) + // GetNvLinkCapabilityFunc mocks the GetNvLinkCapability method. GetNvLinkCapabilityFunc func(n int, nvLinkCapability nvml.NvLinkCapability) (uint32, nvml.Return) @@ -964,12 +1162,24 @@ type Device struct { // GetNvLinkVersionFunc mocks the GetNvLinkVersion method. GetNvLinkVersionFunc func(n int) (uint32, nvml.Return) + // GetNvlinkBwModeFunc mocks the GetNvlinkBwMode method. + GetNvlinkBwModeFunc func() (nvml.NvlinkGetBwMode, nvml.Return) + + // GetNvlinkSupportedBwModesFunc mocks the GetNvlinkSupportedBwModes method. + GetNvlinkSupportedBwModesFunc func() (nvml.NvlinkSupportedBwModes, nvml.Return) + + // GetOfaUtilizationFunc mocks the GetOfaUtilization method. + GetOfaUtilizationFunc func() (uint32, uint32, nvml.Return) + // GetP2PStatusFunc mocks the GetP2PStatus method. GetP2PStatusFunc func(device nvml.Device, gpuP2PCapsIndex nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) // GetPciInfoFunc mocks the GetPciInfo method. GetPciInfoFunc func() (nvml.PciInfo, nvml.Return) + // GetPciInfoExtFunc mocks the GetPciInfoExt method. + GetPciInfoExtFunc func() (nvml.PciInfoExt, nvml.Return) + // GetPcieLinkMaxSpeedFunc mocks the GetPcieLinkMaxSpeed method. GetPcieLinkMaxSpeedFunc func() (uint32, nvml.Return) @@ -982,6 +1192,9 @@ type Device struct { // GetPcieThroughputFunc mocks the GetPcieThroughput method. 
GetPcieThroughputFunc func(pcieUtilCounter nvml.PcieUtilCounter) (uint32, nvml.Return) + // GetPerformanceModesFunc mocks the GetPerformanceModes method. + GetPerformanceModesFunc func() (nvml.DevicePerfModes, nvml.Return) + // GetPerformanceStateFunc mocks the GetPerformanceState method. GetPerformanceStateFunc func() (nvml.Pstates, nvml.Return) @@ -991,6 +1204,9 @@ type Device struct { // GetPgpuMetadataStringFunc mocks the GetPgpuMetadataString method. GetPgpuMetadataStringFunc func() (string, nvml.Return) + // GetPlatformInfoFunc mocks the GetPlatformInfo method. + GetPlatformInfoFunc func() (nvml.PlatformInfo, nvml.Return) + // GetPowerManagementDefaultLimitFunc mocks the GetPowerManagementDefaultLimit method. GetPowerManagementDefaultLimitFunc func() (uint32, nvml.Return) @@ -1015,6 +1231,9 @@ type Device struct { // GetProcessUtilizationFunc mocks the GetProcessUtilization method. GetProcessUtilizationFunc func(v uint64) ([]nvml.ProcessUtilizationSample, nvml.Return) + // GetProcessesUtilizationInfoFunc mocks the GetProcessesUtilizationInfo method. + GetProcessesUtilizationInfoFunc func() (nvml.ProcessesUtilizationInfo, nvml.Return) + // GetRemappedRowsFunc mocks the GetRemappedRows method. GetRemappedRowsFunc func() (int, int, bool, bool, nvml.Return) @@ -1030,12 +1249,21 @@ type Device struct { // GetRowRemapperHistogramFunc mocks the GetRowRemapperHistogram method. GetRowRemapperHistogramFunc func() (nvml.RowRemapperHistogramValues, nvml.Return) + // GetRunningProcessDetailListFunc mocks the GetRunningProcessDetailList method. + GetRunningProcessDetailListFunc func() (nvml.ProcessDetailList, nvml.Return) + // GetSamplesFunc mocks the GetSamples method. GetSamplesFunc func(samplingType nvml.SamplingType, v uint64) (nvml.ValueType, []nvml.Sample, nvml.Return) // GetSerialFunc mocks the GetSerial method. GetSerialFunc func() (string, nvml.Return) + // GetSramEccErrorStatusFunc mocks the GetSramEccErrorStatus method. 
+ GetSramEccErrorStatusFunc func() (nvml.EccSramErrorStatus, nvml.Return) + + // GetSupportedClocksEventReasonsFunc mocks the GetSupportedClocksEventReasons method. + GetSupportedClocksEventReasonsFunc func() (uint64, nvml.Return) + // GetSupportedClocksThrottleReasonsFunc mocks the GetSupportedClocksThrottleReasons method. GetSupportedClocksThrottleReasonsFunc func() (uint64, nvml.Return) @@ -1063,6 +1291,9 @@ type Device struct { // GetTemperatureThresholdFunc mocks the GetTemperatureThreshold method. GetTemperatureThresholdFunc func(temperatureThresholds nvml.TemperatureThresholds) (uint32, nvml.Return) + // GetTemperatureVFunc mocks the GetTemperatureV method. + GetTemperatureVFunc func() nvml.TemperatureHandler + // GetThermalSettingsFunc mocks the GetThermalSettings method. GetThermalSettingsFunc func(v uint32) (nvml.GpuThermalSettings, nvml.Return) @@ -1090,12 +1321,21 @@ type Device struct { // GetVgpuCapabilitiesFunc mocks the GetVgpuCapabilities method. GetVgpuCapabilitiesFunc func(deviceVgpuCapability nvml.DeviceVgpuCapability) (bool, nvml.Return) + // GetVgpuHeterogeneousModeFunc mocks the GetVgpuHeterogeneousMode method. + GetVgpuHeterogeneousModeFunc func() (nvml.VgpuHeterogeneousMode, nvml.Return) + + // GetVgpuInstancesUtilizationInfoFunc mocks the GetVgpuInstancesUtilizationInfo method. + GetVgpuInstancesUtilizationInfoFunc func() (nvml.VgpuInstancesUtilizationInfo, nvml.Return) + // GetVgpuMetadataFunc mocks the GetVgpuMetadata method. GetVgpuMetadataFunc func() (nvml.VgpuPgpuMetadata, nvml.Return) // GetVgpuProcessUtilizationFunc mocks the GetVgpuProcessUtilization method. GetVgpuProcessUtilizationFunc func(v uint64) ([]nvml.VgpuProcessUtilizationSample, nvml.Return) + // GetVgpuProcessesUtilizationInfoFunc mocks the GetVgpuProcessesUtilizationInfo method. + GetVgpuProcessesUtilizationInfoFunc func() (nvml.VgpuProcessesUtilizationInfo, nvml.Return) + // GetVgpuSchedulerCapabilitiesFunc mocks the GetVgpuSchedulerCapabilities method. 
GetVgpuSchedulerCapabilitiesFunc func() (nvml.VgpuSchedulerCapabilities, nvml.Return) @@ -1105,6 +1345,12 @@ type Device struct { // GetVgpuSchedulerStateFunc mocks the GetVgpuSchedulerState method. GetVgpuSchedulerStateFunc func() (nvml.VgpuSchedulerGetState, nvml.Return) + // GetVgpuTypeCreatablePlacementsFunc mocks the GetVgpuTypeCreatablePlacements method. + GetVgpuTypeCreatablePlacementsFunc func(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) + + // GetVgpuTypeSupportedPlacementsFunc mocks the GetVgpuTypeSupportedPlacements method. + GetVgpuTypeSupportedPlacementsFunc func(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) + // GetVgpuUtilizationFunc mocks the GetVgpuUtilization method. GetVgpuUtilizationFunc func(v uint64) (nvml.ValueType, []nvml.VgpuInstanceUtilizationSample, nvml.Return) @@ -1123,15 +1369,30 @@ type Device struct { // GpmQueryDeviceSupportVFunc mocks the GpmQueryDeviceSupportV method. GpmQueryDeviceSupportVFunc func() nvml.GpmSupportV + // GpmQueryIfStreamingEnabledFunc mocks the GpmQueryIfStreamingEnabled method. + GpmQueryIfStreamingEnabledFunc func() (uint32, nvml.Return) + // GpmSampleGetFunc mocks the GpmSampleGet method. GpmSampleGetFunc func(gpmSample nvml.GpmSample) nvml.Return + // GpmSetStreamingEnabledFunc mocks the GpmSetStreamingEnabled method. + GpmSetStreamingEnabledFunc func(v uint32) nvml.Return + // IsMigDeviceHandleFunc mocks the IsMigDeviceHandle method. IsMigDeviceHandleFunc func() (bool, nvml.Return) // OnSameBoardFunc mocks the OnSameBoard method. OnSameBoardFunc func(device nvml.Device) (int, nvml.Return) + // PowerSmoothingActivatePresetProfileFunc mocks the PowerSmoothingActivatePresetProfile method. + PowerSmoothingActivatePresetProfileFunc func(powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return + + // PowerSmoothingSetStateFunc mocks the PowerSmoothingSetState method. 
+ PowerSmoothingSetStateFunc func(powerSmoothingState *nvml.PowerSmoothingState) nvml.Return + + // PowerSmoothingUpdatePresetProfileParamFunc mocks the PowerSmoothingUpdatePresetProfileParam method. + PowerSmoothingUpdatePresetProfileParamFunc func(powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return + // RegisterEventsFunc mocks the RegisterEvents method. RegisterEventsFunc func(v uint64, eventSet nvml.EventSet) nvml.Return @@ -1162,9 +1423,15 @@ type Device struct { // SetAutoBoostedClocksEnabledFunc mocks the SetAutoBoostedClocksEnabled method. SetAutoBoostedClocksEnabledFunc func(enableState nvml.EnableState) nvml.Return + // SetClockOffsetsFunc mocks the SetClockOffsets method. + SetClockOffsetsFunc func(clockOffset nvml.ClockOffset) nvml.Return + // SetComputeModeFunc mocks the SetComputeMode method. SetComputeModeFunc func(computeMode nvml.ComputeMode) nvml.Return + // SetConfComputeUnprotectedMemSizeFunc mocks the SetConfComputeUnprotectedMemSize method. + SetConfComputeUnprotectedMemSizeFunc func(v uint64) nvml.Return + // SetCpuAffinityFunc mocks the SetCpuAffinity method. SetCpuAffinityFunc func() nvml.Return @@ -1174,6 +1441,9 @@ type Device struct { // SetDefaultFanSpeed_v2Func mocks the SetDefaultFanSpeed_v2 method. SetDefaultFanSpeed_v2Func func(n int) nvml.Return + // SetDramEncryptionModeFunc mocks the SetDramEncryptionMode method. + SetDramEncryptionModeFunc func(dramEncryptionInfo *nvml.DramEncryptionInfo) nvml.Return + // SetDriverModelFunc mocks the SetDriverModel method. SetDriverModelFunc func(driverModel nvml.DriverModel, v uint32) nvml.Return @@ -1210,15 +1480,27 @@ type Device struct { // SetNvLinkUtilizationControlFunc mocks the SetNvLinkUtilizationControl method. SetNvLinkUtilizationControlFunc func(n1 int, n2 int, nvLinkUtilizationControl *nvml.NvLinkUtilizationControl, b bool) nvml.Return + // SetNvlinkBwModeFunc mocks the SetNvlinkBwMode method. 
+ SetNvlinkBwModeFunc func(nvlinkSetBwMode *nvml.NvlinkSetBwMode) nvml.Return + // SetPersistenceModeFunc mocks the SetPersistenceMode method. SetPersistenceModeFunc func(enableState nvml.EnableState) nvml.Return // SetPowerManagementLimitFunc mocks the SetPowerManagementLimit method. SetPowerManagementLimitFunc func(v uint32) nvml.Return + // SetPowerManagementLimit_v2Func mocks the SetPowerManagementLimit_v2 method. + SetPowerManagementLimit_v2Func func(powerValue_v2 *nvml.PowerValue_v2) nvml.Return + // SetTemperatureThresholdFunc mocks the SetTemperatureThreshold method. SetTemperatureThresholdFunc func(temperatureThresholds nvml.TemperatureThresholds, n int) nvml.Return + // SetVgpuCapabilitiesFunc mocks the SetVgpuCapabilities method. + SetVgpuCapabilitiesFunc func(deviceVgpuCapability nvml.DeviceVgpuCapability, enableState nvml.EnableState) nvml.Return + + // SetVgpuHeterogeneousModeFunc mocks the SetVgpuHeterogeneousMode method. + SetVgpuHeterogeneousModeFunc func(vgpuHeterogeneousMode nvml.VgpuHeterogeneousMode) nvml.Return + // SetVgpuSchedulerStateFunc mocks the SetVgpuSchedulerState method. SetVgpuSchedulerStateFunc func(vgpuSchedulerSetState *nvml.VgpuSchedulerSetState) nvml.Return @@ -1231,16 +1513,20 @@ type Device struct { // VgpuTypeGetMaxInstancesFunc mocks the VgpuTypeGetMaxInstances method. VgpuTypeGetMaxInstancesFunc func(vgpuTypeId nvml.VgpuTypeId) (int, nvml.Return) + // WorkloadPowerProfileClearRequestedProfilesFunc mocks the WorkloadPowerProfileClearRequestedProfiles method. + WorkloadPowerProfileClearRequestedProfilesFunc func(workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return + + // WorkloadPowerProfileGetCurrentProfilesFunc mocks the WorkloadPowerProfileGetCurrentProfiles method. + WorkloadPowerProfileGetCurrentProfilesFunc func() (nvml.WorkloadPowerProfileCurrentProfiles, nvml.Return) + + // WorkloadPowerProfileGetProfilesInfoFunc mocks the WorkloadPowerProfileGetProfilesInfo method. 
+ WorkloadPowerProfileGetProfilesInfoFunc func() (nvml.WorkloadPowerProfileProfilesInfo, nvml.Return) + + // WorkloadPowerProfileSetRequestedProfilesFunc mocks the WorkloadPowerProfileSetRequestedProfiles method. + WorkloadPowerProfileSetRequestedProfilesFunc func(workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return + // calls tracks calls to the methods. calls struct { - // CcuGetStreamState holds details about calls to the CcuGetStreamState method. - CcuGetStreamState []struct { - } - // CcuSetStreamState holds details about calls to the CcuSetStreamState method. - CcuSetStreamState []struct { - // N is the n argument value. - N int - } // ClearAccountingPids holds details about calls to the ClearAccountingPids method. ClearAccountingPids []struct { } @@ -1335,6 +1621,12 @@ type Device struct { // GetBusType holds details about calls to the GetBusType method. GetBusType []struct { } + // GetC2cModeInfoV holds details about calls to the GetC2cModeInfoV method. + GetC2cModeInfoV []struct { + } + // GetCapabilities holds details about calls to the GetCapabilities method. + GetCapabilities []struct { + } // GetClkMonStatus holds details about calls to the GetClkMonStatus method. GetClkMonStatus []struct { } @@ -1350,6 +1642,9 @@ type Device struct { // ClockType is the clockType argument value. ClockType nvml.ClockType } + // GetClockOffsets holds details about calls to the GetClockOffsets method. + GetClockOffsets []struct { + } // GetComputeInstanceId holds details about calls to the GetComputeInstanceId method. GetComputeInstanceId []struct { } @@ -1359,6 +1654,21 @@ type Device struct { // GetComputeRunningProcesses holds details about calls to the GetComputeRunningProcesses method. GetComputeRunningProcesses []struct { } + // GetConfComputeGpuAttestationReport holds details about calls to the GetConfComputeGpuAttestationReport method. 
+ GetConfComputeGpuAttestationReport []struct { + } + // GetConfComputeGpuCertificate holds details about calls to the GetConfComputeGpuCertificate method. + GetConfComputeGpuCertificate []struct { + } + // GetConfComputeMemSizeInfo holds details about calls to the GetConfComputeMemSizeInfo method. + GetConfComputeMemSizeInfo []struct { + } + // GetConfComputeProtectedMemoryUsage holds details about calls to the GetConfComputeProtectedMemoryUsage method. + GetConfComputeProtectedMemoryUsage []struct { + } + // GetCoolerInfo holds details about calls to the GetCoolerInfo method. + GetCoolerInfo []struct { + } // GetCpuAffinity holds details about calls to the GetCpuAffinity method. GetCpuAffinity []struct { // N is the n argument value. @@ -1383,6 +1693,12 @@ type Device struct { // GetCurrPcieLinkWidth holds details about calls to the GetCurrPcieLinkWidth method. GetCurrPcieLinkWidth []struct { } + // GetCurrentClockFreqs holds details about calls to the GetCurrentClockFreqs method. + GetCurrentClockFreqs []struct { + } + // GetCurrentClocksEventReasons holds details about calls to the GetCurrentClocksEventReasons method. + GetCurrentClocksEventReasons []struct { + } // GetCurrentClocksThrottleReasons holds details about calls to the GetCurrentClocksThrottleReasons method. GetCurrentClocksThrottleReasons []struct { } @@ -1413,9 +1729,15 @@ type Device struct { // GetDisplayMode holds details about calls to the GetDisplayMode method. GetDisplayMode []struct { } + // GetDramEncryptionMode holds details about calls to the GetDramEncryptionMode method. + GetDramEncryptionMode []struct { + } // GetDriverModel holds details about calls to the GetDriverModel method. GetDriverModel []struct { } + // GetDriverModel_v2 holds details about calls to the GetDriverModel_v2 method. + GetDriverModel_v2 []struct { + } // GetDynamicPstatesInfo holds details about calls to the GetDynamicPstatesInfo method. 
GetDynamicPstatesInfo []struct { } @@ -1453,6 +1775,9 @@ type Device struct { // GetFanSpeed holds details about calls to the GetFanSpeed method. GetFanSpeed []struct { } + // GetFanSpeedRPM holds details about calls to the GetFanSpeedRPM method. + GetFanSpeedRPM []struct { + } // GetFanSpeed_v2 holds details about calls to the GetFanSpeed_v2 method. GetFanSpeed_v2 []struct { // N is the n argument value. @@ -1472,6 +1797,9 @@ type Device struct { // GetGpuFabricInfo holds details about calls to the GetGpuFabricInfo method. GetGpuFabricInfo []struct { } + // GetGpuFabricInfoV holds details about calls to the GetGpuFabricInfoV method. + GetGpuFabricInfoV []struct { + } // GetGpuInstanceById holds details about calls to the GetGpuInstanceById method. GetGpuInstanceById []struct { // N is the n argument value. @@ -1543,9 +1871,18 @@ type Device struct { // GetIrqNum holds details about calls to the GetIrqNum method. GetIrqNum []struct { } + // GetJpgUtilization holds details about calls to the GetJpgUtilization method. + GetJpgUtilization []struct { + } + // GetLastBBXFlushTime holds details about calls to the GetLastBBXFlushTime method. + GetLastBBXFlushTime []struct { + } // GetMPSComputeRunningProcesses holds details about calls to the GetMPSComputeRunningProcesses method. GetMPSComputeRunningProcesses []struct { } + // GetMarginTemperature holds details about calls to the GetMarginTemperature method. + GetMarginTemperature []struct { + } // GetMaxClockInfo holds details about calls to the GetMaxClockInfo method. GetMaxClockInfo []struct { // ClockType is the clockType argument value. @@ -1617,6 +1954,9 @@ type Device struct { // GetMinorNumber holds details about calls to the GetMinorNumber method. GetMinorNumber []struct { } + // GetModuleId holds details about calls to the GetModuleId method. + GetModuleId []struct { + } // GetMultiGpuBoard holds details about calls to the GetMultiGpuBoard method. 
GetMultiGpuBoard []struct { } @@ -1629,6 +1969,9 @@ type Device struct { // GetNumGpuCores holds details about calls to the GetNumGpuCores method. GetNumGpuCores []struct { } + // GetNumaNodeId holds details about calls to the GetNumaNodeId method. + GetNumaNodeId []struct { + } // GetNvLinkCapability holds details about calls to the GetNvLinkCapability method. GetNvLinkCapability []struct { // N is the n argument value. @@ -1677,6 +2020,15 @@ type Device struct { // N is the n argument value. N int } + // GetNvlinkBwMode holds details about calls to the GetNvlinkBwMode method. + GetNvlinkBwMode []struct { + } + // GetNvlinkSupportedBwModes holds details about calls to the GetNvlinkSupportedBwModes method. + GetNvlinkSupportedBwModes []struct { + } + // GetOfaUtilization holds details about calls to the GetOfaUtilization method. + GetOfaUtilization []struct { + } // GetP2PStatus holds details about calls to the GetP2PStatus method. GetP2PStatus []struct { // Device is the device argument value. @@ -1687,6 +2039,9 @@ type Device struct { // GetPciInfo holds details about calls to the GetPciInfo method. GetPciInfo []struct { } + // GetPciInfoExt holds details about calls to the GetPciInfoExt method. + GetPciInfoExt []struct { + } // GetPcieLinkMaxSpeed holds details about calls to the GetPcieLinkMaxSpeed method. GetPcieLinkMaxSpeed []struct { } @@ -1701,6 +2056,9 @@ type Device struct { // PcieUtilCounter is the pcieUtilCounter argument value. PcieUtilCounter nvml.PcieUtilCounter } + // GetPerformanceModes holds details about calls to the GetPerformanceModes method. + GetPerformanceModes []struct { + } // GetPerformanceState holds details about calls to the GetPerformanceState method. GetPerformanceState []struct { } @@ -1710,6 +2068,9 @@ type Device struct { // GetPgpuMetadataString holds details about calls to the GetPgpuMetadataString method. GetPgpuMetadataString []struct { } + // GetPlatformInfo holds details about calls to the GetPlatformInfo method. 
+ GetPlatformInfo []struct { + } // GetPowerManagementDefaultLimit holds details about calls to the GetPowerManagementDefaultLimit method. GetPowerManagementDefaultLimit []struct { } @@ -1736,6 +2097,9 @@ type Device struct { // V is the v argument value. V uint64 } + // GetProcessesUtilizationInfo holds details about calls to the GetProcessesUtilizationInfo method. + GetProcessesUtilizationInfo []struct { + } // GetRemappedRows holds details about calls to the GetRemappedRows method. GetRemappedRows []struct { } @@ -1755,6 +2119,9 @@ type Device struct { // GetRowRemapperHistogram holds details about calls to the GetRowRemapperHistogram method. GetRowRemapperHistogram []struct { } + // GetRunningProcessDetailList holds details about calls to the GetRunningProcessDetailList method. + GetRunningProcessDetailList []struct { + } // GetSamples holds details about calls to the GetSamples method. GetSamples []struct { // SamplingType is the samplingType argument value. @@ -1765,6 +2132,12 @@ type Device struct { // GetSerial holds details about calls to the GetSerial method. GetSerial []struct { } + // GetSramEccErrorStatus holds details about calls to the GetSramEccErrorStatus method. + GetSramEccErrorStatus []struct { + } + // GetSupportedClocksEventReasons holds details about calls to the GetSupportedClocksEventReasons method. + GetSupportedClocksEventReasons []struct { + } // GetSupportedClocksThrottleReasons holds details about calls to the GetSupportedClocksThrottleReasons method. GetSupportedClocksThrottleReasons []struct { } @@ -1800,6 +2173,9 @@ type Device struct { // TemperatureThresholds is the temperatureThresholds argument value. TemperatureThresholds nvml.TemperatureThresholds } + // GetTemperatureV holds details about calls to the GetTemperatureV method. + GetTemperatureV []struct { + } // GetThermalSettings holds details about calls to the GetThermalSettings method. GetThermalSettings []struct { // V is the v argument value. 
@@ -1839,6 +2215,12 @@ type Device struct { // DeviceVgpuCapability is the deviceVgpuCapability argument value. DeviceVgpuCapability nvml.DeviceVgpuCapability } + // GetVgpuHeterogeneousMode holds details about calls to the GetVgpuHeterogeneousMode method. + GetVgpuHeterogeneousMode []struct { + } + // GetVgpuInstancesUtilizationInfo holds details about calls to the GetVgpuInstancesUtilizationInfo method. + GetVgpuInstancesUtilizationInfo []struct { + } // GetVgpuMetadata holds details about calls to the GetVgpuMetadata method. GetVgpuMetadata []struct { } @@ -1847,6 +2229,9 @@ type Device struct { // V is the v argument value. V uint64 } + // GetVgpuProcessesUtilizationInfo holds details about calls to the GetVgpuProcessesUtilizationInfo method. + GetVgpuProcessesUtilizationInfo []struct { + } // GetVgpuSchedulerCapabilities holds details about calls to the GetVgpuSchedulerCapabilities method. GetVgpuSchedulerCapabilities []struct { } @@ -1856,6 +2241,16 @@ type Device struct { // GetVgpuSchedulerState holds details about calls to the GetVgpuSchedulerState method. GetVgpuSchedulerState []struct { } + // GetVgpuTypeCreatablePlacements holds details about calls to the GetVgpuTypeCreatablePlacements method. + GetVgpuTypeCreatablePlacements []struct { + // VgpuTypeId is the vgpuTypeId argument value. + VgpuTypeId nvml.VgpuTypeId + } + // GetVgpuTypeSupportedPlacements holds details about calls to the GetVgpuTypeSupportedPlacements method. + GetVgpuTypeSupportedPlacements []struct { + // VgpuTypeId is the vgpuTypeId argument value. + VgpuTypeId nvml.VgpuTypeId + } // GetVgpuUtilization holds details about calls to the GetVgpuUtilization method. GetVgpuUtilization []struct { // V is the v argument value. @@ -1882,11 +2277,19 @@ type Device struct { // GpmQueryDeviceSupportV holds details about calls to the GpmQueryDeviceSupportV method. GpmQueryDeviceSupportV []struct { } + // GpmQueryIfStreamingEnabled holds details about calls to the GpmQueryIfStreamingEnabled method. 
+ GpmQueryIfStreamingEnabled []struct { + } // GpmSampleGet holds details about calls to the GpmSampleGet method. GpmSampleGet []struct { // GpmSample is the gpmSample argument value. GpmSample nvml.GpmSample } + // GpmSetStreamingEnabled holds details about calls to the GpmSetStreamingEnabled method. + GpmSetStreamingEnabled []struct { + // V is the v argument value. + V uint32 + } // IsMigDeviceHandle holds details about calls to the IsMigDeviceHandle method. IsMigDeviceHandle []struct { } @@ -1895,6 +2298,21 @@ type Device struct { // Device is the device argument value. Device nvml.Device } + // PowerSmoothingActivatePresetProfile holds details about calls to the PowerSmoothingActivatePresetProfile method. + PowerSmoothingActivatePresetProfile []struct { + // PowerSmoothingProfile is the powerSmoothingProfile argument value. + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } + // PowerSmoothingSetState holds details about calls to the PowerSmoothingSetState method. + PowerSmoothingSetState []struct { + // PowerSmoothingState is the powerSmoothingState argument value. + PowerSmoothingState *nvml.PowerSmoothingState + } + // PowerSmoothingUpdatePresetProfileParam holds details about calls to the PowerSmoothingUpdatePresetProfileParam method. + PowerSmoothingUpdatePresetProfileParam []struct { + // PowerSmoothingProfile is the powerSmoothingProfile argument value. + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } // RegisterEvents holds details about calls to the RegisterEvents method. RegisterEvents []struct { // V is the v argument value. @@ -1947,11 +2365,21 @@ type Device struct { // EnableState is the enableState argument value. EnableState nvml.EnableState } + // SetClockOffsets holds details about calls to the SetClockOffsets method. + SetClockOffsets []struct { + // ClockOffset is the clockOffset argument value. + ClockOffset nvml.ClockOffset + } // SetComputeMode holds details about calls to the SetComputeMode method. 
SetComputeMode []struct { // ComputeMode is the computeMode argument value. ComputeMode nvml.ComputeMode } + // SetConfComputeUnprotectedMemSize holds details about calls to the SetConfComputeUnprotectedMemSize method. + SetConfComputeUnprotectedMemSize []struct { + // V is the v argument value. + V uint64 + } // SetCpuAffinity holds details about calls to the SetCpuAffinity method. SetCpuAffinity []struct { } @@ -1967,6 +2395,11 @@ type Device struct { // N is the n argument value. N int } + // SetDramEncryptionMode holds details about calls to the SetDramEncryptionMode method. + SetDramEncryptionMode []struct { + // DramEncryptionInfo is the dramEncryptionInfo argument value. + DramEncryptionInfo *nvml.DramEncryptionInfo + } // SetDriverModel holds details about calls to the SetDriverModel method. SetDriverModel []struct { // DriverModel is the driverModel argument value. @@ -2043,6 +2476,11 @@ type Device struct { // B is the b argument value. B bool } + // SetNvlinkBwMode holds details about calls to the SetNvlinkBwMode method. + SetNvlinkBwMode []struct { + // NvlinkSetBwMode is the nvlinkSetBwMode argument value. + NvlinkSetBwMode *nvml.NvlinkSetBwMode + } // SetPersistenceMode holds details about calls to the SetPersistenceMode method. SetPersistenceMode []struct { // EnableState is the enableState argument value. @@ -2053,6 +2491,11 @@ type Device struct { // V is the v argument value. V uint32 } + // SetPowerManagementLimit_v2 holds details about calls to the SetPowerManagementLimit_v2 method. + SetPowerManagementLimit_v2 []struct { + // PowerValue_v2 is the powerValue_v2 argument value. + PowerValue_v2 *nvml.PowerValue_v2 + } // SetTemperatureThreshold holds details about calls to the SetTemperatureThreshold method. SetTemperatureThreshold []struct { // TemperatureThresholds is the temperatureThresholds argument value. @@ -2060,6 +2503,18 @@ type Device struct { // N is the n argument value. 
N int } + // SetVgpuCapabilities holds details about calls to the SetVgpuCapabilities method. + SetVgpuCapabilities []struct { + // DeviceVgpuCapability is the deviceVgpuCapability argument value. + DeviceVgpuCapability nvml.DeviceVgpuCapability + // EnableState is the enableState argument value. + EnableState nvml.EnableState + } + // SetVgpuHeterogeneousMode holds details about calls to the SetVgpuHeterogeneousMode method. + SetVgpuHeterogeneousMode []struct { + // VgpuHeterogeneousMode is the vgpuHeterogeneousMode argument value. + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode + } // SetVgpuSchedulerState holds details about calls to the SetVgpuSchedulerState method. SetVgpuSchedulerState []struct { // VgpuSchedulerSetState is the vgpuSchedulerSetState argument value. @@ -2078,267 +2533,273 @@ type Device struct { // VgpuTypeId is the vgpuTypeId argument value. VgpuTypeId nvml.VgpuTypeId } - } - lockCcuGetStreamState sync.RWMutex - lockCcuSetStreamState sync.RWMutex - lockClearAccountingPids sync.RWMutex - lockClearCpuAffinity sync.RWMutex - lockClearEccErrorCounts sync.RWMutex - lockClearFieldValues sync.RWMutex - lockCreateGpuInstance sync.RWMutex - lockCreateGpuInstanceWithPlacement sync.RWMutex - lockFreezeNvLinkUtilizationCounter sync.RWMutex - lockGetAPIRestriction sync.RWMutex - lockGetAccountingBufferSize sync.RWMutex - lockGetAccountingMode sync.RWMutex - lockGetAccountingPids sync.RWMutex - lockGetAccountingStats sync.RWMutex - lockGetActiveVgpus sync.RWMutex - lockGetAdaptiveClockInfoStatus sync.RWMutex - lockGetApplicationsClock sync.RWMutex - lockGetArchitecture sync.RWMutex - lockGetAttributes sync.RWMutex - lockGetAutoBoostedClocksEnabled sync.RWMutex - lockGetBAR1MemoryInfo sync.RWMutex - lockGetBoardId sync.RWMutex - lockGetBoardPartNumber sync.RWMutex - lockGetBrand sync.RWMutex - lockGetBridgeChipInfo sync.RWMutex - lockGetBusType sync.RWMutex - lockGetClkMonStatus sync.RWMutex - lockGetClock sync.RWMutex - lockGetClockInfo sync.RWMutex - 
lockGetComputeInstanceId sync.RWMutex - lockGetComputeMode sync.RWMutex - lockGetComputeRunningProcesses sync.RWMutex - lockGetCpuAffinity sync.RWMutex - lockGetCpuAffinityWithinScope sync.RWMutex - lockGetCreatableVgpus sync.RWMutex - lockGetCudaComputeCapability sync.RWMutex - lockGetCurrPcieLinkGeneration sync.RWMutex - lockGetCurrPcieLinkWidth sync.RWMutex - lockGetCurrentClocksThrottleReasons sync.RWMutex - lockGetDecoderUtilization sync.RWMutex - lockGetDefaultApplicationsClock sync.RWMutex - lockGetDefaultEccMode sync.RWMutex - lockGetDetailedEccErrors sync.RWMutex - lockGetDeviceHandleFromMigDeviceHandle sync.RWMutex - lockGetDisplayActive sync.RWMutex - lockGetDisplayMode sync.RWMutex - lockGetDriverModel sync.RWMutex - lockGetDynamicPstatesInfo sync.RWMutex - lockGetEccMode sync.RWMutex - lockGetEncoderCapacity sync.RWMutex - lockGetEncoderSessions sync.RWMutex - lockGetEncoderStats sync.RWMutex - lockGetEncoderUtilization sync.RWMutex - lockGetEnforcedPowerLimit sync.RWMutex - lockGetFBCSessions sync.RWMutex - lockGetFBCStats sync.RWMutex - lockGetFanControlPolicy_v2 sync.RWMutex - lockGetFanSpeed sync.RWMutex - lockGetFanSpeed_v2 sync.RWMutex - lockGetFieldValues sync.RWMutex - lockGetGpcClkMinMaxVfOffset sync.RWMutex - lockGetGpcClkVfOffset sync.RWMutex - lockGetGpuFabricInfo sync.RWMutex - lockGetGpuInstanceById sync.RWMutex - lockGetGpuInstanceId sync.RWMutex - lockGetGpuInstancePossiblePlacements sync.RWMutex - lockGetGpuInstanceProfileInfo sync.RWMutex - lockGetGpuInstanceProfileInfoV sync.RWMutex - lockGetGpuInstanceRemainingCapacity sync.RWMutex - lockGetGpuInstances sync.RWMutex - lockGetGpuMaxPcieLinkGeneration sync.RWMutex - lockGetGpuOperationMode sync.RWMutex - lockGetGraphicsRunningProcesses sync.RWMutex - lockGetGridLicensableFeatures sync.RWMutex - lockGetGspFirmwareMode sync.RWMutex - lockGetGspFirmwareVersion sync.RWMutex - lockGetHostVgpuMode sync.RWMutex - lockGetIndex sync.RWMutex - lockGetInforomConfigurationChecksum sync.RWMutex - 
lockGetInforomImageVersion sync.RWMutex - lockGetInforomVersion sync.RWMutex - lockGetIrqNum sync.RWMutex - lockGetMPSComputeRunningProcesses sync.RWMutex - lockGetMaxClockInfo sync.RWMutex - lockGetMaxCustomerBoostClock sync.RWMutex - lockGetMaxMigDeviceCount sync.RWMutex - lockGetMaxPcieLinkGeneration sync.RWMutex - lockGetMaxPcieLinkWidth sync.RWMutex - lockGetMemClkMinMaxVfOffset sync.RWMutex - lockGetMemClkVfOffset sync.RWMutex - lockGetMemoryAffinity sync.RWMutex - lockGetMemoryBusWidth sync.RWMutex - lockGetMemoryErrorCounter sync.RWMutex - lockGetMemoryInfo sync.RWMutex - lockGetMemoryInfo_v2 sync.RWMutex - lockGetMigDeviceHandleByIndex sync.RWMutex - lockGetMigMode sync.RWMutex - lockGetMinMaxClockOfPState sync.RWMutex - lockGetMinMaxFanSpeed sync.RWMutex - lockGetMinorNumber sync.RWMutex - lockGetMultiGpuBoard sync.RWMutex - lockGetName sync.RWMutex - lockGetNumFans sync.RWMutex - lockGetNumGpuCores sync.RWMutex - lockGetNvLinkCapability sync.RWMutex - lockGetNvLinkErrorCounter sync.RWMutex - lockGetNvLinkRemoteDeviceType sync.RWMutex - lockGetNvLinkRemotePciInfo sync.RWMutex - lockGetNvLinkState sync.RWMutex - lockGetNvLinkUtilizationControl sync.RWMutex - lockGetNvLinkUtilizationCounter sync.RWMutex - lockGetNvLinkVersion sync.RWMutex - lockGetP2PStatus sync.RWMutex - lockGetPciInfo sync.RWMutex - lockGetPcieLinkMaxSpeed sync.RWMutex - lockGetPcieReplayCounter sync.RWMutex - lockGetPcieSpeed sync.RWMutex - lockGetPcieThroughput sync.RWMutex - lockGetPerformanceState sync.RWMutex - lockGetPersistenceMode sync.RWMutex - lockGetPgpuMetadataString sync.RWMutex - lockGetPowerManagementDefaultLimit sync.RWMutex - lockGetPowerManagementLimit sync.RWMutex - lockGetPowerManagementLimitConstraints sync.RWMutex - lockGetPowerManagementMode sync.RWMutex - lockGetPowerSource sync.RWMutex - lockGetPowerState sync.RWMutex - lockGetPowerUsage sync.RWMutex - lockGetProcessUtilization sync.RWMutex - lockGetRemappedRows sync.RWMutex - lockGetRetiredPages sync.RWMutex - 
lockGetRetiredPagesPendingStatus sync.RWMutex - lockGetRetiredPages_v2 sync.RWMutex - lockGetRowRemapperHistogram sync.RWMutex - lockGetSamples sync.RWMutex - lockGetSerial sync.RWMutex - lockGetSupportedClocksThrottleReasons sync.RWMutex - lockGetSupportedEventTypes sync.RWMutex - lockGetSupportedGraphicsClocks sync.RWMutex - lockGetSupportedMemoryClocks sync.RWMutex - lockGetSupportedPerformanceStates sync.RWMutex - lockGetSupportedVgpus sync.RWMutex - lockGetTargetFanSpeed sync.RWMutex - lockGetTemperature sync.RWMutex - lockGetTemperatureThreshold sync.RWMutex - lockGetThermalSettings sync.RWMutex - lockGetTopologyCommonAncestor sync.RWMutex - lockGetTopologyNearestGpus sync.RWMutex - lockGetTotalEccErrors sync.RWMutex - lockGetTotalEnergyConsumption sync.RWMutex - lockGetUUID sync.RWMutex - lockGetUtilizationRates sync.RWMutex - lockGetVbiosVersion sync.RWMutex - lockGetVgpuCapabilities sync.RWMutex - lockGetVgpuMetadata sync.RWMutex - lockGetVgpuProcessUtilization sync.RWMutex - lockGetVgpuSchedulerCapabilities sync.RWMutex - lockGetVgpuSchedulerLog sync.RWMutex - lockGetVgpuSchedulerState sync.RWMutex - lockGetVgpuUtilization sync.RWMutex - lockGetViolationStatus sync.RWMutex - lockGetVirtualizationMode sync.RWMutex - lockGpmMigSampleGet sync.RWMutex - lockGpmQueryDeviceSupport sync.RWMutex - lockGpmQueryDeviceSupportV sync.RWMutex - lockGpmSampleGet sync.RWMutex - lockIsMigDeviceHandle sync.RWMutex - lockOnSameBoard sync.RWMutex - lockRegisterEvents sync.RWMutex - lockResetApplicationsClocks sync.RWMutex - lockResetGpuLockedClocks sync.RWMutex - lockResetMemoryLockedClocks sync.RWMutex - lockResetNvLinkErrorCounters sync.RWMutex - lockResetNvLinkUtilizationCounter sync.RWMutex - lockSetAPIRestriction sync.RWMutex - lockSetAccountingMode sync.RWMutex - lockSetApplicationsClocks sync.RWMutex - lockSetAutoBoostedClocksEnabled sync.RWMutex - lockSetComputeMode sync.RWMutex - lockSetCpuAffinity sync.RWMutex - lockSetDefaultAutoBoostedClocksEnabled sync.RWMutex - 
lockSetDefaultFanSpeed_v2 sync.RWMutex - lockSetDriverModel sync.RWMutex - lockSetEccMode sync.RWMutex - lockSetFanControlPolicy sync.RWMutex - lockSetFanSpeed_v2 sync.RWMutex - lockSetGpcClkVfOffset sync.RWMutex - lockSetGpuLockedClocks sync.RWMutex - lockSetGpuOperationMode sync.RWMutex - lockSetMemClkVfOffset sync.RWMutex - lockSetMemoryLockedClocks sync.RWMutex - lockSetMigMode sync.RWMutex - lockSetNvLinkDeviceLowPowerThreshold sync.RWMutex - lockSetNvLinkUtilizationControl sync.RWMutex - lockSetPersistenceMode sync.RWMutex - lockSetPowerManagementLimit sync.RWMutex - lockSetTemperatureThreshold sync.RWMutex - lockSetVgpuSchedulerState sync.RWMutex - lockSetVirtualizationMode sync.RWMutex - lockValidateInforom sync.RWMutex - lockVgpuTypeGetMaxInstances sync.RWMutex -} - -// CcuGetStreamState calls CcuGetStreamStateFunc. -func (mock *Device) CcuGetStreamState() (int, nvml.Return) { - if mock.CcuGetStreamStateFunc == nil { - panic("Device.CcuGetStreamStateFunc: method is nil but Device.CcuGetStreamState was just called") - } - callInfo := struct { - }{} - mock.lockCcuGetStreamState.Lock() - mock.calls.CcuGetStreamState = append(mock.calls.CcuGetStreamState, callInfo) - mock.lockCcuGetStreamState.Unlock() - return mock.CcuGetStreamStateFunc() -} - -// CcuGetStreamStateCalls gets all the calls that were made to CcuGetStreamState. -// Check the length with: -// -// len(mockedDevice.CcuGetStreamStateCalls()) -func (mock *Device) CcuGetStreamStateCalls() []struct { -} { - var calls []struct { - } - mock.lockCcuGetStreamState.RLock() - calls = mock.calls.CcuGetStreamState - mock.lockCcuGetStreamState.RUnlock() - return calls -} - -// CcuSetStreamState calls CcuSetStreamStateFunc. 
-func (mock *Device) CcuSetStreamState(n int) nvml.Return { - if mock.CcuSetStreamStateFunc == nil { - panic("Device.CcuSetStreamStateFunc: method is nil but Device.CcuSetStreamState was just called") - } - callInfo := struct { - N int - }{ - N: n, - } - mock.lockCcuSetStreamState.Lock() - mock.calls.CcuSetStreamState = append(mock.calls.CcuSetStreamState, callInfo) - mock.lockCcuSetStreamState.Unlock() - return mock.CcuSetStreamStateFunc(n) -} - -// CcuSetStreamStateCalls gets all the calls that were made to CcuSetStreamState. -// Check the length with: -// -// len(mockedDevice.CcuSetStreamStateCalls()) -func (mock *Device) CcuSetStreamStateCalls() []struct { - N int -} { - var calls []struct { - N int - } - mock.lockCcuSetStreamState.RLock() - calls = mock.calls.CcuSetStreamState - mock.lockCcuSetStreamState.RUnlock() - return calls + // WorkloadPowerProfileClearRequestedProfiles holds details about calls to the WorkloadPowerProfileClearRequestedProfiles method. + WorkloadPowerProfileClearRequestedProfiles []struct { + // WorkloadPowerProfileRequestedProfiles is the workloadPowerProfileRequestedProfiles argument value. + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } + // WorkloadPowerProfileGetCurrentProfiles holds details about calls to the WorkloadPowerProfileGetCurrentProfiles method. + WorkloadPowerProfileGetCurrentProfiles []struct { + } + // WorkloadPowerProfileGetProfilesInfo holds details about calls to the WorkloadPowerProfileGetProfilesInfo method. + WorkloadPowerProfileGetProfilesInfo []struct { + } + // WorkloadPowerProfileSetRequestedProfiles holds details about calls to the WorkloadPowerProfileSetRequestedProfiles method. + WorkloadPowerProfileSetRequestedProfiles []struct { + // WorkloadPowerProfileRequestedProfiles is the workloadPowerProfileRequestedProfiles argument value. 
+ WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } + } + lockClearAccountingPids sync.RWMutex + lockClearCpuAffinity sync.RWMutex + lockClearEccErrorCounts sync.RWMutex + lockClearFieldValues sync.RWMutex + lockCreateGpuInstance sync.RWMutex + lockCreateGpuInstanceWithPlacement sync.RWMutex + lockFreezeNvLinkUtilizationCounter sync.RWMutex + lockGetAPIRestriction sync.RWMutex + lockGetAccountingBufferSize sync.RWMutex + lockGetAccountingMode sync.RWMutex + lockGetAccountingPids sync.RWMutex + lockGetAccountingStats sync.RWMutex + lockGetActiveVgpus sync.RWMutex + lockGetAdaptiveClockInfoStatus sync.RWMutex + lockGetApplicationsClock sync.RWMutex + lockGetArchitecture sync.RWMutex + lockGetAttributes sync.RWMutex + lockGetAutoBoostedClocksEnabled sync.RWMutex + lockGetBAR1MemoryInfo sync.RWMutex + lockGetBoardId sync.RWMutex + lockGetBoardPartNumber sync.RWMutex + lockGetBrand sync.RWMutex + lockGetBridgeChipInfo sync.RWMutex + lockGetBusType sync.RWMutex + lockGetC2cModeInfoV sync.RWMutex + lockGetCapabilities sync.RWMutex + lockGetClkMonStatus sync.RWMutex + lockGetClock sync.RWMutex + lockGetClockInfo sync.RWMutex + lockGetClockOffsets sync.RWMutex + lockGetComputeInstanceId sync.RWMutex + lockGetComputeMode sync.RWMutex + lockGetComputeRunningProcesses sync.RWMutex + lockGetConfComputeGpuAttestationReport sync.RWMutex + lockGetConfComputeGpuCertificate sync.RWMutex + lockGetConfComputeMemSizeInfo sync.RWMutex + lockGetConfComputeProtectedMemoryUsage sync.RWMutex + lockGetCoolerInfo sync.RWMutex + lockGetCpuAffinity sync.RWMutex + lockGetCpuAffinityWithinScope sync.RWMutex + lockGetCreatableVgpus sync.RWMutex + lockGetCudaComputeCapability sync.RWMutex + lockGetCurrPcieLinkGeneration sync.RWMutex + lockGetCurrPcieLinkWidth sync.RWMutex + lockGetCurrentClockFreqs sync.RWMutex + lockGetCurrentClocksEventReasons sync.RWMutex + lockGetCurrentClocksThrottleReasons sync.RWMutex + lockGetDecoderUtilization sync.RWMutex + 
lockGetDefaultApplicationsClock sync.RWMutex + lockGetDefaultEccMode sync.RWMutex + lockGetDetailedEccErrors sync.RWMutex + lockGetDeviceHandleFromMigDeviceHandle sync.RWMutex + lockGetDisplayActive sync.RWMutex + lockGetDisplayMode sync.RWMutex + lockGetDramEncryptionMode sync.RWMutex + lockGetDriverModel sync.RWMutex + lockGetDriverModel_v2 sync.RWMutex + lockGetDynamicPstatesInfo sync.RWMutex + lockGetEccMode sync.RWMutex + lockGetEncoderCapacity sync.RWMutex + lockGetEncoderSessions sync.RWMutex + lockGetEncoderStats sync.RWMutex + lockGetEncoderUtilization sync.RWMutex + lockGetEnforcedPowerLimit sync.RWMutex + lockGetFBCSessions sync.RWMutex + lockGetFBCStats sync.RWMutex + lockGetFanControlPolicy_v2 sync.RWMutex + lockGetFanSpeed sync.RWMutex + lockGetFanSpeedRPM sync.RWMutex + lockGetFanSpeed_v2 sync.RWMutex + lockGetFieldValues sync.RWMutex + lockGetGpcClkMinMaxVfOffset sync.RWMutex + lockGetGpcClkVfOffset sync.RWMutex + lockGetGpuFabricInfo sync.RWMutex + lockGetGpuFabricInfoV sync.RWMutex + lockGetGpuInstanceById sync.RWMutex + lockGetGpuInstanceId sync.RWMutex + lockGetGpuInstancePossiblePlacements sync.RWMutex + lockGetGpuInstanceProfileInfo sync.RWMutex + lockGetGpuInstanceProfileInfoV sync.RWMutex + lockGetGpuInstanceRemainingCapacity sync.RWMutex + lockGetGpuInstances sync.RWMutex + lockGetGpuMaxPcieLinkGeneration sync.RWMutex + lockGetGpuOperationMode sync.RWMutex + lockGetGraphicsRunningProcesses sync.RWMutex + lockGetGridLicensableFeatures sync.RWMutex + lockGetGspFirmwareMode sync.RWMutex + lockGetGspFirmwareVersion sync.RWMutex + lockGetHostVgpuMode sync.RWMutex + lockGetIndex sync.RWMutex + lockGetInforomConfigurationChecksum sync.RWMutex + lockGetInforomImageVersion sync.RWMutex + lockGetInforomVersion sync.RWMutex + lockGetIrqNum sync.RWMutex + lockGetJpgUtilization sync.RWMutex + lockGetLastBBXFlushTime sync.RWMutex + lockGetMPSComputeRunningProcesses sync.RWMutex + lockGetMarginTemperature sync.RWMutex + lockGetMaxClockInfo sync.RWMutex + 
lockGetMaxCustomerBoostClock sync.RWMutex + lockGetMaxMigDeviceCount sync.RWMutex + lockGetMaxPcieLinkGeneration sync.RWMutex + lockGetMaxPcieLinkWidth sync.RWMutex + lockGetMemClkMinMaxVfOffset sync.RWMutex + lockGetMemClkVfOffset sync.RWMutex + lockGetMemoryAffinity sync.RWMutex + lockGetMemoryBusWidth sync.RWMutex + lockGetMemoryErrorCounter sync.RWMutex + lockGetMemoryInfo sync.RWMutex + lockGetMemoryInfo_v2 sync.RWMutex + lockGetMigDeviceHandleByIndex sync.RWMutex + lockGetMigMode sync.RWMutex + lockGetMinMaxClockOfPState sync.RWMutex + lockGetMinMaxFanSpeed sync.RWMutex + lockGetMinorNumber sync.RWMutex + lockGetModuleId sync.RWMutex + lockGetMultiGpuBoard sync.RWMutex + lockGetName sync.RWMutex + lockGetNumFans sync.RWMutex + lockGetNumGpuCores sync.RWMutex + lockGetNumaNodeId sync.RWMutex + lockGetNvLinkCapability sync.RWMutex + lockGetNvLinkErrorCounter sync.RWMutex + lockGetNvLinkRemoteDeviceType sync.RWMutex + lockGetNvLinkRemotePciInfo sync.RWMutex + lockGetNvLinkState sync.RWMutex + lockGetNvLinkUtilizationControl sync.RWMutex + lockGetNvLinkUtilizationCounter sync.RWMutex + lockGetNvLinkVersion sync.RWMutex + lockGetNvlinkBwMode sync.RWMutex + lockGetNvlinkSupportedBwModes sync.RWMutex + lockGetOfaUtilization sync.RWMutex + lockGetP2PStatus sync.RWMutex + lockGetPciInfo sync.RWMutex + lockGetPciInfoExt sync.RWMutex + lockGetPcieLinkMaxSpeed sync.RWMutex + lockGetPcieReplayCounter sync.RWMutex + lockGetPcieSpeed sync.RWMutex + lockGetPcieThroughput sync.RWMutex + lockGetPerformanceModes sync.RWMutex + lockGetPerformanceState sync.RWMutex + lockGetPersistenceMode sync.RWMutex + lockGetPgpuMetadataString sync.RWMutex + lockGetPlatformInfo sync.RWMutex + lockGetPowerManagementDefaultLimit sync.RWMutex + lockGetPowerManagementLimit sync.RWMutex + lockGetPowerManagementLimitConstraints sync.RWMutex + lockGetPowerManagementMode sync.RWMutex + lockGetPowerSource sync.RWMutex + lockGetPowerState sync.RWMutex + lockGetPowerUsage sync.RWMutex + 
lockGetProcessUtilization sync.RWMutex + lockGetProcessesUtilizationInfo sync.RWMutex + lockGetRemappedRows sync.RWMutex + lockGetRetiredPages sync.RWMutex + lockGetRetiredPagesPendingStatus sync.RWMutex + lockGetRetiredPages_v2 sync.RWMutex + lockGetRowRemapperHistogram sync.RWMutex + lockGetRunningProcessDetailList sync.RWMutex + lockGetSamples sync.RWMutex + lockGetSerial sync.RWMutex + lockGetSramEccErrorStatus sync.RWMutex + lockGetSupportedClocksEventReasons sync.RWMutex + lockGetSupportedClocksThrottleReasons sync.RWMutex + lockGetSupportedEventTypes sync.RWMutex + lockGetSupportedGraphicsClocks sync.RWMutex + lockGetSupportedMemoryClocks sync.RWMutex + lockGetSupportedPerformanceStates sync.RWMutex + lockGetSupportedVgpus sync.RWMutex + lockGetTargetFanSpeed sync.RWMutex + lockGetTemperature sync.RWMutex + lockGetTemperatureThreshold sync.RWMutex + lockGetTemperatureV sync.RWMutex + lockGetThermalSettings sync.RWMutex + lockGetTopologyCommonAncestor sync.RWMutex + lockGetTopologyNearestGpus sync.RWMutex + lockGetTotalEccErrors sync.RWMutex + lockGetTotalEnergyConsumption sync.RWMutex + lockGetUUID sync.RWMutex + lockGetUtilizationRates sync.RWMutex + lockGetVbiosVersion sync.RWMutex + lockGetVgpuCapabilities sync.RWMutex + lockGetVgpuHeterogeneousMode sync.RWMutex + lockGetVgpuInstancesUtilizationInfo sync.RWMutex + lockGetVgpuMetadata sync.RWMutex + lockGetVgpuProcessUtilization sync.RWMutex + lockGetVgpuProcessesUtilizationInfo sync.RWMutex + lockGetVgpuSchedulerCapabilities sync.RWMutex + lockGetVgpuSchedulerLog sync.RWMutex + lockGetVgpuSchedulerState sync.RWMutex + lockGetVgpuTypeCreatablePlacements sync.RWMutex + lockGetVgpuTypeSupportedPlacements sync.RWMutex + lockGetVgpuUtilization sync.RWMutex + lockGetViolationStatus sync.RWMutex + lockGetVirtualizationMode sync.RWMutex + lockGpmMigSampleGet sync.RWMutex + lockGpmQueryDeviceSupport sync.RWMutex + lockGpmQueryDeviceSupportV sync.RWMutex + lockGpmQueryIfStreamingEnabled sync.RWMutex + 
lockGpmSampleGet sync.RWMutex + lockGpmSetStreamingEnabled sync.RWMutex + lockIsMigDeviceHandle sync.RWMutex + lockOnSameBoard sync.RWMutex + lockPowerSmoothingActivatePresetProfile sync.RWMutex + lockPowerSmoothingSetState sync.RWMutex + lockPowerSmoothingUpdatePresetProfileParam sync.RWMutex + lockRegisterEvents sync.RWMutex + lockResetApplicationsClocks sync.RWMutex + lockResetGpuLockedClocks sync.RWMutex + lockResetMemoryLockedClocks sync.RWMutex + lockResetNvLinkErrorCounters sync.RWMutex + lockResetNvLinkUtilizationCounter sync.RWMutex + lockSetAPIRestriction sync.RWMutex + lockSetAccountingMode sync.RWMutex + lockSetApplicationsClocks sync.RWMutex + lockSetAutoBoostedClocksEnabled sync.RWMutex + lockSetClockOffsets sync.RWMutex + lockSetComputeMode sync.RWMutex + lockSetConfComputeUnprotectedMemSize sync.RWMutex + lockSetCpuAffinity sync.RWMutex + lockSetDefaultAutoBoostedClocksEnabled sync.RWMutex + lockSetDefaultFanSpeed_v2 sync.RWMutex + lockSetDramEncryptionMode sync.RWMutex + lockSetDriverModel sync.RWMutex + lockSetEccMode sync.RWMutex + lockSetFanControlPolicy sync.RWMutex + lockSetFanSpeed_v2 sync.RWMutex + lockSetGpcClkVfOffset sync.RWMutex + lockSetGpuLockedClocks sync.RWMutex + lockSetGpuOperationMode sync.RWMutex + lockSetMemClkVfOffset sync.RWMutex + lockSetMemoryLockedClocks sync.RWMutex + lockSetMigMode sync.RWMutex + lockSetNvLinkDeviceLowPowerThreshold sync.RWMutex + lockSetNvLinkUtilizationControl sync.RWMutex + lockSetNvlinkBwMode sync.RWMutex + lockSetPersistenceMode sync.RWMutex + lockSetPowerManagementLimit sync.RWMutex + lockSetPowerManagementLimit_v2 sync.RWMutex + lockSetTemperatureThreshold sync.RWMutex + lockSetVgpuCapabilities sync.RWMutex + lockSetVgpuHeterogeneousMode sync.RWMutex + lockSetVgpuSchedulerState sync.RWMutex + lockSetVirtualizationMode sync.RWMutex + lockValidateInforom sync.RWMutex + lockVgpuTypeGetMaxInstances sync.RWMutex + lockWorkloadPowerProfileClearRequestedProfiles sync.RWMutex + 
lockWorkloadPowerProfileGetCurrentProfiles sync.RWMutex + lockWorkloadPowerProfileGetProfilesInfo sync.RWMutex + lockWorkloadPowerProfileSetRequestedProfiles sync.RWMutex } // ClearAccountingPids calls ClearAccountingPidsFunc. @@ -3041,6 +3502,60 @@ func (mock *Device) GetBusTypeCalls() []struct { return calls } +// GetC2cModeInfoV calls GetC2cModeInfoVFunc. +func (mock *Device) GetC2cModeInfoV() nvml.C2cModeInfoHandler { + if mock.GetC2cModeInfoVFunc == nil { + panic("Device.GetC2cModeInfoVFunc: method is nil but Device.GetC2cModeInfoV was just called") + } + callInfo := struct { + }{} + mock.lockGetC2cModeInfoV.Lock() + mock.calls.GetC2cModeInfoV = append(mock.calls.GetC2cModeInfoV, callInfo) + mock.lockGetC2cModeInfoV.Unlock() + return mock.GetC2cModeInfoVFunc() +} + +// GetC2cModeInfoVCalls gets all the calls that were made to GetC2cModeInfoV. +// Check the length with: +// +// len(mockedDevice.GetC2cModeInfoVCalls()) +func (mock *Device) GetC2cModeInfoVCalls() []struct { +} { + var calls []struct { + } + mock.lockGetC2cModeInfoV.RLock() + calls = mock.calls.GetC2cModeInfoV + mock.lockGetC2cModeInfoV.RUnlock() + return calls +} + +// GetCapabilities calls GetCapabilitiesFunc. +func (mock *Device) GetCapabilities() (nvml.DeviceCapabilities, nvml.Return) { + if mock.GetCapabilitiesFunc == nil { + panic("Device.GetCapabilitiesFunc: method is nil but Device.GetCapabilities was just called") + } + callInfo := struct { + }{} + mock.lockGetCapabilities.Lock() + mock.calls.GetCapabilities = append(mock.calls.GetCapabilities, callInfo) + mock.lockGetCapabilities.Unlock() + return mock.GetCapabilitiesFunc() +} + +// GetCapabilitiesCalls gets all the calls that were made to GetCapabilities. 
+// Check the length with: +// +// len(mockedDevice.GetCapabilitiesCalls()) +func (mock *Device) GetCapabilitiesCalls() []struct { +} { + var calls []struct { + } + mock.lockGetCapabilities.RLock() + calls = mock.calls.GetCapabilities + mock.lockGetCapabilities.RUnlock() + return calls +} + // GetClkMonStatus calls GetClkMonStatusFunc. func (mock *Device) GetClkMonStatus() (nvml.ClkMonStatus, nvml.Return) { if mock.GetClkMonStatusFunc == nil { @@ -3136,6 +3651,33 @@ func (mock *Device) GetClockInfoCalls() []struct { return calls } +// GetClockOffsets calls GetClockOffsetsFunc. +func (mock *Device) GetClockOffsets() (nvml.ClockOffset, nvml.Return) { + if mock.GetClockOffsetsFunc == nil { + panic("Device.GetClockOffsetsFunc: method is nil but Device.GetClockOffsets was just called") + } + callInfo := struct { + }{} + mock.lockGetClockOffsets.Lock() + mock.calls.GetClockOffsets = append(mock.calls.GetClockOffsets, callInfo) + mock.lockGetClockOffsets.Unlock() + return mock.GetClockOffsetsFunc() +} + +// GetClockOffsetsCalls gets all the calls that were made to GetClockOffsets. +// Check the length with: +// +// len(mockedDevice.GetClockOffsetsCalls()) +func (mock *Device) GetClockOffsetsCalls() []struct { +} { + var calls []struct { + } + mock.lockGetClockOffsets.RLock() + calls = mock.calls.GetClockOffsets + mock.lockGetClockOffsets.RUnlock() + return calls +} + // GetComputeInstanceId calls GetComputeInstanceIdFunc. func (mock *Device) GetComputeInstanceId() (int, nvml.Return) { if mock.GetComputeInstanceIdFunc == nil { @@ -3217,50 +3759,185 @@ func (mock *Device) GetComputeRunningProcessesCalls() []struct { return calls } -// GetCpuAffinity calls GetCpuAffinityFunc. -func (mock *Device) GetCpuAffinity(n int) ([]uint, nvml.Return) { - if mock.GetCpuAffinityFunc == nil { - panic("Device.GetCpuAffinityFunc: method is nil but Device.GetCpuAffinity was just called") +// GetConfComputeGpuAttestationReport calls GetConfComputeGpuAttestationReportFunc. 
+func (mock *Device) GetConfComputeGpuAttestationReport() (nvml.ConfComputeGpuAttestationReport, nvml.Return) { + if mock.GetConfComputeGpuAttestationReportFunc == nil { + panic("Device.GetConfComputeGpuAttestationReportFunc: method is nil but Device.GetConfComputeGpuAttestationReport was just called") } callInfo := struct { - N int - }{ - N: n, - } - mock.lockGetCpuAffinity.Lock() - mock.calls.GetCpuAffinity = append(mock.calls.GetCpuAffinity, callInfo) - mock.lockGetCpuAffinity.Unlock() - return mock.GetCpuAffinityFunc(n) + }{} + mock.lockGetConfComputeGpuAttestationReport.Lock() + mock.calls.GetConfComputeGpuAttestationReport = append(mock.calls.GetConfComputeGpuAttestationReport, callInfo) + mock.lockGetConfComputeGpuAttestationReport.Unlock() + return mock.GetConfComputeGpuAttestationReportFunc() } -// GetCpuAffinityCalls gets all the calls that were made to GetCpuAffinity. +// GetConfComputeGpuAttestationReportCalls gets all the calls that were made to GetConfComputeGpuAttestationReport. // Check the length with: // -// len(mockedDevice.GetCpuAffinityCalls()) -func (mock *Device) GetCpuAffinityCalls() []struct { - N int +// len(mockedDevice.GetConfComputeGpuAttestationReportCalls()) +func (mock *Device) GetConfComputeGpuAttestationReportCalls() []struct { } { var calls []struct { - N int } - mock.lockGetCpuAffinity.RLock() - calls = mock.calls.GetCpuAffinity - mock.lockGetCpuAffinity.RUnlock() + mock.lockGetConfComputeGpuAttestationReport.RLock() + calls = mock.calls.GetConfComputeGpuAttestationReport + mock.lockGetConfComputeGpuAttestationReport.RUnlock() return calls } -// GetCpuAffinityWithinScope calls GetCpuAffinityWithinScopeFunc. 
-func (mock *Device) GetCpuAffinityWithinScope(n int, affinityScope nvml.AffinityScope) ([]uint, nvml.Return) { - if mock.GetCpuAffinityWithinScopeFunc == nil { - panic("Device.GetCpuAffinityWithinScopeFunc: method is nil but Device.GetCpuAffinityWithinScope was just called") +// GetConfComputeGpuCertificate calls GetConfComputeGpuCertificateFunc. +func (mock *Device) GetConfComputeGpuCertificate() (nvml.ConfComputeGpuCertificate, nvml.Return) { + if mock.GetConfComputeGpuCertificateFunc == nil { + panic("Device.GetConfComputeGpuCertificateFunc: method is nil but Device.GetConfComputeGpuCertificate was just called") } callInfo := struct { - N int - AffinityScope nvml.AffinityScope - }{ - N: n, - AffinityScope: affinityScope, - } + }{} + mock.lockGetConfComputeGpuCertificate.Lock() + mock.calls.GetConfComputeGpuCertificate = append(mock.calls.GetConfComputeGpuCertificate, callInfo) + mock.lockGetConfComputeGpuCertificate.Unlock() + return mock.GetConfComputeGpuCertificateFunc() +} + +// GetConfComputeGpuCertificateCalls gets all the calls that were made to GetConfComputeGpuCertificate. +// Check the length with: +// +// len(mockedDevice.GetConfComputeGpuCertificateCalls()) +func (mock *Device) GetConfComputeGpuCertificateCalls() []struct { +} { + var calls []struct { + } + mock.lockGetConfComputeGpuCertificate.RLock() + calls = mock.calls.GetConfComputeGpuCertificate + mock.lockGetConfComputeGpuCertificate.RUnlock() + return calls +} + +// GetConfComputeMemSizeInfo calls GetConfComputeMemSizeInfoFunc. 
+func (mock *Device) GetConfComputeMemSizeInfo() (nvml.ConfComputeMemSizeInfo, nvml.Return) { + if mock.GetConfComputeMemSizeInfoFunc == nil { + panic("Device.GetConfComputeMemSizeInfoFunc: method is nil but Device.GetConfComputeMemSizeInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetConfComputeMemSizeInfo.Lock() + mock.calls.GetConfComputeMemSizeInfo = append(mock.calls.GetConfComputeMemSizeInfo, callInfo) + mock.lockGetConfComputeMemSizeInfo.Unlock() + return mock.GetConfComputeMemSizeInfoFunc() +} + +// GetConfComputeMemSizeInfoCalls gets all the calls that were made to GetConfComputeMemSizeInfo. +// Check the length with: +// +// len(mockedDevice.GetConfComputeMemSizeInfoCalls()) +func (mock *Device) GetConfComputeMemSizeInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetConfComputeMemSizeInfo.RLock() + calls = mock.calls.GetConfComputeMemSizeInfo + mock.lockGetConfComputeMemSizeInfo.RUnlock() + return calls +} + +// GetConfComputeProtectedMemoryUsage calls GetConfComputeProtectedMemoryUsageFunc. +func (mock *Device) GetConfComputeProtectedMemoryUsage() (nvml.Memory, nvml.Return) { + if mock.GetConfComputeProtectedMemoryUsageFunc == nil { + panic("Device.GetConfComputeProtectedMemoryUsageFunc: method is nil but Device.GetConfComputeProtectedMemoryUsage was just called") + } + callInfo := struct { + }{} + mock.lockGetConfComputeProtectedMemoryUsage.Lock() + mock.calls.GetConfComputeProtectedMemoryUsage = append(mock.calls.GetConfComputeProtectedMemoryUsage, callInfo) + mock.lockGetConfComputeProtectedMemoryUsage.Unlock() + return mock.GetConfComputeProtectedMemoryUsageFunc() +} + +// GetConfComputeProtectedMemoryUsageCalls gets all the calls that were made to GetConfComputeProtectedMemoryUsage. 
+// Check the length with: +// +// len(mockedDevice.GetConfComputeProtectedMemoryUsageCalls()) +func (mock *Device) GetConfComputeProtectedMemoryUsageCalls() []struct { +} { + var calls []struct { + } + mock.lockGetConfComputeProtectedMemoryUsage.RLock() + calls = mock.calls.GetConfComputeProtectedMemoryUsage + mock.lockGetConfComputeProtectedMemoryUsage.RUnlock() + return calls +} + +// GetCoolerInfo calls GetCoolerInfoFunc. +func (mock *Device) GetCoolerInfo() (nvml.CoolerInfo, nvml.Return) { + if mock.GetCoolerInfoFunc == nil { + panic("Device.GetCoolerInfoFunc: method is nil but Device.GetCoolerInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetCoolerInfo.Lock() + mock.calls.GetCoolerInfo = append(mock.calls.GetCoolerInfo, callInfo) + mock.lockGetCoolerInfo.Unlock() + return mock.GetCoolerInfoFunc() +} + +// GetCoolerInfoCalls gets all the calls that were made to GetCoolerInfo. +// Check the length with: +// +// len(mockedDevice.GetCoolerInfoCalls()) +func (mock *Device) GetCoolerInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetCoolerInfo.RLock() + calls = mock.calls.GetCoolerInfo + mock.lockGetCoolerInfo.RUnlock() + return calls +} + +// GetCpuAffinity calls GetCpuAffinityFunc. +func (mock *Device) GetCpuAffinity(n int) ([]uint, nvml.Return) { + if mock.GetCpuAffinityFunc == nil { + panic("Device.GetCpuAffinityFunc: method is nil but Device.GetCpuAffinity was just called") + } + callInfo := struct { + N int + }{ + N: n, + } + mock.lockGetCpuAffinity.Lock() + mock.calls.GetCpuAffinity = append(mock.calls.GetCpuAffinity, callInfo) + mock.lockGetCpuAffinity.Unlock() + return mock.GetCpuAffinityFunc(n) +} + +// GetCpuAffinityCalls gets all the calls that were made to GetCpuAffinity. 
+// Check the length with: +// +// len(mockedDevice.GetCpuAffinityCalls()) +func (mock *Device) GetCpuAffinityCalls() []struct { + N int +} { + var calls []struct { + N int + } + mock.lockGetCpuAffinity.RLock() + calls = mock.calls.GetCpuAffinity + mock.lockGetCpuAffinity.RUnlock() + return calls +} + +// GetCpuAffinityWithinScope calls GetCpuAffinityWithinScopeFunc. +func (mock *Device) GetCpuAffinityWithinScope(n int, affinityScope nvml.AffinityScope) ([]uint, nvml.Return) { + if mock.GetCpuAffinityWithinScopeFunc == nil { + panic("Device.GetCpuAffinityWithinScopeFunc: method is nil but Device.GetCpuAffinityWithinScope was just called") + } + callInfo := struct { + N int + AffinityScope nvml.AffinityScope + }{ + N: n, + AffinityScope: affinityScope, + } mock.lockGetCpuAffinityWithinScope.Lock() mock.calls.GetCpuAffinityWithinScope = append(mock.calls.GetCpuAffinityWithinScope, callInfo) mock.lockGetCpuAffinityWithinScope.Unlock() @@ -3393,6 +4070,60 @@ func (mock *Device) GetCurrPcieLinkWidthCalls() []struct { return calls } +// GetCurrentClockFreqs calls GetCurrentClockFreqsFunc. +func (mock *Device) GetCurrentClockFreqs() (nvml.DeviceCurrentClockFreqs, nvml.Return) { + if mock.GetCurrentClockFreqsFunc == nil { + panic("Device.GetCurrentClockFreqsFunc: method is nil but Device.GetCurrentClockFreqs was just called") + } + callInfo := struct { + }{} + mock.lockGetCurrentClockFreqs.Lock() + mock.calls.GetCurrentClockFreqs = append(mock.calls.GetCurrentClockFreqs, callInfo) + mock.lockGetCurrentClockFreqs.Unlock() + return mock.GetCurrentClockFreqsFunc() +} + +// GetCurrentClockFreqsCalls gets all the calls that were made to GetCurrentClockFreqs. 
+// Check the length with: +// +// len(mockedDevice.GetCurrentClockFreqsCalls()) +func (mock *Device) GetCurrentClockFreqsCalls() []struct { +} { + var calls []struct { + } + mock.lockGetCurrentClockFreqs.RLock() + calls = mock.calls.GetCurrentClockFreqs + mock.lockGetCurrentClockFreqs.RUnlock() + return calls +} + +// GetCurrentClocksEventReasons calls GetCurrentClocksEventReasonsFunc. +func (mock *Device) GetCurrentClocksEventReasons() (uint64, nvml.Return) { + if mock.GetCurrentClocksEventReasonsFunc == nil { + panic("Device.GetCurrentClocksEventReasonsFunc: method is nil but Device.GetCurrentClocksEventReasons was just called") + } + callInfo := struct { + }{} + mock.lockGetCurrentClocksEventReasons.Lock() + mock.calls.GetCurrentClocksEventReasons = append(mock.calls.GetCurrentClocksEventReasons, callInfo) + mock.lockGetCurrentClocksEventReasons.Unlock() + return mock.GetCurrentClocksEventReasonsFunc() +} + +// GetCurrentClocksEventReasonsCalls gets all the calls that were made to GetCurrentClocksEventReasons. +// Check the length with: +// +// len(mockedDevice.GetCurrentClocksEventReasonsCalls()) +func (mock *Device) GetCurrentClocksEventReasonsCalls() []struct { +} { + var calls []struct { + } + mock.lockGetCurrentClocksEventReasons.RLock() + calls = mock.calls.GetCurrentClocksEventReasons + mock.lockGetCurrentClocksEventReasons.RUnlock() + return calls +} + // GetCurrentClocksThrottleReasons calls GetCurrentClocksThrottleReasonsFunc. func (mock *Device) GetCurrentClocksThrottleReasons() (uint64, nvml.Return) { if mock.GetCurrentClocksThrottleReasonsFunc == nil { @@ -3623,6 +4354,33 @@ func (mock *Device) GetDisplayModeCalls() []struct { return calls } +// GetDramEncryptionMode calls GetDramEncryptionModeFunc. 
+func (mock *Device) GetDramEncryptionMode() (nvml.DramEncryptionInfo, nvml.DramEncryptionInfo, nvml.Return) { + if mock.GetDramEncryptionModeFunc == nil { + panic("Device.GetDramEncryptionModeFunc: method is nil but Device.GetDramEncryptionMode was just called") + } + callInfo := struct { + }{} + mock.lockGetDramEncryptionMode.Lock() + mock.calls.GetDramEncryptionMode = append(mock.calls.GetDramEncryptionMode, callInfo) + mock.lockGetDramEncryptionMode.Unlock() + return mock.GetDramEncryptionModeFunc() +} + +// GetDramEncryptionModeCalls gets all the calls that were made to GetDramEncryptionMode. +// Check the length with: +// +// len(mockedDevice.GetDramEncryptionModeCalls()) +func (mock *Device) GetDramEncryptionModeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetDramEncryptionMode.RLock() + calls = mock.calls.GetDramEncryptionMode + mock.lockGetDramEncryptionMode.RUnlock() + return calls +} + // GetDriverModel calls GetDriverModelFunc. func (mock *Device) GetDriverModel() (nvml.DriverModel, nvml.DriverModel, nvml.Return) { if mock.GetDriverModelFunc == nil { @@ -3650,6 +4408,33 @@ func (mock *Device) GetDriverModelCalls() []struct { return calls } +// GetDriverModel_v2 calls GetDriverModel_v2Func. +func (mock *Device) GetDriverModel_v2() (nvml.DriverModel, nvml.DriverModel, nvml.Return) { + if mock.GetDriverModel_v2Func == nil { + panic("Device.GetDriverModel_v2Func: method is nil but Device.GetDriverModel_v2 was just called") + } + callInfo := struct { + }{} + mock.lockGetDriverModel_v2.Lock() + mock.calls.GetDriverModel_v2 = append(mock.calls.GetDriverModel_v2, callInfo) + mock.lockGetDriverModel_v2.Unlock() + return mock.GetDriverModel_v2Func() +} + +// GetDriverModel_v2Calls gets all the calls that were made to GetDriverModel_v2. 
+// Check the length with: +// +// len(mockedDevice.GetDriverModel_v2Calls()) +func (mock *Device) GetDriverModel_v2Calls() []struct { +} { + var calls []struct { + } + mock.lockGetDriverModel_v2.RLock() + calls = mock.calls.GetDriverModel_v2 + mock.lockGetDriverModel_v2.RUnlock() + return calls +} + // GetDynamicPstatesInfo calls GetDynamicPstatesInfoFunc. func (mock *Device) GetDynamicPstatesInfo() (nvml.GpuDynamicPstatesInfo, nvml.Return) { if mock.GetDynamicPstatesInfoFunc == nil { @@ -3957,6 +4742,33 @@ func (mock *Device) GetFanSpeedCalls() []struct { return calls } +// GetFanSpeedRPM calls GetFanSpeedRPMFunc. +func (mock *Device) GetFanSpeedRPM() (nvml.FanSpeedInfo, nvml.Return) { + if mock.GetFanSpeedRPMFunc == nil { + panic("Device.GetFanSpeedRPMFunc: method is nil but Device.GetFanSpeedRPM was just called") + } + callInfo := struct { + }{} + mock.lockGetFanSpeedRPM.Lock() + mock.calls.GetFanSpeedRPM = append(mock.calls.GetFanSpeedRPM, callInfo) + mock.lockGetFanSpeedRPM.Unlock() + return mock.GetFanSpeedRPMFunc() +} + +// GetFanSpeedRPMCalls gets all the calls that were made to GetFanSpeedRPM. +// Check the length with: +// +// len(mockedDevice.GetFanSpeedRPMCalls()) +func (mock *Device) GetFanSpeedRPMCalls() []struct { +} { + var calls []struct { + } + mock.lockGetFanSpeedRPM.RLock() + calls = mock.calls.GetFanSpeedRPM + mock.lockGetFanSpeedRPM.RUnlock() + return calls +} + // GetFanSpeed_v2 calls GetFanSpeed_v2Func. func (mock *Device) GetFanSpeed_v2(n int) (uint32, nvml.Return) { if mock.GetFanSpeed_v2Func == nil { @@ -4102,6 +4914,33 @@ func (mock *Device) GetGpuFabricInfoCalls() []struct { return calls } +// GetGpuFabricInfoV calls GetGpuFabricInfoVFunc. 
+func (mock *Device) GetGpuFabricInfoV() nvml.GpuFabricInfoHandler { + if mock.GetGpuFabricInfoVFunc == nil { + panic("Device.GetGpuFabricInfoVFunc: method is nil but Device.GetGpuFabricInfoV was just called") + } + callInfo := struct { + }{} + mock.lockGetGpuFabricInfoV.Lock() + mock.calls.GetGpuFabricInfoV = append(mock.calls.GetGpuFabricInfoV, callInfo) + mock.lockGetGpuFabricInfoV.Unlock() + return mock.GetGpuFabricInfoVFunc() +} + +// GetGpuFabricInfoVCalls gets all the calls that were made to GetGpuFabricInfoV. +// Check the length with: +// +// len(mockedDevice.GetGpuFabricInfoVCalls()) +func (mock *Device) GetGpuFabricInfoVCalls() []struct { +} { + var calls []struct { + } + mock.lockGetGpuFabricInfoV.RLock() + calls = mock.calls.GetGpuFabricInfoV + mock.lockGetGpuFabricInfoV.RUnlock() + return calls +} + // GetGpuInstanceById calls GetGpuInstanceByIdFunc. func (mock *Device) GetGpuInstanceById(n int) (nvml.GpuInstance, nvml.Return) { if mock.GetGpuInstanceByIdFunc == nil { @@ -4226,7 +5065,7 @@ func (mock *Device) GetGpuInstanceProfileInfoCalls() []struct { } // GetGpuInstanceProfileInfoV calls GetGpuInstanceProfileInfoVFunc. -func (mock *Device) GetGpuInstanceProfileInfoV(n int) nvml.GpuInstanceProfileInfoV { +func (mock *Device) GetGpuInstanceProfileInfoV(n int) nvml.GpuInstanceProfileInfoHandler { if mock.GetGpuInstanceProfileInfoVFunc == nil { panic("Device.GetGpuInstanceProfileInfoVFunc: method is nil but Device.GetGpuInstanceProfileInfoV was just called") } @@ -4650,6 +5489,60 @@ func (mock *Device) GetIrqNumCalls() []struct { return calls } +// GetJpgUtilization calls GetJpgUtilizationFunc. 
+func (mock *Device) GetJpgUtilization() (uint32, uint32, nvml.Return) { + if mock.GetJpgUtilizationFunc == nil { + panic("Device.GetJpgUtilizationFunc: method is nil but Device.GetJpgUtilization was just called") + } + callInfo := struct { + }{} + mock.lockGetJpgUtilization.Lock() + mock.calls.GetJpgUtilization = append(mock.calls.GetJpgUtilization, callInfo) + mock.lockGetJpgUtilization.Unlock() + return mock.GetJpgUtilizationFunc() +} + +// GetJpgUtilizationCalls gets all the calls that were made to GetJpgUtilization. +// Check the length with: +// +// len(mockedDevice.GetJpgUtilizationCalls()) +func (mock *Device) GetJpgUtilizationCalls() []struct { +} { + var calls []struct { + } + mock.lockGetJpgUtilization.RLock() + calls = mock.calls.GetJpgUtilization + mock.lockGetJpgUtilization.RUnlock() + return calls +} + +// GetLastBBXFlushTime calls GetLastBBXFlushTimeFunc. +func (mock *Device) GetLastBBXFlushTime() (uint64, uint, nvml.Return) { + if mock.GetLastBBXFlushTimeFunc == nil { + panic("Device.GetLastBBXFlushTimeFunc: method is nil but Device.GetLastBBXFlushTime was just called") + } + callInfo := struct { + }{} + mock.lockGetLastBBXFlushTime.Lock() + mock.calls.GetLastBBXFlushTime = append(mock.calls.GetLastBBXFlushTime, callInfo) + mock.lockGetLastBBXFlushTime.Unlock() + return mock.GetLastBBXFlushTimeFunc() +} + +// GetLastBBXFlushTimeCalls gets all the calls that were made to GetLastBBXFlushTime. +// Check the length with: +// +// len(mockedDevice.GetLastBBXFlushTimeCalls()) +func (mock *Device) GetLastBBXFlushTimeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetLastBBXFlushTime.RLock() + calls = mock.calls.GetLastBBXFlushTime + mock.lockGetLastBBXFlushTime.RUnlock() + return calls +} + // GetMPSComputeRunningProcesses calls GetMPSComputeRunningProcessesFunc. 
func (mock *Device) GetMPSComputeRunningProcesses() ([]nvml.ProcessInfo, nvml.Return) { if mock.GetMPSComputeRunningProcessesFunc == nil { @@ -4677,6 +5570,33 @@ func (mock *Device) GetMPSComputeRunningProcessesCalls() []struct { return calls } +// GetMarginTemperature calls GetMarginTemperatureFunc. +func (mock *Device) GetMarginTemperature() (nvml.MarginTemperature, nvml.Return) { + if mock.GetMarginTemperatureFunc == nil { + panic("Device.GetMarginTemperatureFunc: method is nil but Device.GetMarginTemperature was just called") + } + callInfo := struct { + }{} + mock.lockGetMarginTemperature.Lock() + mock.calls.GetMarginTemperature = append(mock.calls.GetMarginTemperature, callInfo) + mock.lockGetMarginTemperature.Unlock() + return mock.GetMarginTemperatureFunc() +} + +// GetMarginTemperatureCalls gets all the calls that were made to GetMarginTemperature. +// Check the length with: +// +// len(mockedDevice.GetMarginTemperatureCalls()) +func (mock *Device) GetMarginTemperatureCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMarginTemperature.RLock() + calls = mock.calls.GetMarginTemperature + mock.lockGetMarginTemperature.RUnlock() + return calls +} + // GetMaxClockInfo calls GetMaxClockInfoFunc. func (mock *Device) GetMaxClockInfo(clockType nvml.ClockType) (uint32, nvml.Return) { if mock.GetMaxClockInfoFunc == nil { @@ -5182,6 +6102,33 @@ func (mock *Device) GetMinorNumberCalls() []struct { return calls } +// GetModuleId calls GetModuleIdFunc. +func (mock *Device) GetModuleId() (int, nvml.Return) { + if mock.GetModuleIdFunc == nil { + panic("Device.GetModuleIdFunc: method is nil but Device.GetModuleId was just called") + } + callInfo := struct { + }{} + mock.lockGetModuleId.Lock() + mock.calls.GetModuleId = append(mock.calls.GetModuleId, callInfo) + mock.lockGetModuleId.Unlock() + return mock.GetModuleIdFunc() +} + +// GetModuleIdCalls gets all the calls that were made to GetModuleId. 
+// Check the length with: +// +// len(mockedDevice.GetModuleIdCalls()) +func (mock *Device) GetModuleIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetModuleId.RLock() + calls = mock.calls.GetModuleId + mock.lockGetModuleId.RUnlock() + return calls +} + // GetMultiGpuBoard calls GetMultiGpuBoardFunc. func (mock *Device) GetMultiGpuBoard() (int, nvml.Return) { if mock.GetMultiGpuBoardFunc == nil { @@ -5290,6 +6237,33 @@ func (mock *Device) GetNumGpuCoresCalls() []struct { return calls } +// GetNumaNodeId calls GetNumaNodeIdFunc. +func (mock *Device) GetNumaNodeId() (int, nvml.Return) { + if mock.GetNumaNodeIdFunc == nil { + panic("Device.GetNumaNodeIdFunc: method is nil but Device.GetNumaNodeId was just called") + } + callInfo := struct { + }{} + mock.lockGetNumaNodeId.Lock() + mock.calls.GetNumaNodeId = append(mock.calls.GetNumaNodeId, callInfo) + mock.lockGetNumaNodeId.Unlock() + return mock.GetNumaNodeIdFunc() +} + +// GetNumaNodeIdCalls gets all the calls that were made to GetNumaNodeId. +// Check the length with: +// +// len(mockedDevice.GetNumaNodeIdCalls()) +func (mock *Device) GetNumaNodeIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetNumaNodeId.RLock() + calls = mock.calls.GetNumaNodeId + mock.lockGetNumaNodeId.RUnlock() + return calls +} + // GetNvLinkCapability calls GetNvLinkCapabilityFunc. func (mock *Device) GetNvLinkCapability(n int, nvLinkCapability nvml.NvLinkCapability) (uint32, nvml.Return) { if mock.GetNvLinkCapabilityFunc == nil { @@ -5562,30 +6536,111 @@ func (mock *Device) GetNvLinkVersionCalls() []struct { return calls } -// GetP2PStatus calls GetP2PStatusFunc. -func (mock *Device) GetP2PStatus(device nvml.Device, gpuP2PCapsIndex nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) { - if mock.GetP2PStatusFunc == nil { - panic("Device.GetP2PStatusFunc: method is nil but Device.GetP2PStatus was just called") +// GetNvlinkBwMode calls GetNvlinkBwModeFunc. 
+func (mock *Device) GetNvlinkBwMode() (nvml.NvlinkGetBwMode, nvml.Return) { + if mock.GetNvlinkBwModeFunc == nil { + panic("Device.GetNvlinkBwModeFunc: method is nil but Device.GetNvlinkBwMode was just called") } callInfo := struct { - Device nvml.Device - GpuP2PCapsIndex nvml.GpuP2PCapsIndex - }{ - Device: device, - GpuP2PCapsIndex: gpuP2PCapsIndex, - } - mock.lockGetP2PStatus.Lock() - mock.calls.GetP2PStatus = append(mock.calls.GetP2PStatus, callInfo) - mock.lockGetP2PStatus.Unlock() - return mock.GetP2PStatusFunc(device, gpuP2PCapsIndex) + }{} + mock.lockGetNvlinkBwMode.Lock() + mock.calls.GetNvlinkBwMode = append(mock.calls.GetNvlinkBwMode, callInfo) + mock.lockGetNvlinkBwMode.Unlock() + return mock.GetNvlinkBwModeFunc() } -// GetP2PStatusCalls gets all the calls that were made to GetP2PStatus. +// GetNvlinkBwModeCalls gets all the calls that were made to GetNvlinkBwMode. // Check the length with: // -// len(mockedDevice.GetP2PStatusCalls()) -func (mock *Device) GetP2PStatusCalls() []struct { - Device nvml.Device +// len(mockedDevice.GetNvlinkBwModeCalls()) +func (mock *Device) GetNvlinkBwModeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetNvlinkBwMode.RLock() + calls = mock.calls.GetNvlinkBwMode + mock.lockGetNvlinkBwMode.RUnlock() + return calls +} + +// GetNvlinkSupportedBwModes calls GetNvlinkSupportedBwModesFunc. +func (mock *Device) GetNvlinkSupportedBwModes() (nvml.NvlinkSupportedBwModes, nvml.Return) { + if mock.GetNvlinkSupportedBwModesFunc == nil { + panic("Device.GetNvlinkSupportedBwModesFunc: method is nil but Device.GetNvlinkSupportedBwModes was just called") + } + callInfo := struct { + }{} + mock.lockGetNvlinkSupportedBwModes.Lock() + mock.calls.GetNvlinkSupportedBwModes = append(mock.calls.GetNvlinkSupportedBwModes, callInfo) + mock.lockGetNvlinkSupportedBwModes.Unlock() + return mock.GetNvlinkSupportedBwModesFunc() +} + +// GetNvlinkSupportedBwModesCalls gets all the calls that were made to GetNvlinkSupportedBwModes. 
+// Check the length with: +// +// len(mockedDevice.GetNvlinkSupportedBwModesCalls()) +func (mock *Device) GetNvlinkSupportedBwModesCalls() []struct { +} { + var calls []struct { + } + mock.lockGetNvlinkSupportedBwModes.RLock() + calls = mock.calls.GetNvlinkSupportedBwModes + mock.lockGetNvlinkSupportedBwModes.RUnlock() + return calls +} + +// GetOfaUtilization calls GetOfaUtilizationFunc. +func (mock *Device) GetOfaUtilization() (uint32, uint32, nvml.Return) { + if mock.GetOfaUtilizationFunc == nil { + panic("Device.GetOfaUtilizationFunc: method is nil but Device.GetOfaUtilization was just called") + } + callInfo := struct { + }{} + mock.lockGetOfaUtilization.Lock() + mock.calls.GetOfaUtilization = append(mock.calls.GetOfaUtilization, callInfo) + mock.lockGetOfaUtilization.Unlock() + return mock.GetOfaUtilizationFunc() +} + +// GetOfaUtilizationCalls gets all the calls that were made to GetOfaUtilization. +// Check the length with: +// +// len(mockedDevice.GetOfaUtilizationCalls()) +func (mock *Device) GetOfaUtilizationCalls() []struct { +} { + var calls []struct { + } + mock.lockGetOfaUtilization.RLock() + calls = mock.calls.GetOfaUtilization + mock.lockGetOfaUtilization.RUnlock() + return calls +} + +// GetP2PStatus calls GetP2PStatusFunc. +func (mock *Device) GetP2PStatus(device nvml.Device, gpuP2PCapsIndex nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) { + if mock.GetP2PStatusFunc == nil { + panic("Device.GetP2PStatusFunc: method is nil but Device.GetP2PStatus was just called") + } + callInfo := struct { + Device nvml.Device + GpuP2PCapsIndex nvml.GpuP2PCapsIndex + }{ + Device: device, + GpuP2PCapsIndex: gpuP2PCapsIndex, + } + mock.lockGetP2PStatus.Lock() + mock.calls.GetP2PStatus = append(mock.calls.GetP2PStatus, callInfo) + mock.lockGetP2PStatus.Unlock() + return mock.GetP2PStatusFunc(device, gpuP2PCapsIndex) +} + +// GetP2PStatusCalls gets all the calls that were made to GetP2PStatus. 
+// Check the length with: +// +// len(mockedDevice.GetP2PStatusCalls()) +func (mock *Device) GetP2PStatusCalls() []struct { + Device nvml.Device GpuP2PCapsIndex nvml.GpuP2PCapsIndex } { var calls []struct { @@ -5625,6 +6680,33 @@ func (mock *Device) GetPciInfoCalls() []struct { return calls } +// GetPciInfoExt calls GetPciInfoExtFunc. +func (mock *Device) GetPciInfoExt() (nvml.PciInfoExt, nvml.Return) { + if mock.GetPciInfoExtFunc == nil { + panic("Device.GetPciInfoExtFunc: method is nil but Device.GetPciInfoExt was just called") + } + callInfo := struct { + }{} + mock.lockGetPciInfoExt.Lock() + mock.calls.GetPciInfoExt = append(mock.calls.GetPciInfoExt, callInfo) + mock.lockGetPciInfoExt.Unlock() + return mock.GetPciInfoExtFunc() +} + +// GetPciInfoExtCalls gets all the calls that were made to GetPciInfoExt. +// Check the length with: +// +// len(mockedDevice.GetPciInfoExtCalls()) +func (mock *Device) GetPciInfoExtCalls() []struct { +} { + var calls []struct { + } + mock.lockGetPciInfoExt.RLock() + calls = mock.calls.GetPciInfoExt + mock.lockGetPciInfoExt.RUnlock() + return calls +} + // GetPcieLinkMaxSpeed calls GetPcieLinkMaxSpeedFunc. func (mock *Device) GetPcieLinkMaxSpeed() (uint32, nvml.Return) { if mock.GetPcieLinkMaxSpeedFunc == nil { @@ -5738,6 +6820,33 @@ func (mock *Device) GetPcieThroughputCalls() []struct { return calls } +// GetPerformanceModes calls GetPerformanceModesFunc. +func (mock *Device) GetPerformanceModes() (nvml.DevicePerfModes, nvml.Return) { + if mock.GetPerformanceModesFunc == nil { + panic("Device.GetPerformanceModesFunc: method is nil but Device.GetPerformanceModes was just called") + } + callInfo := struct { + }{} + mock.lockGetPerformanceModes.Lock() + mock.calls.GetPerformanceModes = append(mock.calls.GetPerformanceModes, callInfo) + mock.lockGetPerformanceModes.Unlock() + return mock.GetPerformanceModesFunc() +} + +// GetPerformanceModesCalls gets all the calls that were made to GetPerformanceModes. 
+// Check the length with: +// +// len(mockedDevice.GetPerformanceModesCalls()) +func (mock *Device) GetPerformanceModesCalls() []struct { +} { + var calls []struct { + } + mock.lockGetPerformanceModes.RLock() + calls = mock.calls.GetPerformanceModes + mock.lockGetPerformanceModes.RUnlock() + return calls +} + // GetPerformanceState calls GetPerformanceStateFunc. func (mock *Device) GetPerformanceState() (nvml.Pstates, nvml.Return) { if mock.GetPerformanceStateFunc == nil { @@ -5819,6 +6928,33 @@ func (mock *Device) GetPgpuMetadataStringCalls() []struct { return calls } +// GetPlatformInfo calls GetPlatformInfoFunc. +func (mock *Device) GetPlatformInfo() (nvml.PlatformInfo, nvml.Return) { + if mock.GetPlatformInfoFunc == nil { + panic("Device.GetPlatformInfoFunc: method is nil but Device.GetPlatformInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetPlatformInfo.Lock() + mock.calls.GetPlatformInfo = append(mock.calls.GetPlatformInfo, callInfo) + mock.lockGetPlatformInfo.Unlock() + return mock.GetPlatformInfoFunc() +} + +// GetPlatformInfoCalls gets all the calls that were made to GetPlatformInfo. +// Check the length with: +// +// len(mockedDevice.GetPlatformInfoCalls()) +func (mock *Device) GetPlatformInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetPlatformInfo.RLock() + calls = mock.calls.GetPlatformInfo + mock.lockGetPlatformInfo.RUnlock() + return calls +} + // GetPowerManagementDefaultLimit calls GetPowerManagementDefaultLimitFunc. func (mock *Device) GetPowerManagementDefaultLimit() (uint32, nvml.Return) { if mock.GetPowerManagementDefaultLimitFunc == nil { @@ -6040,6 +7176,33 @@ func (mock *Device) GetProcessUtilizationCalls() []struct { return calls } +// GetProcessesUtilizationInfo calls GetProcessesUtilizationInfoFunc. 
+func (mock *Device) GetProcessesUtilizationInfo() (nvml.ProcessesUtilizationInfo, nvml.Return) { + if mock.GetProcessesUtilizationInfoFunc == nil { + panic("Device.GetProcessesUtilizationInfoFunc: method is nil but Device.GetProcessesUtilizationInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetProcessesUtilizationInfo.Lock() + mock.calls.GetProcessesUtilizationInfo = append(mock.calls.GetProcessesUtilizationInfo, callInfo) + mock.lockGetProcessesUtilizationInfo.Unlock() + return mock.GetProcessesUtilizationInfoFunc() +} + +// GetProcessesUtilizationInfoCalls gets all the calls that were made to GetProcessesUtilizationInfo. +// Check the length with: +// +// len(mockedDevice.GetProcessesUtilizationInfoCalls()) +func (mock *Device) GetProcessesUtilizationInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetProcessesUtilizationInfo.RLock() + calls = mock.calls.GetProcessesUtilizationInfo + mock.lockGetProcessesUtilizationInfo.RUnlock() + return calls +} + // GetRemappedRows calls GetRemappedRowsFunc. func (mock *Device) GetRemappedRows() (int, int, bool, bool, nvml.Return) { if mock.GetRemappedRowsFunc == nil { @@ -6185,6 +7348,33 @@ func (mock *Device) GetRowRemapperHistogramCalls() []struct { return calls } +// GetRunningProcessDetailList calls GetRunningProcessDetailListFunc. +func (mock *Device) GetRunningProcessDetailList() (nvml.ProcessDetailList, nvml.Return) { + if mock.GetRunningProcessDetailListFunc == nil { + panic("Device.GetRunningProcessDetailListFunc: method is nil but Device.GetRunningProcessDetailList was just called") + } + callInfo := struct { + }{} + mock.lockGetRunningProcessDetailList.Lock() + mock.calls.GetRunningProcessDetailList = append(mock.calls.GetRunningProcessDetailList, callInfo) + mock.lockGetRunningProcessDetailList.Unlock() + return mock.GetRunningProcessDetailListFunc() +} + +// GetRunningProcessDetailListCalls gets all the calls that were made to GetRunningProcessDetailList. 
+// Check the length with: +// +// len(mockedDevice.GetRunningProcessDetailListCalls()) +func (mock *Device) GetRunningProcessDetailListCalls() []struct { +} { + var calls []struct { + } + mock.lockGetRunningProcessDetailList.RLock() + calls = mock.calls.GetRunningProcessDetailList + mock.lockGetRunningProcessDetailList.RUnlock() + return calls +} + // GetSamples calls GetSamplesFunc. func (mock *Device) GetSamples(samplingType nvml.SamplingType, v uint64) (nvml.ValueType, []nvml.Sample, nvml.Return) { if mock.GetSamplesFunc == nil { @@ -6248,6 +7438,60 @@ func (mock *Device) GetSerialCalls() []struct { return calls } +// GetSramEccErrorStatus calls GetSramEccErrorStatusFunc. +func (mock *Device) GetSramEccErrorStatus() (nvml.EccSramErrorStatus, nvml.Return) { + if mock.GetSramEccErrorStatusFunc == nil { + panic("Device.GetSramEccErrorStatusFunc: method is nil but Device.GetSramEccErrorStatus was just called") + } + callInfo := struct { + }{} + mock.lockGetSramEccErrorStatus.Lock() + mock.calls.GetSramEccErrorStatus = append(mock.calls.GetSramEccErrorStatus, callInfo) + mock.lockGetSramEccErrorStatus.Unlock() + return mock.GetSramEccErrorStatusFunc() +} + +// GetSramEccErrorStatusCalls gets all the calls that were made to GetSramEccErrorStatus. +// Check the length with: +// +// len(mockedDevice.GetSramEccErrorStatusCalls()) +func (mock *Device) GetSramEccErrorStatusCalls() []struct { +} { + var calls []struct { + } + mock.lockGetSramEccErrorStatus.RLock() + calls = mock.calls.GetSramEccErrorStatus + mock.lockGetSramEccErrorStatus.RUnlock() + return calls +} + +// GetSupportedClocksEventReasons calls GetSupportedClocksEventReasonsFunc. 
+func (mock *Device) GetSupportedClocksEventReasons() (uint64, nvml.Return) { + if mock.GetSupportedClocksEventReasonsFunc == nil { + panic("Device.GetSupportedClocksEventReasonsFunc: method is nil but Device.GetSupportedClocksEventReasons was just called") + } + callInfo := struct { + }{} + mock.lockGetSupportedClocksEventReasons.Lock() + mock.calls.GetSupportedClocksEventReasons = append(mock.calls.GetSupportedClocksEventReasons, callInfo) + mock.lockGetSupportedClocksEventReasons.Unlock() + return mock.GetSupportedClocksEventReasonsFunc() +} + +// GetSupportedClocksEventReasonsCalls gets all the calls that were made to GetSupportedClocksEventReasons. +// Check the length with: +// +// len(mockedDevice.GetSupportedClocksEventReasonsCalls()) +func (mock *Device) GetSupportedClocksEventReasonsCalls() []struct { +} { + var calls []struct { + } + mock.lockGetSupportedClocksEventReasons.RLock() + calls = mock.calls.GetSupportedClocksEventReasons + mock.lockGetSupportedClocksEventReasons.RUnlock() + return calls +} + // GetSupportedClocksThrottleReasons calls GetSupportedClocksThrottleReasonsFunc. func (mock *Device) GetSupportedClocksThrottleReasons() (uint64, nvml.Return) { if mock.GetSupportedClocksThrottleReasonsFunc == nil { @@ -6511,6 +7755,33 @@ func (mock *Device) GetTemperatureThresholdCalls() []struct { return calls } +// GetTemperatureV calls GetTemperatureVFunc. +func (mock *Device) GetTemperatureV() nvml.TemperatureHandler { + if mock.GetTemperatureVFunc == nil { + panic("Device.GetTemperatureVFunc: method is nil but Device.GetTemperatureV was just called") + } + callInfo := struct { + }{} + mock.lockGetTemperatureV.Lock() + mock.calls.GetTemperatureV = append(mock.calls.GetTemperatureV, callInfo) + mock.lockGetTemperatureV.Unlock() + return mock.GetTemperatureVFunc() +} + +// GetTemperatureVCalls gets all the calls that were made to GetTemperatureV. 
+// Check the length with: +// +// len(mockedDevice.GetTemperatureVCalls()) +func (mock *Device) GetTemperatureVCalls() []struct { +} { + var calls []struct { + } + mock.lockGetTemperatureV.RLock() + calls = mock.calls.GetTemperatureV + mock.lockGetTemperatureV.RUnlock() + return calls +} + // GetThermalSettings calls GetThermalSettingsFunc. func (mock *Device) GetThermalSettings(v uint32) (nvml.GpuThermalSettings, nvml.Return) { if mock.GetThermalSettingsFunc == nil { @@ -6783,6 +8054,60 @@ func (mock *Device) GetVgpuCapabilitiesCalls() []struct { return calls } +// GetVgpuHeterogeneousMode calls GetVgpuHeterogeneousModeFunc. +func (mock *Device) GetVgpuHeterogeneousMode() (nvml.VgpuHeterogeneousMode, nvml.Return) { + if mock.GetVgpuHeterogeneousModeFunc == nil { + panic("Device.GetVgpuHeterogeneousModeFunc: method is nil but Device.GetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + }{} + mock.lockGetVgpuHeterogeneousMode.Lock() + mock.calls.GetVgpuHeterogeneousMode = append(mock.calls.GetVgpuHeterogeneousMode, callInfo) + mock.lockGetVgpuHeterogeneousMode.Unlock() + return mock.GetVgpuHeterogeneousModeFunc() +} + +// GetVgpuHeterogeneousModeCalls gets all the calls that were made to GetVgpuHeterogeneousMode. +// Check the length with: +// +// len(mockedDevice.GetVgpuHeterogeneousModeCalls()) +func (mock *Device) GetVgpuHeterogeneousModeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetVgpuHeterogeneousMode.RLock() + calls = mock.calls.GetVgpuHeterogeneousMode + mock.lockGetVgpuHeterogeneousMode.RUnlock() + return calls +} + +// GetVgpuInstancesUtilizationInfo calls GetVgpuInstancesUtilizationInfoFunc. 
+func (mock *Device) GetVgpuInstancesUtilizationInfo() (nvml.VgpuInstancesUtilizationInfo, nvml.Return) { + if mock.GetVgpuInstancesUtilizationInfoFunc == nil { + panic("Device.GetVgpuInstancesUtilizationInfoFunc: method is nil but Device.GetVgpuInstancesUtilizationInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetVgpuInstancesUtilizationInfo.Lock() + mock.calls.GetVgpuInstancesUtilizationInfo = append(mock.calls.GetVgpuInstancesUtilizationInfo, callInfo) + mock.lockGetVgpuInstancesUtilizationInfo.Unlock() + return mock.GetVgpuInstancesUtilizationInfoFunc() +} + +// GetVgpuInstancesUtilizationInfoCalls gets all the calls that were made to GetVgpuInstancesUtilizationInfo. +// Check the length with: +// +// len(mockedDevice.GetVgpuInstancesUtilizationInfoCalls()) +func (mock *Device) GetVgpuInstancesUtilizationInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetVgpuInstancesUtilizationInfo.RLock() + calls = mock.calls.GetVgpuInstancesUtilizationInfo + mock.lockGetVgpuInstancesUtilizationInfo.RUnlock() + return calls +} + // GetVgpuMetadata calls GetVgpuMetadataFunc. func (mock *Device) GetVgpuMetadata() (nvml.VgpuPgpuMetadata, nvml.Return) { if mock.GetVgpuMetadataFunc == nil { @@ -6842,6 +8167,33 @@ func (mock *Device) GetVgpuProcessUtilizationCalls() []struct { return calls } +// GetVgpuProcessesUtilizationInfo calls GetVgpuProcessesUtilizationInfoFunc. 
+func (mock *Device) GetVgpuProcessesUtilizationInfo() (nvml.VgpuProcessesUtilizationInfo, nvml.Return) { + if mock.GetVgpuProcessesUtilizationInfoFunc == nil { + panic("Device.GetVgpuProcessesUtilizationInfoFunc: method is nil but Device.GetVgpuProcessesUtilizationInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetVgpuProcessesUtilizationInfo.Lock() + mock.calls.GetVgpuProcessesUtilizationInfo = append(mock.calls.GetVgpuProcessesUtilizationInfo, callInfo) + mock.lockGetVgpuProcessesUtilizationInfo.Unlock() + return mock.GetVgpuProcessesUtilizationInfoFunc() +} + +// GetVgpuProcessesUtilizationInfoCalls gets all the calls that were made to GetVgpuProcessesUtilizationInfo. +// Check the length with: +// +// len(mockedDevice.GetVgpuProcessesUtilizationInfoCalls()) +func (mock *Device) GetVgpuProcessesUtilizationInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetVgpuProcessesUtilizationInfo.RLock() + calls = mock.calls.GetVgpuProcessesUtilizationInfo + mock.lockGetVgpuProcessesUtilizationInfo.RUnlock() + return calls +} + // GetVgpuSchedulerCapabilities calls GetVgpuSchedulerCapabilitiesFunc. func (mock *Device) GetVgpuSchedulerCapabilities() (nvml.VgpuSchedulerCapabilities, nvml.Return) { if mock.GetVgpuSchedulerCapabilitiesFunc == nil { @@ -6923,6 +8275,70 @@ func (mock *Device) GetVgpuSchedulerStateCalls() []struct { return calls } +// GetVgpuTypeCreatablePlacements calls GetVgpuTypeCreatablePlacementsFunc. 
+func (mock *Device) GetVgpuTypeCreatablePlacements(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { + if mock.GetVgpuTypeCreatablePlacementsFunc == nil { + panic("Device.GetVgpuTypeCreatablePlacementsFunc: method is nil but Device.GetVgpuTypeCreatablePlacements was just called") + } + callInfo := struct { + VgpuTypeId nvml.VgpuTypeId + }{ + VgpuTypeId: vgpuTypeId, + } + mock.lockGetVgpuTypeCreatablePlacements.Lock() + mock.calls.GetVgpuTypeCreatablePlacements = append(mock.calls.GetVgpuTypeCreatablePlacements, callInfo) + mock.lockGetVgpuTypeCreatablePlacements.Unlock() + return mock.GetVgpuTypeCreatablePlacementsFunc(vgpuTypeId) +} + +// GetVgpuTypeCreatablePlacementsCalls gets all the calls that were made to GetVgpuTypeCreatablePlacements. +// Check the length with: +// +// len(mockedDevice.GetVgpuTypeCreatablePlacementsCalls()) +func (mock *Device) GetVgpuTypeCreatablePlacementsCalls() []struct { + VgpuTypeId nvml.VgpuTypeId +} { + var calls []struct { + VgpuTypeId nvml.VgpuTypeId + } + mock.lockGetVgpuTypeCreatablePlacements.RLock() + calls = mock.calls.GetVgpuTypeCreatablePlacements + mock.lockGetVgpuTypeCreatablePlacements.RUnlock() + return calls +} + +// GetVgpuTypeSupportedPlacements calls GetVgpuTypeSupportedPlacementsFunc. 
+func (mock *Device) GetVgpuTypeSupportedPlacements(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { + if mock.GetVgpuTypeSupportedPlacementsFunc == nil { + panic("Device.GetVgpuTypeSupportedPlacementsFunc: method is nil but Device.GetVgpuTypeSupportedPlacements was just called") + } + callInfo := struct { + VgpuTypeId nvml.VgpuTypeId + }{ + VgpuTypeId: vgpuTypeId, + } + mock.lockGetVgpuTypeSupportedPlacements.Lock() + mock.calls.GetVgpuTypeSupportedPlacements = append(mock.calls.GetVgpuTypeSupportedPlacements, callInfo) + mock.lockGetVgpuTypeSupportedPlacements.Unlock() + return mock.GetVgpuTypeSupportedPlacementsFunc(vgpuTypeId) +} + +// GetVgpuTypeSupportedPlacementsCalls gets all the calls that were made to GetVgpuTypeSupportedPlacements. +// Check the length with: +// +// len(mockedDevice.GetVgpuTypeSupportedPlacementsCalls()) +func (mock *Device) GetVgpuTypeSupportedPlacementsCalls() []struct { + VgpuTypeId nvml.VgpuTypeId +} { + var calls []struct { + VgpuTypeId nvml.VgpuTypeId + } + mock.lockGetVgpuTypeSupportedPlacements.RLock() + calls = mock.calls.GetVgpuTypeSupportedPlacements + mock.lockGetVgpuTypeSupportedPlacements.RUnlock() + return calls +} + // GetVgpuUtilization calls GetVgpuUtilizationFunc. func (mock *Device) GetVgpuUtilization(v uint64) (nvml.ValueType, []nvml.VgpuInstanceUtilizationSample, nvml.Return) { if mock.GetVgpuUtilizationFunc == nil { @@ -7104,6 +8520,33 @@ func (mock *Device) GpmQueryDeviceSupportVCalls() []struct { return calls } +// GpmQueryIfStreamingEnabled calls GpmQueryIfStreamingEnabledFunc. 
+func (mock *Device) GpmQueryIfStreamingEnabled() (uint32, nvml.Return) { + if mock.GpmQueryIfStreamingEnabledFunc == nil { + panic("Device.GpmQueryIfStreamingEnabledFunc: method is nil but Device.GpmQueryIfStreamingEnabled was just called") + } + callInfo := struct { + }{} + mock.lockGpmQueryIfStreamingEnabled.Lock() + mock.calls.GpmQueryIfStreamingEnabled = append(mock.calls.GpmQueryIfStreamingEnabled, callInfo) + mock.lockGpmQueryIfStreamingEnabled.Unlock() + return mock.GpmQueryIfStreamingEnabledFunc() +} + +// GpmQueryIfStreamingEnabledCalls gets all the calls that were made to GpmQueryIfStreamingEnabled. +// Check the length with: +// +// len(mockedDevice.GpmQueryIfStreamingEnabledCalls()) +func (mock *Device) GpmQueryIfStreamingEnabledCalls() []struct { +} { + var calls []struct { + } + mock.lockGpmQueryIfStreamingEnabled.RLock() + calls = mock.calls.GpmQueryIfStreamingEnabled + mock.lockGpmQueryIfStreamingEnabled.RUnlock() + return calls +} + // GpmSampleGet calls GpmSampleGetFunc. func (mock *Device) GpmSampleGet(gpmSample nvml.GpmSample) nvml.Return { if mock.GpmSampleGetFunc == nil { @@ -7136,6 +8579,38 @@ func (mock *Device) GpmSampleGetCalls() []struct { return calls } +// GpmSetStreamingEnabled calls GpmSetStreamingEnabledFunc. +func (mock *Device) GpmSetStreamingEnabled(v uint32) nvml.Return { + if mock.GpmSetStreamingEnabledFunc == nil { + panic("Device.GpmSetStreamingEnabledFunc: method is nil but Device.GpmSetStreamingEnabled was just called") + } + callInfo := struct { + V uint32 + }{ + V: v, + } + mock.lockGpmSetStreamingEnabled.Lock() + mock.calls.GpmSetStreamingEnabled = append(mock.calls.GpmSetStreamingEnabled, callInfo) + mock.lockGpmSetStreamingEnabled.Unlock() + return mock.GpmSetStreamingEnabledFunc(v) +} + +// GpmSetStreamingEnabledCalls gets all the calls that were made to GpmSetStreamingEnabled. 
+// Check the length with: +// +// len(mockedDevice.GpmSetStreamingEnabledCalls()) +func (mock *Device) GpmSetStreamingEnabledCalls() []struct { + V uint32 +} { + var calls []struct { + V uint32 + } + mock.lockGpmSetStreamingEnabled.RLock() + calls = mock.calls.GpmSetStreamingEnabled + mock.lockGpmSetStreamingEnabled.RUnlock() + return calls +} + // IsMigDeviceHandle calls IsMigDeviceHandleFunc. func (mock *Device) IsMigDeviceHandle() (bool, nvml.Return) { if mock.IsMigDeviceHandleFunc == nil { @@ -7195,6 +8670,102 @@ func (mock *Device) OnSameBoardCalls() []struct { return calls } +// PowerSmoothingActivatePresetProfile calls PowerSmoothingActivatePresetProfileFunc. +func (mock *Device) PowerSmoothingActivatePresetProfile(powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { + if mock.PowerSmoothingActivatePresetProfileFunc == nil { + panic("Device.PowerSmoothingActivatePresetProfileFunc: method is nil but Device.PowerSmoothingActivatePresetProfile was just called") + } + callInfo := struct { + PowerSmoothingProfile *nvml.PowerSmoothingProfile + }{ + PowerSmoothingProfile: powerSmoothingProfile, + } + mock.lockPowerSmoothingActivatePresetProfile.Lock() + mock.calls.PowerSmoothingActivatePresetProfile = append(mock.calls.PowerSmoothingActivatePresetProfile, callInfo) + mock.lockPowerSmoothingActivatePresetProfile.Unlock() + return mock.PowerSmoothingActivatePresetProfileFunc(powerSmoothingProfile) +} + +// PowerSmoothingActivatePresetProfileCalls gets all the calls that were made to PowerSmoothingActivatePresetProfile. 
+// Check the length with: +// +// len(mockedDevice.PowerSmoothingActivatePresetProfileCalls()) +func (mock *Device) PowerSmoothingActivatePresetProfileCalls() []struct { + PowerSmoothingProfile *nvml.PowerSmoothingProfile +} { + var calls []struct { + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } + mock.lockPowerSmoothingActivatePresetProfile.RLock() + calls = mock.calls.PowerSmoothingActivatePresetProfile + mock.lockPowerSmoothingActivatePresetProfile.RUnlock() + return calls +} + +// PowerSmoothingSetState calls PowerSmoothingSetStateFunc. +func (mock *Device) PowerSmoothingSetState(powerSmoothingState *nvml.PowerSmoothingState) nvml.Return { + if mock.PowerSmoothingSetStateFunc == nil { + panic("Device.PowerSmoothingSetStateFunc: method is nil but Device.PowerSmoothingSetState was just called") + } + callInfo := struct { + PowerSmoothingState *nvml.PowerSmoothingState + }{ + PowerSmoothingState: powerSmoothingState, + } + mock.lockPowerSmoothingSetState.Lock() + mock.calls.PowerSmoothingSetState = append(mock.calls.PowerSmoothingSetState, callInfo) + mock.lockPowerSmoothingSetState.Unlock() + return mock.PowerSmoothingSetStateFunc(powerSmoothingState) +} + +// PowerSmoothingSetStateCalls gets all the calls that were made to PowerSmoothingSetState. +// Check the length with: +// +// len(mockedDevice.PowerSmoothingSetStateCalls()) +func (mock *Device) PowerSmoothingSetStateCalls() []struct { + PowerSmoothingState *nvml.PowerSmoothingState +} { + var calls []struct { + PowerSmoothingState *nvml.PowerSmoothingState + } + mock.lockPowerSmoothingSetState.RLock() + calls = mock.calls.PowerSmoothingSetState + mock.lockPowerSmoothingSetState.RUnlock() + return calls +} + +// PowerSmoothingUpdatePresetProfileParam calls PowerSmoothingUpdatePresetProfileParamFunc. 
+func (mock *Device) PowerSmoothingUpdatePresetProfileParam(powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { + if mock.PowerSmoothingUpdatePresetProfileParamFunc == nil { + panic("Device.PowerSmoothingUpdatePresetProfileParamFunc: method is nil but Device.PowerSmoothingUpdatePresetProfileParam was just called") + } + callInfo := struct { + PowerSmoothingProfile *nvml.PowerSmoothingProfile + }{ + PowerSmoothingProfile: powerSmoothingProfile, + } + mock.lockPowerSmoothingUpdatePresetProfileParam.Lock() + mock.calls.PowerSmoothingUpdatePresetProfileParam = append(mock.calls.PowerSmoothingUpdatePresetProfileParam, callInfo) + mock.lockPowerSmoothingUpdatePresetProfileParam.Unlock() + return mock.PowerSmoothingUpdatePresetProfileParamFunc(powerSmoothingProfile) +} + +// PowerSmoothingUpdatePresetProfileParamCalls gets all the calls that were made to PowerSmoothingUpdatePresetProfileParam. +// Check the length with: +// +// len(mockedDevice.PowerSmoothingUpdatePresetProfileParamCalls()) +func (mock *Device) PowerSmoothingUpdatePresetProfileParamCalls() []struct { + PowerSmoothingProfile *nvml.PowerSmoothingProfile +} { + var calls []struct { + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } + mock.lockPowerSmoothingUpdatePresetProfileParam.RLock() + calls = mock.calls.PowerSmoothingUpdatePresetProfileParam + mock.lockPowerSmoothingUpdatePresetProfileParam.RUnlock() + return calls +} + // RegisterEvents calls RegisterEventsFunc. func (mock *Device) RegisterEvents(v uint64, eventSet nvml.EventSet) nvml.Return { if mock.RegisterEventsFunc == nil { @@ -7516,6 +9087,38 @@ func (mock *Device) SetAutoBoostedClocksEnabledCalls() []struct { return calls } +// SetClockOffsets calls SetClockOffsetsFunc. 
+func (mock *Device) SetClockOffsets(clockOffset nvml.ClockOffset) nvml.Return { + if mock.SetClockOffsetsFunc == nil { + panic("Device.SetClockOffsetsFunc: method is nil but Device.SetClockOffsets was just called") + } + callInfo := struct { + ClockOffset nvml.ClockOffset + }{ + ClockOffset: clockOffset, + } + mock.lockSetClockOffsets.Lock() + mock.calls.SetClockOffsets = append(mock.calls.SetClockOffsets, callInfo) + mock.lockSetClockOffsets.Unlock() + return mock.SetClockOffsetsFunc(clockOffset) +} + +// SetClockOffsetsCalls gets all the calls that were made to SetClockOffsets. +// Check the length with: +// +// len(mockedDevice.SetClockOffsetsCalls()) +func (mock *Device) SetClockOffsetsCalls() []struct { + ClockOffset nvml.ClockOffset +} { + var calls []struct { + ClockOffset nvml.ClockOffset + } + mock.lockSetClockOffsets.RLock() + calls = mock.calls.SetClockOffsets + mock.lockSetClockOffsets.RUnlock() + return calls +} + // SetComputeMode calls SetComputeModeFunc. func (mock *Device) SetComputeMode(computeMode nvml.ComputeMode) nvml.Return { if mock.SetComputeModeFunc == nil { @@ -7548,6 +9151,38 @@ func (mock *Device) SetComputeModeCalls() []struct { return calls } +// SetConfComputeUnprotectedMemSize calls SetConfComputeUnprotectedMemSizeFunc. +func (mock *Device) SetConfComputeUnprotectedMemSize(v uint64) nvml.Return { + if mock.SetConfComputeUnprotectedMemSizeFunc == nil { + panic("Device.SetConfComputeUnprotectedMemSizeFunc: method is nil but Device.SetConfComputeUnprotectedMemSize was just called") + } + callInfo := struct { + V uint64 + }{ + V: v, + } + mock.lockSetConfComputeUnprotectedMemSize.Lock() + mock.calls.SetConfComputeUnprotectedMemSize = append(mock.calls.SetConfComputeUnprotectedMemSize, callInfo) + mock.lockSetConfComputeUnprotectedMemSize.Unlock() + return mock.SetConfComputeUnprotectedMemSizeFunc(v) +} + +// SetConfComputeUnprotectedMemSizeCalls gets all the calls that were made to SetConfComputeUnprotectedMemSize. 
+// Check the length with: +// +// len(mockedDevice.SetConfComputeUnprotectedMemSizeCalls()) +func (mock *Device) SetConfComputeUnprotectedMemSizeCalls() []struct { + V uint64 +} { + var calls []struct { + V uint64 + } + mock.lockSetConfComputeUnprotectedMemSize.RLock() + calls = mock.calls.SetConfComputeUnprotectedMemSize + mock.lockSetConfComputeUnprotectedMemSize.RUnlock() + return calls +} + // SetCpuAffinity calls SetCpuAffinityFunc. func (mock *Device) SetCpuAffinity() nvml.Return { if mock.SetCpuAffinityFunc == nil { @@ -7643,6 +9278,38 @@ func (mock *Device) SetDefaultFanSpeed_v2Calls() []struct { return calls } +// SetDramEncryptionMode calls SetDramEncryptionModeFunc. +func (mock *Device) SetDramEncryptionMode(dramEncryptionInfo *nvml.DramEncryptionInfo) nvml.Return { + if mock.SetDramEncryptionModeFunc == nil { + panic("Device.SetDramEncryptionModeFunc: method is nil but Device.SetDramEncryptionMode was just called") + } + callInfo := struct { + DramEncryptionInfo *nvml.DramEncryptionInfo + }{ + DramEncryptionInfo: dramEncryptionInfo, + } + mock.lockSetDramEncryptionMode.Lock() + mock.calls.SetDramEncryptionMode = append(mock.calls.SetDramEncryptionMode, callInfo) + mock.lockSetDramEncryptionMode.Unlock() + return mock.SetDramEncryptionModeFunc(dramEncryptionInfo) +} + +// SetDramEncryptionModeCalls gets all the calls that were made to SetDramEncryptionMode. +// Check the length with: +// +// len(mockedDevice.SetDramEncryptionModeCalls()) +func (mock *Device) SetDramEncryptionModeCalls() []struct { + DramEncryptionInfo *nvml.DramEncryptionInfo +} { + var calls []struct { + DramEncryptionInfo *nvml.DramEncryptionInfo + } + mock.lockSetDramEncryptionMode.RLock() + calls = mock.calls.SetDramEncryptionMode + mock.lockSetDramEncryptionMode.RUnlock() + return calls +} + // SetDriverModel calls SetDriverModelFunc. 
func (mock *Device) SetDriverModel(driverModel nvml.DriverModel, v uint32) nvml.Return { if mock.SetDriverModelFunc == nil { @@ -8059,6 +9726,38 @@ func (mock *Device) SetNvLinkUtilizationControlCalls() []struct { return calls } +// SetNvlinkBwMode calls SetNvlinkBwModeFunc. +func (mock *Device) SetNvlinkBwMode(nvlinkSetBwMode *nvml.NvlinkSetBwMode) nvml.Return { + if mock.SetNvlinkBwModeFunc == nil { + panic("Device.SetNvlinkBwModeFunc: method is nil but Device.SetNvlinkBwMode was just called") + } + callInfo := struct { + NvlinkSetBwMode *nvml.NvlinkSetBwMode + }{ + NvlinkSetBwMode: nvlinkSetBwMode, + } + mock.lockSetNvlinkBwMode.Lock() + mock.calls.SetNvlinkBwMode = append(mock.calls.SetNvlinkBwMode, callInfo) + mock.lockSetNvlinkBwMode.Unlock() + return mock.SetNvlinkBwModeFunc(nvlinkSetBwMode) +} + +// SetNvlinkBwModeCalls gets all the calls that were made to SetNvlinkBwMode. +// Check the length with: +// +// len(mockedDevice.SetNvlinkBwModeCalls()) +func (mock *Device) SetNvlinkBwModeCalls() []struct { + NvlinkSetBwMode *nvml.NvlinkSetBwMode +} { + var calls []struct { + NvlinkSetBwMode *nvml.NvlinkSetBwMode + } + mock.lockSetNvlinkBwMode.RLock() + calls = mock.calls.SetNvlinkBwMode + mock.lockSetNvlinkBwMode.RUnlock() + return calls +} + // SetPersistenceMode calls SetPersistenceModeFunc. func (mock *Device) SetPersistenceMode(enableState nvml.EnableState) nvml.Return { if mock.SetPersistenceModeFunc == nil { @@ -8123,6 +9822,38 @@ func (mock *Device) SetPowerManagementLimitCalls() []struct { return calls } +// SetPowerManagementLimit_v2 calls SetPowerManagementLimit_v2Func. 
+func (mock *Device) SetPowerManagementLimit_v2(powerValue_v2 *nvml.PowerValue_v2) nvml.Return { + if mock.SetPowerManagementLimit_v2Func == nil { + panic("Device.SetPowerManagementLimit_v2Func: method is nil but Device.SetPowerManagementLimit_v2 was just called") + } + callInfo := struct { + PowerValue_v2 *nvml.PowerValue_v2 + }{ + PowerValue_v2: powerValue_v2, + } + mock.lockSetPowerManagementLimit_v2.Lock() + mock.calls.SetPowerManagementLimit_v2 = append(mock.calls.SetPowerManagementLimit_v2, callInfo) + mock.lockSetPowerManagementLimit_v2.Unlock() + return mock.SetPowerManagementLimit_v2Func(powerValue_v2) +} + +// SetPowerManagementLimit_v2Calls gets all the calls that were made to SetPowerManagementLimit_v2. +// Check the length with: +// +// len(mockedDevice.SetPowerManagementLimit_v2Calls()) +func (mock *Device) SetPowerManagementLimit_v2Calls() []struct { + PowerValue_v2 *nvml.PowerValue_v2 +} { + var calls []struct { + PowerValue_v2 *nvml.PowerValue_v2 + } + mock.lockSetPowerManagementLimit_v2.RLock() + calls = mock.calls.SetPowerManagementLimit_v2 + mock.lockSetPowerManagementLimit_v2.RUnlock() + return calls +} + // SetTemperatureThreshold calls SetTemperatureThresholdFunc. func (mock *Device) SetTemperatureThreshold(temperatureThresholds nvml.TemperatureThresholds, n int) nvml.Return { if mock.SetTemperatureThresholdFunc == nil { @@ -8159,6 +9890,74 @@ func (mock *Device) SetTemperatureThresholdCalls() []struct { return calls } +// SetVgpuCapabilities calls SetVgpuCapabilitiesFunc. 
+func (mock *Device) SetVgpuCapabilities(deviceVgpuCapability nvml.DeviceVgpuCapability, enableState nvml.EnableState) nvml.Return { + if mock.SetVgpuCapabilitiesFunc == nil { + panic("Device.SetVgpuCapabilitiesFunc: method is nil but Device.SetVgpuCapabilities was just called") + } + callInfo := struct { + DeviceVgpuCapability nvml.DeviceVgpuCapability + EnableState nvml.EnableState + }{ + DeviceVgpuCapability: deviceVgpuCapability, + EnableState: enableState, + } + mock.lockSetVgpuCapabilities.Lock() + mock.calls.SetVgpuCapabilities = append(mock.calls.SetVgpuCapabilities, callInfo) + mock.lockSetVgpuCapabilities.Unlock() + return mock.SetVgpuCapabilitiesFunc(deviceVgpuCapability, enableState) +} + +// SetVgpuCapabilitiesCalls gets all the calls that were made to SetVgpuCapabilities. +// Check the length with: +// +// len(mockedDevice.SetVgpuCapabilitiesCalls()) +func (mock *Device) SetVgpuCapabilitiesCalls() []struct { + DeviceVgpuCapability nvml.DeviceVgpuCapability + EnableState nvml.EnableState +} { + var calls []struct { + DeviceVgpuCapability nvml.DeviceVgpuCapability + EnableState nvml.EnableState + } + mock.lockSetVgpuCapabilities.RLock() + calls = mock.calls.SetVgpuCapabilities + mock.lockSetVgpuCapabilities.RUnlock() + return calls +} + +// SetVgpuHeterogeneousMode calls SetVgpuHeterogeneousModeFunc. 
+func (mock *Device) SetVgpuHeterogeneousMode(vgpuHeterogeneousMode nvml.VgpuHeterogeneousMode) nvml.Return { + if mock.SetVgpuHeterogeneousModeFunc == nil { + panic("Device.SetVgpuHeterogeneousModeFunc: method is nil but Device.SetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode + }{ + VgpuHeterogeneousMode: vgpuHeterogeneousMode, + } + mock.lockSetVgpuHeterogeneousMode.Lock() + mock.calls.SetVgpuHeterogeneousMode = append(mock.calls.SetVgpuHeterogeneousMode, callInfo) + mock.lockSetVgpuHeterogeneousMode.Unlock() + return mock.SetVgpuHeterogeneousModeFunc(vgpuHeterogeneousMode) +} + +// SetVgpuHeterogeneousModeCalls gets all the calls that were made to SetVgpuHeterogeneousMode. +// Check the length with: +// +// len(mockedDevice.SetVgpuHeterogeneousModeCalls()) +func (mock *Device) SetVgpuHeterogeneousModeCalls() []struct { + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode +} { + var calls []struct { + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode + } + mock.lockSetVgpuHeterogeneousMode.RLock() + calls = mock.calls.SetVgpuHeterogeneousMode + mock.lockSetVgpuHeterogeneousMode.RUnlock() + return calls +} + // SetVgpuSchedulerState calls SetVgpuSchedulerStateFunc. func (mock *Device) SetVgpuSchedulerState(vgpuSchedulerSetState *nvml.VgpuSchedulerSetState) nvml.Return { if mock.SetVgpuSchedulerStateFunc == nil { @@ -8281,3 +10080,121 @@ func (mock *Device) VgpuTypeGetMaxInstancesCalls() []struct { mock.lockVgpuTypeGetMaxInstances.RUnlock() return calls } + +// WorkloadPowerProfileClearRequestedProfiles calls WorkloadPowerProfileClearRequestedProfilesFunc. 
+func (mock *Device) WorkloadPowerProfileClearRequestedProfiles(workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { + if mock.WorkloadPowerProfileClearRequestedProfilesFunc == nil { + panic("Device.WorkloadPowerProfileClearRequestedProfilesFunc: method is nil but Device.WorkloadPowerProfileClearRequestedProfiles was just called") + } + callInfo := struct { + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + }{ + WorkloadPowerProfileRequestedProfiles: workloadPowerProfileRequestedProfiles, + } + mock.lockWorkloadPowerProfileClearRequestedProfiles.Lock() + mock.calls.WorkloadPowerProfileClearRequestedProfiles = append(mock.calls.WorkloadPowerProfileClearRequestedProfiles, callInfo) + mock.lockWorkloadPowerProfileClearRequestedProfiles.Unlock() + return mock.WorkloadPowerProfileClearRequestedProfilesFunc(workloadPowerProfileRequestedProfiles) +} + +// WorkloadPowerProfileClearRequestedProfilesCalls gets all the calls that were made to WorkloadPowerProfileClearRequestedProfiles. +// Check the length with: +// +// len(mockedDevice.WorkloadPowerProfileClearRequestedProfilesCalls()) +func (mock *Device) WorkloadPowerProfileClearRequestedProfilesCalls() []struct { + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles +} { + var calls []struct { + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } + mock.lockWorkloadPowerProfileClearRequestedProfiles.RLock() + calls = mock.calls.WorkloadPowerProfileClearRequestedProfiles + mock.lockWorkloadPowerProfileClearRequestedProfiles.RUnlock() + return calls +} + +// WorkloadPowerProfileGetCurrentProfiles calls WorkloadPowerProfileGetCurrentProfilesFunc. 
+func (mock *Device) WorkloadPowerProfileGetCurrentProfiles() (nvml.WorkloadPowerProfileCurrentProfiles, nvml.Return) { + if mock.WorkloadPowerProfileGetCurrentProfilesFunc == nil { + panic("Device.WorkloadPowerProfileGetCurrentProfilesFunc: method is nil but Device.WorkloadPowerProfileGetCurrentProfiles was just called") + } + callInfo := struct { + }{} + mock.lockWorkloadPowerProfileGetCurrentProfiles.Lock() + mock.calls.WorkloadPowerProfileGetCurrentProfiles = append(mock.calls.WorkloadPowerProfileGetCurrentProfiles, callInfo) + mock.lockWorkloadPowerProfileGetCurrentProfiles.Unlock() + return mock.WorkloadPowerProfileGetCurrentProfilesFunc() +} + +// WorkloadPowerProfileGetCurrentProfilesCalls gets all the calls that were made to WorkloadPowerProfileGetCurrentProfiles. +// Check the length with: +// +// len(mockedDevice.WorkloadPowerProfileGetCurrentProfilesCalls()) +func (mock *Device) WorkloadPowerProfileGetCurrentProfilesCalls() []struct { +} { + var calls []struct { + } + mock.lockWorkloadPowerProfileGetCurrentProfiles.RLock() + calls = mock.calls.WorkloadPowerProfileGetCurrentProfiles + mock.lockWorkloadPowerProfileGetCurrentProfiles.RUnlock() + return calls +} + +// WorkloadPowerProfileGetProfilesInfo calls WorkloadPowerProfileGetProfilesInfoFunc. 
+func (mock *Device) WorkloadPowerProfileGetProfilesInfo() (nvml.WorkloadPowerProfileProfilesInfo, nvml.Return) { + if mock.WorkloadPowerProfileGetProfilesInfoFunc == nil { + panic("Device.WorkloadPowerProfileGetProfilesInfoFunc: method is nil but Device.WorkloadPowerProfileGetProfilesInfo was just called") + } + callInfo := struct { + }{} + mock.lockWorkloadPowerProfileGetProfilesInfo.Lock() + mock.calls.WorkloadPowerProfileGetProfilesInfo = append(mock.calls.WorkloadPowerProfileGetProfilesInfo, callInfo) + mock.lockWorkloadPowerProfileGetProfilesInfo.Unlock() + return mock.WorkloadPowerProfileGetProfilesInfoFunc() +} + +// WorkloadPowerProfileGetProfilesInfoCalls gets all the calls that were made to WorkloadPowerProfileGetProfilesInfo. +// Check the length with: +// +// len(mockedDevice.WorkloadPowerProfileGetProfilesInfoCalls()) +func (mock *Device) WorkloadPowerProfileGetProfilesInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockWorkloadPowerProfileGetProfilesInfo.RLock() + calls = mock.calls.WorkloadPowerProfileGetProfilesInfo + mock.lockWorkloadPowerProfileGetProfilesInfo.RUnlock() + return calls +} + +// WorkloadPowerProfileSetRequestedProfiles calls WorkloadPowerProfileSetRequestedProfilesFunc. 
+func (mock *Device) WorkloadPowerProfileSetRequestedProfiles(workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { + if mock.WorkloadPowerProfileSetRequestedProfilesFunc == nil { + panic("Device.WorkloadPowerProfileSetRequestedProfilesFunc: method is nil but Device.WorkloadPowerProfileSetRequestedProfiles was just called") + } + callInfo := struct { + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + }{ + WorkloadPowerProfileRequestedProfiles: workloadPowerProfileRequestedProfiles, + } + mock.lockWorkloadPowerProfileSetRequestedProfiles.Lock() + mock.calls.WorkloadPowerProfileSetRequestedProfiles = append(mock.calls.WorkloadPowerProfileSetRequestedProfiles, callInfo) + mock.lockWorkloadPowerProfileSetRequestedProfiles.Unlock() + return mock.WorkloadPowerProfileSetRequestedProfilesFunc(workloadPowerProfileRequestedProfiles) +} + +// WorkloadPowerProfileSetRequestedProfilesCalls gets all the calls that were made to WorkloadPowerProfileSetRequestedProfiles. 
+// Check the length with: +// +// len(mockedDevice.WorkloadPowerProfileSetRequestedProfilesCalls()) +func (mock *Device) WorkloadPowerProfileSetRequestedProfilesCalls() []struct { + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles +} { + var calls []struct { + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } + mock.lockWorkloadPowerProfileSetRequestedProfiles.RLock() + calls = mock.calls.WorkloadPowerProfileSetRequestedProfiles + mock.lockWorkloadPowerProfileSetRequestedProfiles.RUnlock() + return calls +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/gpuinstance.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/gpuinstance.go index e084df2..d05bc34 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/gpuinstance.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/gpuinstance.go @@ -27,6 +27,9 @@ var _ nvml.GpuInstance = &GpuInstance{} // DestroyFunc: func() nvml.Return { // panic("mock out the Destroy method") // }, +// GetActiveVgpusFunc: func() (nvml.ActiveVgpuInstanceInfo, nvml.Return) { +// panic("mock out the GetActiveVgpus method") +// }, // GetComputeInstanceByIdFunc: func(n int) (nvml.ComputeInstance, nvml.Return) { // panic("mock out the GetComputeInstanceById method") // }, @@ -36,7 +39,7 @@ var _ nvml.GpuInstance = &GpuInstance{} // GetComputeInstanceProfileInfoFunc: func(n1 int, n2 int) (nvml.ComputeInstanceProfileInfo, nvml.Return) { // panic("mock out the GetComputeInstanceProfileInfo method") // }, -// GetComputeInstanceProfileInfoVFunc: func(n1 int, n2 int) nvml.ComputeInstanceProfileInfoV { +// GetComputeInstanceProfileInfoVFunc: func(n1 int, n2 int) nvml.ComputeInstanceProfileInfoHandler { // panic("mock out the GetComputeInstanceProfileInfoV method") // }, // GetComputeInstanceRemainingCapacityFunc: func(computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) (int, nvml.Return) { @@ -45,9 +48,30 @@ var _ nvml.GpuInstance = &GpuInstance{} // 
GetComputeInstancesFunc: func(computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) ([]nvml.ComputeInstance, nvml.Return) { // panic("mock out the GetComputeInstances method") // }, +// GetCreatableVgpusFunc: func() (nvml.VgpuTypeIdInfo, nvml.Return) { +// panic("mock out the GetCreatableVgpus method") +// }, // GetInfoFunc: func() (nvml.GpuInstanceInfo, nvml.Return) { // panic("mock out the GetInfo method") // }, +// GetVgpuHeterogeneousModeFunc: func() (nvml.VgpuHeterogeneousMode, nvml.Return) { +// panic("mock out the GetVgpuHeterogeneousMode method") +// }, +// GetVgpuSchedulerLogFunc: func() (nvml.VgpuSchedulerLogInfo, nvml.Return) { +// panic("mock out the GetVgpuSchedulerLog method") +// }, +// GetVgpuSchedulerStateFunc: func() (nvml.VgpuSchedulerStateInfo, nvml.Return) { +// panic("mock out the GetVgpuSchedulerState method") +// }, +// GetVgpuTypeCreatablePlacementsFunc: func() (nvml.VgpuCreatablePlacementInfo, nvml.Return) { +// panic("mock out the GetVgpuTypeCreatablePlacements method") +// }, +// SetVgpuHeterogeneousModeFunc: func(vgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode) nvml.Return { +// panic("mock out the SetVgpuHeterogeneousMode method") +// }, +// SetVgpuSchedulerStateFunc: func(vgpuSchedulerState *nvml.VgpuSchedulerState) nvml.Return { +// panic("mock out the SetVgpuSchedulerState method") +// }, // } // // // use mockedGpuInstance in code that requires nvml.GpuInstance @@ -64,6 +88,9 @@ type GpuInstance struct { // DestroyFunc mocks the Destroy method. DestroyFunc func() nvml.Return + // GetActiveVgpusFunc mocks the GetActiveVgpus method. + GetActiveVgpusFunc func() (nvml.ActiveVgpuInstanceInfo, nvml.Return) + // GetComputeInstanceByIdFunc mocks the GetComputeInstanceById method. 
GetComputeInstanceByIdFunc func(n int) (nvml.ComputeInstance, nvml.Return) @@ -74,7 +101,7 @@ type GpuInstance struct { GetComputeInstanceProfileInfoFunc func(n1 int, n2 int) (nvml.ComputeInstanceProfileInfo, nvml.Return) // GetComputeInstanceProfileInfoVFunc mocks the GetComputeInstanceProfileInfoV method. - GetComputeInstanceProfileInfoVFunc func(n1 int, n2 int) nvml.ComputeInstanceProfileInfoV + GetComputeInstanceProfileInfoVFunc func(n1 int, n2 int) nvml.ComputeInstanceProfileInfoHandler // GetComputeInstanceRemainingCapacityFunc mocks the GetComputeInstanceRemainingCapacity method. GetComputeInstanceRemainingCapacityFunc func(computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) (int, nvml.Return) @@ -82,9 +109,30 @@ type GpuInstance struct { // GetComputeInstancesFunc mocks the GetComputeInstances method. GetComputeInstancesFunc func(computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) ([]nvml.ComputeInstance, nvml.Return) + // GetCreatableVgpusFunc mocks the GetCreatableVgpus method. + GetCreatableVgpusFunc func() (nvml.VgpuTypeIdInfo, nvml.Return) + // GetInfoFunc mocks the GetInfo method. GetInfoFunc func() (nvml.GpuInstanceInfo, nvml.Return) + // GetVgpuHeterogeneousModeFunc mocks the GetVgpuHeterogeneousMode method. + GetVgpuHeterogeneousModeFunc func() (nvml.VgpuHeterogeneousMode, nvml.Return) + + // GetVgpuSchedulerLogFunc mocks the GetVgpuSchedulerLog method. + GetVgpuSchedulerLogFunc func() (nvml.VgpuSchedulerLogInfo, nvml.Return) + + // GetVgpuSchedulerStateFunc mocks the GetVgpuSchedulerState method. + GetVgpuSchedulerStateFunc func() (nvml.VgpuSchedulerStateInfo, nvml.Return) + + // GetVgpuTypeCreatablePlacementsFunc mocks the GetVgpuTypeCreatablePlacements method. + GetVgpuTypeCreatablePlacementsFunc func() (nvml.VgpuCreatablePlacementInfo, nvml.Return) + + // SetVgpuHeterogeneousModeFunc mocks the SetVgpuHeterogeneousMode method. 
+ SetVgpuHeterogeneousModeFunc func(vgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode) nvml.Return + + // SetVgpuSchedulerStateFunc mocks the SetVgpuSchedulerState method. + SetVgpuSchedulerStateFunc func(vgpuSchedulerState *nvml.VgpuSchedulerState) nvml.Return + // calls tracks calls to the methods. calls struct { // CreateComputeInstance holds details about calls to the CreateComputeInstance method. @@ -102,6 +150,9 @@ type GpuInstance struct { // Destroy holds details about calls to the Destroy method. Destroy []struct { } + // GetActiveVgpus holds details about calls to the GetActiveVgpus method. + GetActiveVgpus []struct { + } // GetComputeInstanceById holds details about calls to the GetComputeInstanceById method. GetComputeInstanceById []struct { // N is the n argument value. @@ -136,20 +187,53 @@ type GpuInstance struct { // ComputeInstanceProfileInfo is the computeInstanceProfileInfo argument value. ComputeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo } + // GetCreatableVgpus holds details about calls to the GetCreatableVgpus method. + GetCreatableVgpus []struct { + } // GetInfo holds details about calls to the GetInfo method. GetInfo []struct { } + // GetVgpuHeterogeneousMode holds details about calls to the GetVgpuHeterogeneousMode method. + GetVgpuHeterogeneousMode []struct { + } + // GetVgpuSchedulerLog holds details about calls to the GetVgpuSchedulerLog method. + GetVgpuSchedulerLog []struct { + } + // GetVgpuSchedulerState holds details about calls to the GetVgpuSchedulerState method. + GetVgpuSchedulerState []struct { + } + // GetVgpuTypeCreatablePlacements holds details about calls to the GetVgpuTypeCreatablePlacements method. + GetVgpuTypeCreatablePlacements []struct { + } + // SetVgpuHeterogeneousMode holds details about calls to the SetVgpuHeterogeneousMode method. + SetVgpuHeterogeneousMode []struct { + // VgpuHeterogeneousMode is the vgpuHeterogeneousMode argument value. 
+ VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode + } + // SetVgpuSchedulerState holds details about calls to the SetVgpuSchedulerState method. + SetVgpuSchedulerState []struct { + // VgpuSchedulerState is the vgpuSchedulerState argument value. + VgpuSchedulerState *nvml.VgpuSchedulerState + } } lockCreateComputeInstance sync.RWMutex lockCreateComputeInstanceWithPlacement sync.RWMutex lockDestroy sync.RWMutex + lockGetActiveVgpus sync.RWMutex lockGetComputeInstanceById sync.RWMutex lockGetComputeInstancePossiblePlacements sync.RWMutex lockGetComputeInstanceProfileInfo sync.RWMutex lockGetComputeInstanceProfileInfoV sync.RWMutex lockGetComputeInstanceRemainingCapacity sync.RWMutex lockGetComputeInstances sync.RWMutex + lockGetCreatableVgpus sync.RWMutex lockGetInfo sync.RWMutex + lockGetVgpuHeterogeneousMode sync.RWMutex + lockGetVgpuSchedulerLog sync.RWMutex + lockGetVgpuSchedulerState sync.RWMutex + lockGetVgpuTypeCreatablePlacements sync.RWMutex + lockSetVgpuHeterogeneousMode sync.RWMutex + lockSetVgpuSchedulerState sync.RWMutex } // CreateComputeInstance calls CreateComputeInstanceFunc. @@ -247,6 +331,33 @@ func (mock *GpuInstance) DestroyCalls() []struct { return calls } +// GetActiveVgpus calls GetActiveVgpusFunc. +func (mock *GpuInstance) GetActiveVgpus() (nvml.ActiveVgpuInstanceInfo, nvml.Return) { + if mock.GetActiveVgpusFunc == nil { + panic("GpuInstance.GetActiveVgpusFunc: method is nil but GpuInstance.GetActiveVgpus was just called") + } + callInfo := struct { + }{} + mock.lockGetActiveVgpus.Lock() + mock.calls.GetActiveVgpus = append(mock.calls.GetActiveVgpus, callInfo) + mock.lockGetActiveVgpus.Unlock() + return mock.GetActiveVgpusFunc() +} + +// GetActiveVgpusCalls gets all the calls that were made to GetActiveVgpus. 
+// Check the length with: +// +// len(mockedGpuInstance.GetActiveVgpusCalls()) +func (mock *GpuInstance) GetActiveVgpusCalls() []struct { +} { + var calls []struct { + } + mock.lockGetActiveVgpus.RLock() + calls = mock.calls.GetActiveVgpus + mock.lockGetActiveVgpus.RUnlock() + return calls +} + // GetComputeInstanceById calls GetComputeInstanceByIdFunc. func (mock *GpuInstance) GetComputeInstanceById(n int) (nvml.ComputeInstance, nvml.Return) { if mock.GetComputeInstanceByIdFunc == nil { @@ -348,7 +459,7 @@ func (mock *GpuInstance) GetComputeInstanceProfileInfoCalls() []struct { } // GetComputeInstanceProfileInfoV calls GetComputeInstanceProfileInfoVFunc. -func (mock *GpuInstance) GetComputeInstanceProfileInfoV(n1 int, n2 int) nvml.ComputeInstanceProfileInfoV { +func (mock *GpuInstance) GetComputeInstanceProfileInfoV(n1 int, n2 int) nvml.ComputeInstanceProfileInfoHandler { if mock.GetComputeInstanceProfileInfoVFunc == nil { panic("GpuInstance.GetComputeInstanceProfileInfoVFunc: method is nil but GpuInstance.GetComputeInstanceProfileInfoV was just called") } @@ -447,6 +558,33 @@ func (mock *GpuInstance) GetComputeInstancesCalls() []struct { return calls } +// GetCreatableVgpus calls GetCreatableVgpusFunc. +func (mock *GpuInstance) GetCreatableVgpus() (nvml.VgpuTypeIdInfo, nvml.Return) { + if mock.GetCreatableVgpusFunc == nil { + panic("GpuInstance.GetCreatableVgpusFunc: method is nil but GpuInstance.GetCreatableVgpus was just called") + } + callInfo := struct { + }{} + mock.lockGetCreatableVgpus.Lock() + mock.calls.GetCreatableVgpus = append(mock.calls.GetCreatableVgpus, callInfo) + mock.lockGetCreatableVgpus.Unlock() + return mock.GetCreatableVgpusFunc() +} + +// GetCreatableVgpusCalls gets all the calls that were made to GetCreatableVgpus. 
+// Check the length with: +// +// len(mockedGpuInstance.GetCreatableVgpusCalls()) +func (mock *GpuInstance) GetCreatableVgpusCalls() []struct { +} { + var calls []struct { + } + mock.lockGetCreatableVgpus.RLock() + calls = mock.calls.GetCreatableVgpus + mock.lockGetCreatableVgpus.RUnlock() + return calls +} + // GetInfo calls GetInfoFunc. func (mock *GpuInstance) GetInfo() (nvml.GpuInstanceInfo, nvml.Return) { if mock.GetInfoFunc == nil { @@ -473,3 +611,175 @@ func (mock *GpuInstance) GetInfoCalls() []struct { mock.lockGetInfo.RUnlock() return calls } + +// GetVgpuHeterogeneousMode calls GetVgpuHeterogeneousModeFunc. +func (mock *GpuInstance) GetVgpuHeterogeneousMode() (nvml.VgpuHeterogeneousMode, nvml.Return) { + if mock.GetVgpuHeterogeneousModeFunc == nil { + panic("GpuInstance.GetVgpuHeterogeneousModeFunc: method is nil but GpuInstance.GetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + }{} + mock.lockGetVgpuHeterogeneousMode.Lock() + mock.calls.GetVgpuHeterogeneousMode = append(mock.calls.GetVgpuHeterogeneousMode, callInfo) + mock.lockGetVgpuHeterogeneousMode.Unlock() + return mock.GetVgpuHeterogeneousModeFunc() +} + +// GetVgpuHeterogeneousModeCalls gets all the calls that were made to GetVgpuHeterogeneousMode. +// Check the length with: +// +// len(mockedGpuInstance.GetVgpuHeterogeneousModeCalls()) +func (mock *GpuInstance) GetVgpuHeterogeneousModeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetVgpuHeterogeneousMode.RLock() + calls = mock.calls.GetVgpuHeterogeneousMode + mock.lockGetVgpuHeterogeneousMode.RUnlock() + return calls +} + +// GetVgpuSchedulerLog calls GetVgpuSchedulerLogFunc. 
+func (mock *GpuInstance) GetVgpuSchedulerLog() (nvml.VgpuSchedulerLogInfo, nvml.Return) { + if mock.GetVgpuSchedulerLogFunc == nil { + panic("GpuInstance.GetVgpuSchedulerLogFunc: method is nil but GpuInstance.GetVgpuSchedulerLog was just called") + } + callInfo := struct { + }{} + mock.lockGetVgpuSchedulerLog.Lock() + mock.calls.GetVgpuSchedulerLog = append(mock.calls.GetVgpuSchedulerLog, callInfo) + mock.lockGetVgpuSchedulerLog.Unlock() + return mock.GetVgpuSchedulerLogFunc() +} + +// GetVgpuSchedulerLogCalls gets all the calls that were made to GetVgpuSchedulerLog. +// Check the length with: +// +// len(mockedGpuInstance.GetVgpuSchedulerLogCalls()) +func (mock *GpuInstance) GetVgpuSchedulerLogCalls() []struct { +} { + var calls []struct { + } + mock.lockGetVgpuSchedulerLog.RLock() + calls = mock.calls.GetVgpuSchedulerLog + mock.lockGetVgpuSchedulerLog.RUnlock() + return calls +} + +// GetVgpuSchedulerState calls GetVgpuSchedulerStateFunc. +func (mock *GpuInstance) GetVgpuSchedulerState() (nvml.VgpuSchedulerStateInfo, nvml.Return) { + if mock.GetVgpuSchedulerStateFunc == nil { + panic("GpuInstance.GetVgpuSchedulerStateFunc: method is nil but GpuInstance.GetVgpuSchedulerState was just called") + } + callInfo := struct { + }{} + mock.lockGetVgpuSchedulerState.Lock() + mock.calls.GetVgpuSchedulerState = append(mock.calls.GetVgpuSchedulerState, callInfo) + mock.lockGetVgpuSchedulerState.Unlock() + return mock.GetVgpuSchedulerStateFunc() +} + +// GetVgpuSchedulerStateCalls gets all the calls that were made to GetVgpuSchedulerState. +// Check the length with: +// +// len(mockedGpuInstance.GetVgpuSchedulerStateCalls()) +func (mock *GpuInstance) GetVgpuSchedulerStateCalls() []struct { +} { + var calls []struct { + } + mock.lockGetVgpuSchedulerState.RLock() + calls = mock.calls.GetVgpuSchedulerState + mock.lockGetVgpuSchedulerState.RUnlock() + return calls +} + +// GetVgpuTypeCreatablePlacements calls GetVgpuTypeCreatablePlacementsFunc. 
+func (mock *GpuInstance) GetVgpuTypeCreatablePlacements() (nvml.VgpuCreatablePlacementInfo, nvml.Return) { + if mock.GetVgpuTypeCreatablePlacementsFunc == nil { + panic("GpuInstance.GetVgpuTypeCreatablePlacementsFunc: method is nil but GpuInstance.GetVgpuTypeCreatablePlacements was just called") + } + callInfo := struct { + }{} + mock.lockGetVgpuTypeCreatablePlacements.Lock() + mock.calls.GetVgpuTypeCreatablePlacements = append(mock.calls.GetVgpuTypeCreatablePlacements, callInfo) + mock.lockGetVgpuTypeCreatablePlacements.Unlock() + return mock.GetVgpuTypeCreatablePlacementsFunc() +} + +// GetVgpuTypeCreatablePlacementsCalls gets all the calls that were made to GetVgpuTypeCreatablePlacements. +// Check the length with: +// +// len(mockedGpuInstance.GetVgpuTypeCreatablePlacementsCalls()) +func (mock *GpuInstance) GetVgpuTypeCreatablePlacementsCalls() []struct { +} { + var calls []struct { + } + mock.lockGetVgpuTypeCreatablePlacements.RLock() + calls = mock.calls.GetVgpuTypeCreatablePlacements + mock.lockGetVgpuTypeCreatablePlacements.RUnlock() + return calls +} + +// SetVgpuHeterogeneousMode calls SetVgpuHeterogeneousModeFunc. +func (mock *GpuInstance) SetVgpuHeterogeneousMode(vgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode) nvml.Return { + if mock.SetVgpuHeterogeneousModeFunc == nil { + panic("GpuInstance.SetVgpuHeterogeneousModeFunc: method is nil but GpuInstance.SetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode + }{ + VgpuHeterogeneousMode: vgpuHeterogeneousMode, + } + mock.lockSetVgpuHeterogeneousMode.Lock() + mock.calls.SetVgpuHeterogeneousMode = append(mock.calls.SetVgpuHeterogeneousMode, callInfo) + mock.lockSetVgpuHeterogeneousMode.Unlock() + return mock.SetVgpuHeterogeneousModeFunc(vgpuHeterogeneousMode) +} + +// SetVgpuHeterogeneousModeCalls gets all the calls that were made to SetVgpuHeterogeneousMode. 
+// Check the length with: +// +// len(mockedGpuInstance.SetVgpuHeterogeneousModeCalls()) +func (mock *GpuInstance) SetVgpuHeterogeneousModeCalls() []struct { + VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode +} { + var calls []struct { + VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode + } + mock.lockSetVgpuHeterogeneousMode.RLock() + calls = mock.calls.SetVgpuHeterogeneousMode + mock.lockSetVgpuHeterogeneousMode.RUnlock() + return calls +} + +// SetVgpuSchedulerState calls SetVgpuSchedulerStateFunc. +func (mock *GpuInstance) SetVgpuSchedulerState(vgpuSchedulerState *nvml.VgpuSchedulerState) nvml.Return { + if mock.SetVgpuSchedulerStateFunc == nil { + panic("GpuInstance.SetVgpuSchedulerStateFunc: method is nil but GpuInstance.SetVgpuSchedulerState was just called") + } + callInfo := struct { + VgpuSchedulerState *nvml.VgpuSchedulerState + }{ + VgpuSchedulerState: vgpuSchedulerState, + } + mock.lockSetVgpuSchedulerState.Lock() + mock.calls.SetVgpuSchedulerState = append(mock.calls.SetVgpuSchedulerState, callInfo) + mock.lockSetVgpuSchedulerState.Unlock() + return mock.SetVgpuSchedulerStateFunc(vgpuSchedulerState) +} + +// SetVgpuSchedulerStateCalls gets all the calls that were made to SetVgpuSchedulerState. 
+// Check the length with: +// +// len(mockedGpuInstance.SetVgpuSchedulerStateCalls()) +func (mock *GpuInstance) SetVgpuSchedulerStateCalls() []struct { + VgpuSchedulerState *nvml.VgpuSchedulerState +} { + var calls []struct { + VgpuSchedulerState *nvml.VgpuSchedulerState + } + mock.lockSetVgpuSchedulerState.RLock() + calls = mock.calls.SetVgpuSchedulerState + mock.lockSetVgpuSchedulerState.RUnlock() + return calls +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/interface.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/interface.go index 96739dd..ecaaf6c 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/interface.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/interface.go @@ -24,12 +24,6 @@ var _ nvml.Interface = &Interface{} // ComputeInstanceGetInfoFunc: func(computeInstance nvml.ComputeInstance) (nvml.ComputeInstanceInfo, nvml.Return) { // panic("mock out the ComputeInstanceGetInfo method") // }, -// DeviceCcuGetStreamStateFunc: func(device nvml.Device) (int, nvml.Return) { -// panic("mock out the DeviceCcuGetStreamState method") -// }, -// DeviceCcuSetStreamStateFunc: func(device nvml.Device, n int) nvml.Return { -// panic("mock out the DeviceCcuSetStreamState method") -// }, // DeviceClearAccountingPidsFunc: func(device nvml.Device) nvml.Return { // panic("mock out the DeviceClearAccountingPids method") // }, @@ -105,6 +99,12 @@ var _ nvml.Interface = &Interface{} // DeviceGetBusTypeFunc: func(device nvml.Device) (nvml.BusType, nvml.Return) { // panic("mock out the DeviceGetBusType method") // }, +// DeviceGetC2cModeInfoVFunc: func(device nvml.Device) nvml.C2cModeInfoHandler { +// panic("mock out the DeviceGetC2cModeInfoV method") +// }, +// DeviceGetCapabilitiesFunc: func(device nvml.Device) (nvml.DeviceCapabilities, nvml.Return) { +// panic("mock out the DeviceGetCapabilities method") +// }, // DeviceGetClkMonStatusFunc: func(device nvml.Device) (nvml.ClkMonStatus, nvml.Return) { // panic("mock out the 
DeviceGetClkMonStatus method") // }, @@ -114,6 +114,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetClockInfoFunc: func(device nvml.Device, clockType nvml.ClockType) (uint32, nvml.Return) { // panic("mock out the DeviceGetClockInfo method") // }, +// DeviceGetClockOffsetsFunc: func(device nvml.Device) (nvml.ClockOffset, nvml.Return) { +// panic("mock out the DeviceGetClockOffsets method") +// }, // DeviceGetComputeInstanceIdFunc: func(device nvml.Device) (int, nvml.Return) { // panic("mock out the DeviceGetComputeInstanceId method") // }, @@ -123,6 +126,21 @@ var _ nvml.Interface = &Interface{} // DeviceGetComputeRunningProcessesFunc: func(device nvml.Device) ([]nvml.ProcessInfo, nvml.Return) { // panic("mock out the DeviceGetComputeRunningProcesses method") // }, +// DeviceGetConfComputeGpuAttestationReportFunc: func(device nvml.Device) (nvml.ConfComputeGpuAttestationReport, nvml.Return) { +// panic("mock out the DeviceGetConfComputeGpuAttestationReport method") +// }, +// DeviceGetConfComputeGpuCertificateFunc: func(device nvml.Device) (nvml.ConfComputeGpuCertificate, nvml.Return) { +// panic("mock out the DeviceGetConfComputeGpuCertificate method") +// }, +// DeviceGetConfComputeMemSizeInfoFunc: func(device nvml.Device) (nvml.ConfComputeMemSizeInfo, nvml.Return) { +// panic("mock out the DeviceGetConfComputeMemSizeInfo method") +// }, +// DeviceGetConfComputeProtectedMemoryUsageFunc: func(device nvml.Device) (nvml.Memory, nvml.Return) { +// panic("mock out the DeviceGetConfComputeProtectedMemoryUsage method") +// }, +// DeviceGetCoolerInfoFunc: func(device nvml.Device) (nvml.CoolerInfo, nvml.Return) { +// panic("mock out the DeviceGetCoolerInfo method") +// }, // DeviceGetCountFunc: func() (int, nvml.Return) { // panic("mock out the DeviceGetCount method") // }, @@ -144,6 +162,12 @@ var _ nvml.Interface = &Interface{} // DeviceGetCurrPcieLinkWidthFunc: func(device nvml.Device) (int, nvml.Return) { // panic("mock out the DeviceGetCurrPcieLinkWidth method") 
// }, +// DeviceGetCurrentClockFreqsFunc: func(device nvml.Device) (nvml.DeviceCurrentClockFreqs, nvml.Return) { +// panic("mock out the DeviceGetCurrentClockFreqs method") +// }, +// DeviceGetCurrentClocksEventReasonsFunc: func(device nvml.Device) (uint64, nvml.Return) { +// panic("mock out the DeviceGetCurrentClocksEventReasons method") +// }, // DeviceGetCurrentClocksThrottleReasonsFunc: func(device nvml.Device) (uint64, nvml.Return) { // panic("mock out the DeviceGetCurrentClocksThrottleReasons method") // }, @@ -168,9 +192,15 @@ var _ nvml.Interface = &Interface{} // DeviceGetDisplayModeFunc: func(device nvml.Device) (nvml.EnableState, nvml.Return) { // panic("mock out the DeviceGetDisplayMode method") // }, +// DeviceGetDramEncryptionModeFunc: func(device nvml.Device) (nvml.DramEncryptionInfo, nvml.DramEncryptionInfo, nvml.Return) { +// panic("mock out the DeviceGetDramEncryptionMode method") +// }, // DeviceGetDriverModelFunc: func(device nvml.Device) (nvml.DriverModel, nvml.DriverModel, nvml.Return) { // panic("mock out the DeviceGetDriverModel method") // }, +// DeviceGetDriverModel_v2Func: func(device nvml.Device) (nvml.DriverModel, nvml.DriverModel, nvml.Return) { +// panic("mock out the DeviceGetDriverModel_v2 method") +// }, // DeviceGetDynamicPstatesInfoFunc: func(device nvml.Device) (nvml.GpuDynamicPstatesInfo, nvml.Return) { // panic("mock out the DeviceGetDynamicPstatesInfo method") // }, @@ -204,6 +234,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetFanSpeedFunc: func(device nvml.Device) (uint32, nvml.Return) { // panic("mock out the DeviceGetFanSpeed method") // }, +// DeviceGetFanSpeedRPMFunc: func(device nvml.Device) (nvml.FanSpeedInfo, nvml.Return) { +// panic("mock out the DeviceGetFanSpeedRPM method") +// }, // DeviceGetFanSpeed_v2Func: func(device nvml.Device, n int) (uint32, nvml.Return) { // panic("mock out the DeviceGetFanSpeed_v2 method") // }, @@ -219,6 +252,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetGpuFabricInfoFunc: 
func(device nvml.Device) (nvml.GpuFabricInfo, nvml.Return) { // panic("mock out the DeviceGetGpuFabricInfo method") // }, +// DeviceGetGpuFabricInfoVFunc: func(device nvml.Device) nvml.GpuFabricInfoHandler { +// panic("mock out the DeviceGetGpuFabricInfoV method") +// }, // DeviceGetGpuInstanceByIdFunc: func(device nvml.Device, n int) (nvml.GpuInstance, nvml.Return) { // panic("mock out the DeviceGetGpuInstanceById method") // }, @@ -231,7 +267,7 @@ var _ nvml.Interface = &Interface{} // DeviceGetGpuInstanceProfileInfoFunc: func(device nvml.Device, n int) (nvml.GpuInstanceProfileInfo, nvml.Return) { // panic("mock out the DeviceGetGpuInstanceProfileInfo method") // }, -// DeviceGetGpuInstanceProfileInfoVFunc: func(device nvml.Device, n int) nvml.GpuInstanceProfileInfoV { +// DeviceGetGpuInstanceProfileInfoVFunc: func(device nvml.Device, n int) nvml.GpuInstanceProfileInfoHandler { // panic("mock out the DeviceGetGpuInstanceProfileInfoV method") // }, // DeviceGetGpuInstanceRemainingCapacityFunc: func(device nvml.Device, gpuInstanceProfileInfo *nvml.GpuInstanceProfileInfo) (int, nvml.Return) { @@ -270,6 +306,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetHandleByUUIDFunc: func(s string) (nvml.Device, nvml.Return) { // panic("mock out the DeviceGetHandleByUUID method") // }, +// DeviceGetHandleByUUIDVFunc: func(uUID *nvml.UUID) (nvml.Device, nvml.Return) { +// panic("mock out the DeviceGetHandleByUUIDV method") +// }, // DeviceGetHostVgpuModeFunc: func(device nvml.Device) (nvml.HostVgpuMode, nvml.Return) { // panic("mock out the DeviceGetHostVgpuMode method") // }, @@ -288,9 +327,18 @@ var _ nvml.Interface = &Interface{} // DeviceGetIrqNumFunc: func(device nvml.Device) (int, nvml.Return) { // panic("mock out the DeviceGetIrqNum method") // }, +// DeviceGetJpgUtilizationFunc: func(device nvml.Device) (uint32, uint32, nvml.Return) { +// panic("mock out the DeviceGetJpgUtilization method") +// }, +// DeviceGetLastBBXFlushTimeFunc: func(device nvml.Device) (uint64, 
uint, nvml.Return) { +// panic("mock out the DeviceGetLastBBXFlushTime method") +// }, // DeviceGetMPSComputeRunningProcessesFunc: func(device nvml.Device) ([]nvml.ProcessInfo, nvml.Return) { // panic("mock out the DeviceGetMPSComputeRunningProcesses method") // }, +// DeviceGetMarginTemperatureFunc: func(device nvml.Device) (nvml.MarginTemperature, nvml.Return) { +// panic("mock out the DeviceGetMarginTemperature method") +// }, // DeviceGetMaxClockInfoFunc: func(device nvml.Device, clockType nvml.ClockType) (uint32, nvml.Return) { // panic("mock out the DeviceGetMaxClockInfo method") // }, @@ -342,6 +390,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetMinorNumberFunc: func(device nvml.Device) (int, nvml.Return) { // panic("mock out the DeviceGetMinorNumber method") // }, +// DeviceGetModuleIdFunc: func(device nvml.Device) (int, nvml.Return) { +// panic("mock out the DeviceGetModuleId method") +// }, // DeviceGetMultiGpuBoardFunc: func(device nvml.Device) (int, nvml.Return) { // panic("mock out the DeviceGetMultiGpuBoard method") // }, @@ -354,6 +405,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetNumGpuCoresFunc: func(device nvml.Device) (int, nvml.Return) { // panic("mock out the DeviceGetNumGpuCores method") // }, +// DeviceGetNumaNodeIdFunc: func(device nvml.Device) (int, nvml.Return) { +// panic("mock out the DeviceGetNumaNodeId method") +// }, // DeviceGetNvLinkCapabilityFunc: func(device nvml.Device, n int, nvLinkCapability nvml.NvLinkCapability) (uint32, nvml.Return) { // panic("mock out the DeviceGetNvLinkCapability method") // }, @@ -378,12 +432,24 @@ var _ nvml.Interface = &Interface{} // DeviceGetNvLinkVersionFunc: func(device nvml.Device, n int) (uint32, nvml.Return) { // panic("mock out the DeviceGetNvLinkVersion method") // }, +// DeviceGetNvlinkBwModeFunc: func(device nvml.Device) (nvml.NvlinkGetBwMode, nvml.Return) { +// panic("mock out the DeviceGetNvlinkBwMode method") +// }, +// DeviceGetNvlinkSupportedBwModesFunc: func(device 
nvml.Device) (nvml.NvlinkSupportedBwModes, nvml.Return) { +// panic("mock out the DeviceGetNvlinkSupportedBwModes method") +// }, +// DeviceGetOfaUtilizationFunc: func(device nvml.Device) (uint32, uint32, nvml.Return) { +// panic("mock out the DeviceGetOfaUtilization method") +// }, // DeviceGetP2PStatusFunc: func(device1 nvml.Device, device2 nvml.Device, gpuP2PCapsIndex nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) { // panic("mock out the DeviceGetP2PStatus method") // }, // DeviceGetPciInfoFunc: func(device nvml.Device) (nvml.PciInfo, nvml.Return) { // panic("mock out the DeviceGetPciInfo method") // }, +// DeviceGetPciInfoExtFunc: func(device nvml.Device) (nvml.PciInfoExt, nvml.Return) { +// panic("mock out the DeviceGetPciInfoExt method") +// }, // DeviceGetPcieLinkMaxSpeedFunc: func(device nvml.Device) (uint32, nvml.Return) { // panic("mock out the DeviceGetPcieLinkMaxSpeed method") // }, @@ -396,6 +462,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetPcieThroughputFunc: func(device nvml.Device, pcieUtilCounter nvml.PcieUtilCounter) (uint32, nvml.Return) { // panic("mock out the DeviceGetPcieThroughput method") // }, +// DeviceGetPerformanceModesFunc: func(device nvml.Device) (nvml.DevicePerfModes, nvml.Return) { +// panic("mock out the DeviceGetPerformanceModes method") +// }, // DeviceGetPerformanceStateFunc: func(device nvml.Device) (nvml.Pstates, nvml.Return) { // panic("mock out the DeviceGetPerformanceState method") // }, @@ -405,6 +474,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetPgpuMetadataStringFunc: func(device nvml.Device) (string, nvml.Return) { // panic("mock out the DeviceGetPgpuMetadataString method") // }, +// DeviceGetPlatformInfoFunc: func(device nvml.Device) (nvml.PlatformInfo, nvml.Return) { +// panic("mock out the DeviceGetPlatformInfo method") +// }, // DeviceGetPowerManagementDefaultLimitFunc: func(device nvml.Device) (uint32, nvml.Return) { // panic("mock out the DeviceGetPowerManagementDefaultLimit method") // }, 
@@ -429,6 +501,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetProcessUtilizationFunc: func(device nvml.Device, v uint64) ([]nvml.ProcessUtilizationSample, nvml.Return) { // panic("mock out the DeviceGetProcessUtilization method") // }, +// DeviceGetProcessesUtilizationInfoFunc: func(device nvml.Device) (nvml.ProcessesUtilizationInfo, nvml.Return) { +// panic("mock out the DeviceGetProcessesUtilizationInfo method") +// }, // DeviceGetRemappedRowsFunc: func(device nvml.Device) (int, int, bool, bool, nvml.Return) { // panic("mock out the DeviceGetRemappedRows method") // }, @@ -444,12 +519,21 @@ var _ nvml.Interface = &Interface{} // DeviceGetRowRemapperHistogramFunc: func(device nvml.Device) (nvml.RowRemapperHistogramValues, nvml.Return) { // panic("mock out the DeviceGetRowRemapperHistogram method") // }, +// DeviceGetRunningProcessDetailListFunc: func(device nvml.Device) (nvml.ProcessDetailList, nvml.Return) { +// panic("mock out the DeviceGetRunningProcessDetailList method") +// }, // DeviceGetSamplesFunc: func(device nvml.Device, samplingType nvml.SamplingType, v uint64) (nvml.ValueType, []nvml.Sample, nvml.Return) { // panic("mock out the DeviceGetSamples method") // }, // DeviceGetSerialFunc: func(device nvml.Device) (string, nvml.Return) { // panic("mock out the DeviceGetSerial method") // }, +// DeviceGetSramEccErrorStatusFunc: func(device nvml.Device) (nvml.EccSramErrorStatus, nvml.Return) { +// panic("mock out the DeviceGetSramEccErrorStatus method") +// }, +// DeviceGetSupportedClocksEventReasonsFunc: func(device nvml.Device) (uint64, nvml.Return) { +// panic("mock out the DeviceGetSupportedClocksEventReasons method") +// }, // DeviceGetSupportedClocksThrottleReasonsFunc: func(device nvml.Device) (uint64, nvml.Return) { // panic("mock out the DeviceGetSupportedClocksThrottleReasons method") // }, @@ -477,6 +561,9 @@ var _ nvml.Interface = &Interface{} // DeviceGetTemperatureThresholdFunc: func(device nvml.Device, temperatureThresholds 
nvml.TemperatureThresholds) (uint32, nvml.Return) { // panic("mock out the DeviceGetTemperatureThreshold method") // }, +// DeviceGetTemperatureVFunc: func(device nvml.Device) nvml.TemperatureHandler { +// panic("mock out the DeviceGetTemperatureV method") +// }, // DeviceGetThermalSettingsFunc: func(device nvml.Device, v uint32) (nvml.GpuThermalSettings, nvml.Return) { // panic("mock out the DeviceGetThermalSettings method") // }, @@ -504,12 +591,21 @@ var _ nvml.Interface = &Interface{} // DeviceGetVgpuCapabilitiesFunc: func(device nvml.Device, deviceVgpuCapability nvml.DeviceVgpuCapability) (bool, nvml.Return) { // panic("mock out the DeviceGetVgpuCapabilities method") // }, +// DeviceGetVgpuHeterogeneousModeFunc: func(device nvml.Device) (nvml.VgpuHeterogeneousMode, nvml.Return) { +// panic("mock out the DeviceGetVgpuHeterogeneousMode method") +// }, +// DeviceGetVgpuInstancesUtilizationInfoFunc: func(device nvml.Device) (nvml.VgpuInstancesUtilizationInfo, nvml.Return) { +// panic("mock out the DeviceGetVgpuInstancesUtilizationInfo method") +// }, // DeviceGetVgpuMetadataFunc: func(device nvml.Device) (nvml.VgpuPgpuMetadata, nvml.Return) { // panic("mock out the DeviceGetVgpuMetadata method") // }, // DeviceGetVgpuProcessUtilizationFunc: func(device nvml.Device, v uint64) ([]nvml.VgpuProcessUtilizationSample, nvml.Return) { // panic("mock out the DeviceGetVgpuProcessUtilization method") // }, +// DeviceGetVgpuProcessesUtilizationInfoFunc: func(device nvml.Device) (nvml.VgpuProcessesUtilizationInfo, nvml.Return) { +// panic("mock out the DeviceGetVgpuProcessesUtilizationInfo method") +// }, // DeviceGetVgpuSchedulerCapabilitiesFunc: func(device nvml.Device) (nvml.VgpuSchedulerCapabilities, nvml.Return) { // panic("mock out the DeviceGetVgpuSchedulerCapabilities method") // }, @@ -519,6 +615,12 @@ var _ nvml.Interface = &Interface{} // DeviceGetVgpuSchedulerStateFunc: func(device nvml.Device) (nvml.VgpuSchedulerGetState, nvml.Return) { // panic("mock out the 
DeviceGetVgpuSchedulerState method") // }, +// DeviceGetVgpuTypeCreatablePlacementsFunc: func(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { +// panic("mock out the DeviceGetVgpuTypeCreatablePlacements method") +// }, +// DeviceGetVgpuTypeSupportedPlacementsFunc: func(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { +// panic("mock out the DeviceGetVgpuTypeSupportedPlacements method") +// }, // DeviceGetVgpuUtilizationFunc: func(device nvml.Device, v uint64) (nvml.ValueType, []nvml.VgpuInstanceUtilizationSample, nvml.Return) { // panic("mock out the DeviceGetVgpuUtilization method") // }, @@ -537,6 +639,15 @@ var _ nvml.Interface = &Interface{} // DeviceOnSameBoardFunc: func(device1 nvml.Device, device2 nvml.Device) (int, nvml.Return) { // panic("mock out the DeviceOnSameBoard method") // }, +// DevicePowerSmoothingActivatePresetProfileFunc: func(device nvml.Device, powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { +// panic("mock out the DevicePowerSmoothingActivatePresetProfile method") +// }, +// DevicePowerSmoothingSetStateFunc: func(device nvml.Device, powerSmoothingState *nvml.PowerSmoothingState) nvml.Return { +// panic("mock out the DevicePowerSmoothingSetState method") +// }, +// DevicePowerSmoothingUpdatePresetProfileParamFunc: func(device nvml.Device, powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { +// panic("mock out the DevicePowerSmoothingUpdatePresetProfileParam method") +// }, // DeviceQueryDrainStateFunc: func(pciInfo *nvml.PciInfo) (nvml.EnableState, nvml.Return) { // panic("mock out the DeviceQueryDrainState method") // }, @@ -576,9 +687,15 @@ var _ nvml.Interface = &Interface{} // DeviceSetAutoBoostedClocksEnabledFunc: func(device nvml.Device, enableState nvml.EnableState) nvml.Return { // panic("mock out the DeviceSetAutoBoostedClocksEnabled method") // }, +// DeviceSetClockOffsetsFunc: func(device nvml.Device, clockOffset 
nvml.ClockOffset) nvml.Return { +// panic("mock out the DeviceSetClockOffsets method") +// }, // DeviceSetComputeModeFunc: func(device nvml.Device, computeMode nvml.ComputeMode) nvml.Return { // panic("mock out the DeviceSetComputeMode method") // }, +// DeviceSetConfComputeUnprotectedMemSizeFunc: func(device nvml.Device, v uint64) nvml.Return { +// panic("mock out the DeviceSetConfComputeUnprotectedMemSize method") +// }, // DeviceSetCpuAffinityFunc: func(device nvml.Device) nvml.Return { // panic("mock out the DeviceSetCpuAffinity method") // }, @@ -588,6 +705,9 @@ var _ nvml.Interface = &Interface{} // DeviceSetDefaultFanSpeed_v2Func: func(device nvml.Device, n int) nvml.Return { // panic("mock out the DeviceSetDefaultFanSpeed_v2 method") // }, +// DeviceSetDramEncryptionModeFunc: func(device nvml.Device, dramEncryptionInfo *nvml.DramEncryptionInfo) nvml.Return { +// panic("mock out the DeviceSetDramEncryptionMode method") +// }, // DeviceSetDriverModelFunc: func(device nvml.Device, driverModel nvml.DriverModel, v uint32) nvml.Return { // panic("mock out the DeviceSetDriverModel method") // }, @@ -624,15 +744,27 @@ var _ nvml.Interface = &Interface{} // DeviceSetNvLinkUtilizationControlFunc: func(device nvml.Device, n1 int, n2 int, nvLinkUtilizationControl *nvml.NvLinkUtilizationControl, b bool) nvml.Return { // panic("mock out the DeviceSetNvLinkUtilizationControl method") // }, +// DeviceSetNvlinkBwModeFunc: func(device nvml.Device, nvlinkSetBwMode *nvml.NvlinkSetBwMode) nvml.Return { +// panic("mock out the DeviceSetNvlinkBwMode method") +// }, // DeviceSetPersistenceModeFunc: func(device nvml.Device, enableState nvml.EnableState) nvml.Return { // panic("mock out the DeviceSetPersistenceMode method") // }, // DeviceSetPowerManagementLimitFunc: func(device nvml.Device, v uint32) nvml.Return { // panic("mock out the DeviceSetPowerManagementLimit method") // }, +// DeviceSetPowerManagementLimit_v2Func: func(device nvml.Device, powerValue_v2 *nvml.PowerValue_v2) 
nvml.Return { +// panic("mock out the DeviceSetPowerManagementLimit_v2 method") +// }, // DeviceSetTemperatureThresholdFunc: func(device nvml.Device, temperatureThresholds nvml.TemperatureThresholds, n int) nvml.Return { // panic("mock out the DeviceSetTemperatureThreshold method") // }, +// DeviceSetVgpuCapabilitiesFunc: func(device nvml.Device, deviceVgpuCapability nvml.DeviceVgpuCapability, enableState nvml.EnableState) nvml.Return { +// panic("mock out the DeviceSetVgpuCapabilities method") +// }, +// DeviceSetVgpuHeterogeneousModeFunc: func(device nvml.Device, vgpuHeterogeneousMode nvml.VgpuHeterogeneousMode) nvml.Return { +// panic("mock out the DeviceSetVgpuHeterogeneousMode method") +// }, // DeviceSetVgpuSchedulerStateFunc: func(device nvml.Device, vgpuSchedulerSetState *nvml.VgpuSchedulerSetState) nvml.Return { // panic("mock out the DeviceSetVgpuSchedulerState method") // }, @@ -642,6 +774,18 @@ var _ nvml.Interface = &Interface{} // DeviceValidateInforomFunc: func(device nvml.Device) nvml.Return { // panic("mock out the DeviceValidateInforom method") // }, +// DeviceWorkloadPowerProfileClearRequestedProfilesFunc: func(device nvml.Device, workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { +// panic("mock out the DeviceWorkloadPowerProfileClearRequestedProfiles method") +// }, +// DeviceWorkloadPowerProfileGetCurrentProfilesFunc: func(device nvml.Device) (nvml.WorkloadPowerProfileCurrentProfiles, nvml.Return) { +// panic("mock out the DeviceWorkloadPowerProfileGetCurrentProfiles method") +// }, +// DeviceWorkloadPowerProfileGetProfilesInfoFunc: func(device nvml.Device) (nvml.WorkloadPowerProfileProfilesInfo, nvml.Return) { +// panic("mock out the DeviceWorkloadPowerProfileGetProfilesInfo method") +// }, +// DeviceWorkloadPowerProfileSetRequestedProfilesFunc: func(device nvml.Device, workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { +// panic("mock out the 
DeviceWorkloadPowerProfileSetRequestedProfiles method") +// }, // ErrorStringFunc: func(returnMoqParam nvml.Return) string { // panic("mock out the ErrorString method") // }, @@ -687,6 +831,9 @@ var _ nvml.Interface = &Interface{} // GpmQueryDeviceSupportVFunc: func(device nvml.Device) nvml.GpmSupportV { // panic("mock out the GpmQueryDeviceSupportV method") // }, +// GpmQueryIfStreamingEnabledFunc: func(device nvml.Device) (uint32, nvml.Return) { +// panic("mock out the GpmQueryIfStreamingEnabled method") +// }, // GpmSampleAllocFunc: func() (nvml.GpmSample, nvml.Return) { // panic("mock out the GpmSampleAlloc method") // }, @@ -696,6 +843,9 @@ var _ nvml.Interface = &Interface{} // GpmSampleGetFunc: func(device nvml.Device, gpmSample nvml.GpmSample) nvml.Return { // panic("mock out the GpmSampleGet method") // }, +// GpmSetStreamingEnabledFunc: func(device nvml.Device, v uint32) nvml.Return { +// panic("mock out the GpmSetStreamingEnabled method") +// }, // GpuInstanceCreateComputeInstanceFunc: func(gpuInstance nvml.GpuInstance, computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) (nvml.ComputeInstance, nvml.Return) { // panic("mock out the GpuInstanceCreateComputeInstance method") // }, @@ -705,6 +855,9 @@ var _ nvml.Interface = &Interface{} // GpuInstanceDestroyFunc: func(gpuInstance nvml.GpuInstance) nvml.Return { // panic("mock out the GpuInstanceDestroy method") // }, +// GpuInstanceGetActiveVgpusFunc: func(gpuInstance nvml.GpuInstance) (nvml.ActiveVgpuInstanceInfo, nvml.Return) { +// panic("mock out the GpuInstanceGetActiveVgpus method") +// }, // GpuInstanceGetComputeInstanceByIdFunc: func(gpuInstance nvml.GpuInstance, n int) (nvml.ComputeInstance, nvml.Return) { // panic("mock out the GpuInstanceGetComputeInstanceById method") // }, @@ -714,7 +867,7 @@ var _ nvml.Interface = &Interface{} // GpuInstanceGetComputeInstanceProfileInfoFunc: func(gpuInstance nvml.GpuInstance, n1 int, n2 int) (nvml.ComputeInstanceProfileInfo, nvml.Return) { // 
panic("mock out the GpuInstanceGetComputeInstanceProfileInfo method") // }, -// GpuInstanceGetComputeInstanceProfileInfoVFunc: func(gpuInstance nvml.GpuInstance, n1 int, n2 int) nvml.ComputeInstanceProfileInfoV { +// GpuInstanceGetComputeInstanceProfileInfoVFunc: func(gpuInstance nvml.GpuInstance, n1 int, n2 int) nvml.ComputeInstanceProfileInfoHandler { // panic("mock out the GpuInstanceGetComputeInstanceProfileInfoV method") // }, // GpuInstanceGetComputeInstanceRemainingCapacityFunc: func(gpuInstance nvml.GpuInstance, computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) (int, nvml.Return) { @@ -723,9 +876,30 @@ var _ nvml.Interface = &Interface{} // GpuInstanceGetComputeInstancesFunc: func(gpuInstance nvml.GpuInstance, computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) ([]nvml.ComputeInstance, nvml.Return) { // panic("mock out the GpuInstanceGetComputeInstances method") // }, +// GpuInstanceGetCreatableVgpusFunc: func(gpuInstance nvml.GpuInstance) (nvml.VgpuTypeIdInfo, nvml.Return) { +// panic("mock out the GpuInstanceGetCreatableVgpus method") +// }, // GpuInstanceGetInfoFunc: func(gpuInstance nvml.GpuInstance) (nvml.GpuInstanceInfo, nvml.Return) { // panic("mock out the GpuInstanceGetInfo method") // }, +// GpuInstanceGetVgpuHeterogeneousModeFunc: func(gpuInstance nvml.GpuInstance) (nvml.VgpuHeterogeneousMode, nvml.Return) { +// panic("mock out the GpuInstanceGetVgpuHeterogeneousMode method") +// }, +// GpuInstanceGetVgpuSchedulerLogFunc: func(gpuInstance nvml.GpuInstance) (nvml.VgpuSchedulerLogInfo, nvml.Return) { +// panic("mock out the GpuInstanceGetVgpuSchedulerLog method") +// }, +// GpuInstanceGetVgpuSchedulerStateFunc: func(gpuInstance nvml.GpuInstance) (nvml.VgpuSchedulerStateInfo, nvml.Return) { +// panic("mock out the GpuInstanceGetVgpuSchedulerState method") +// }, +// GpuInstanceGetVgpuTypeCreatablePlacementsFunc: func(gpuInstance nvml.GpuInstance) (nvml.VgpuCreatablePlacementInfo, nvml.Return) { +// panic("mock out the 
GpuInstanceGetVgpuTypeCreatablePlacements method") +// }, +// GpuInstanceSetVgpuHeterogeneousModeFunc: func(gpuInstance nvml.GpuInstance, vgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode) nvml.Return { +// panic("mock out the GpuInstanceSetVgpuHeterogeneousMode method") +// }, +// GpuInstanceSetVgpuSchedulerStateFunc: func(gpuInstance nvml.GpuInstance, vgpuSchedulerState *nvml.VgpuSchedulerState) nvml.Return { +// panic("mock out the GpuInstanceSetVgpuSchedulerState method") +// }, // InitFunc: func() nvml.Return { // panic("mock out the Init method") // }, @@ -738,12 +912,39 @@ var _ nvml.Interface = &Interface{} // ShutdownFunc: func() nvml.Return { // panic("mock out the Shutdown method") // }, +// SystemEventSetCreateFunc: func(systemEventSetCreateRequest *nvml.SystemEventSetCreateRequest) nvml.Return { +// panic("mock out the SystemEventSetCreate method") +// }, +// SystemEventSetFreeFunc: func(systemEventSetFreeRequest *nvml.SystemEventSetFreeRequest) nvml.Return { +// panic("mock out the SystemEventSetFree method") +// }, +// SystemEventSetWaitFunc: func(systemEventSetWaitRequest *nvml.SystemEventSetWaitRequest) nvml.Return { +// panic("mock out the SystemEventSetWait method") +// }, +// SystemGetConfComputeCapabilitiesFunc: func() (nvml.ConfComputeSystemCaps, nvml.Return) { +// panic("mock out the SystemGetConfComputeCapabilities method") +// }, +// SystemGetConfComputeGpusReadyStateFunc: func() (uint32, nvml.Return) { +// panic("mock out the SystemGetConfComputeGpusReadyState method") +// }, +// SystemGetConfComputeKeyRotationThresholdInfoFunc: func() (nvml.ConfComputeGetKeyRotationThresholdInfo, nvml.Return) { +// panic("mock out the SystemGetConfComputeKeyRotationThresholdInfo method") +// }, +// SystemGetConfComputeSettingsFunc: func() (nvml.SystemConfComputeSettings, nvml.Return) { +// panic("mock out the SystemGetConfComputeSettings method") +// }, +// SystemGetConfComputeStateFunc: func() (nvml.ConfComputeSystemState, nvml.Return) { +// panic("mock 
out the SystemGetConfComputeState method") +// }, // SystemGetCudaDriverVersionFunc: func() (int, nvml.Return) { // panic("mock out the SystemGetCudaDriverVersion method") // }, // SystemGetCudaDriverVersion_v2Func: func() (int, nvml.Return) { // panic("mock out the SystemGetCudaDriverVersion_v2 method") // }, +// SystemGetDriverBranchFunc: func() (nvml.SystemDriverBranchInfo, nvml.Return) { +// panic("mock out the SystemGetDriverBranch method") +// }, // SystemGetDriverVersionFunc: func() (string, nvml.Return) { // panic("mock out the SystemGetDriverVersion method") // }, @@ -753,12 +954,27 @@ var _ nvml.Interface = &Interface{} // SystemGetNVMLVersionFunc: func() (string, nvml.Return) { // panic("mock out the SystemGetNVMLVersion method") // }, +// SystemGetNvlinkBwModeFunc: func() (uint32, nvml.Return) { +// panic("mock out the SystemGetNvlinkBwMode method") +// }, // SystemGetProcessNameFunc: func(n int) (string, nvml.Return) { // panic("mock out the SystemGetProcessName method") // }, // SystemGetTopologyGpuSetFunc: func(n int) ([]nvml.Device, nvml.Return) { // panic("mock out the SystemGetTopologyGpuSet method") // }, +// SystemRegisterEventsFunc: func(systemRegisterEventRequest *nvml.SystemRegisterEventRequest) nvml.Return { +// panic("mock out the SystemRegisterEvents method") +// }, +// SystemSetConfComputeGpusReadyStateFunc: func(v uint32) nvml.Return { +// panic("mock out the SystemSetConfComputeGpusReadyState method") +// }, +// SystemSetConfComputeKeyRotationThresholdInfoFunc: func(confComputeSetKeyRotationThresholdInfo nvml.ConfComputeSetKeyRotationThresholdInfo) nvml.Return { +// panic("mock out the SystemSetConfComputeKeyRotationThresholdInfo method") +// }, +// SystemSetNvlinkBwModeFunc: func(v uint32) nvml.Return { +// panic("mock out the SystemSetNvlinkBwMode method") +// }, // UnitGetCountFunc: func() (int, nvml.Return) { // panic("mock out the UnitGetCount method") // }, @@ -840,6 +1056,9 @@ var _ nvml.Interface = &Interface{} // 
VgpuInstanceGetMetadataFunc: func(vgpuInstance nvml.VgpuInstance) (nvml.VgpuMetadata, nvml.Return) { // panic("mock out the VgpuInstanceGetMetadata method") // }, +// VgpuInstanceGetRuntimeStateSizeFunc: func(vgpuInstance nvml.VgpuInstance) (nvml.VgpuRuntimeState, nvml.Return) { +// panic("mock out the VgpuInstanceGetRuntimeStateSize method") +// }, // VgpuInstanceGetTypeFunc: func(vgpuInstance nvml.VgpuInstance) (nvml.VgpuTypeId, nvml.Return) { // panic("mock out the VgpuInstanceGetType method") // }, @@ -855,6 +1074,9 @@ var _ nvml.Interface = &Interface{} // VgpuInstanceSetEncoderCapacityFunc: func(vgpuInstance nvml.VgpuInstance, n int) nvml.Return { // panic("mock out the VgpuInstanceSetEncoderCapacity method") // }, +// VgpuTypeGetBAR1InfoFunc: func(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuTypeBar1Info, nvml.Return) { +// panic("mock out the VgpuTypeGetBAR1Info method") +// }, // VgpuTypeGetCapabilitiesFunc: func(vgpuTypeId nvml.VgpuTypeId, vgpuCapability nvml.VgpuCapability) (bool, nvml.Return) { // panic("mock out the VgpuTypeGetCapabilities method") // }, @@ -879,6 +1101,9 @@ var _ nvml.Interface = &Interface{} // VgpuTypeGetMaxInstancesFunc: func(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (int, nvml.Return) { // panic("mock out the VgpuTypeGetMaxInstances method") // }, +// VgpuTypeGetMaxInstancesPerGpuInstanceFunc: func(vgpuTypeMaxInstance *nvml.VgpuTypeMaxInstance) nvml.Return { +// panic("mock out the VgpuTypeGetMaxInstancesPerGpuInstance method") +// }, // VgpuTypeGetMaxInstancesPerVmFunc: func(vgpuTypeId nvml.VgpuTypeId) (int, nvml.Return) { // panic("mock out the VgpuTypeGetMaxInstancesPerVm method") // }, @@ -904,12 +1129,6 @@ type Interface struct { // ComputeInstanceGetInfoFunc mocks the ComputeInstanceGetInfo method. ComputeInstanceGetInfoFunc func(computeInstance nvml.ComputeInstance) (nvml.ComputeInstanceInfo, nvml.Return) - // DeviceCcuGetStreamStateFunc mocks the DeviceCcuGetStreamState method. 
- DeviceCcuGetStreamStateFunc func(device nvml.Device) (int, nvml.Return) - - // DeviceCcuSetStreamStateFunc mocks the DeviceCcuSetStreamState method. - DeviceCcuSetStreamStateFunc func(device nvml.Device, n int) nvml.Return - // DeviceClearAccountingPidsFunc mocks the DeviceClearAccountingPids method. DeviceClearAccountingPidsFunc func(device nvml.Device) nvml.Return @@ -985,6 +1204,12 @@ type Interface struct { // DeviceGetBusTypeFunc mocks the DeviceGetBusType method. DeviceGetBusTypeFunc func(device nvml.Device) (nvml.BusType, nvml.Return) + // DeviceGetC2cModeInfoVFunc mocks the DeviceGetC2cModeInfoV method. + DeviceGetC2cModeInfoVFunc func(device nvml.Device) nvml.C2cModeInfoHandler + + // DeviceGetCapabilitiesFunc mocks the DeviceGetCapabilities method. + DeviceGetCapabilitiesFunc func(device nvml.Device) (nvml.DeviceCapabilities, nvml.Return) + // DeviceGetClkMonStatusFunc mocks the DeviceGetClkMonStatus method. DeviceGetClkMonStatusFunc func(device nvml.Device) (nvml.ClkMonStatus, nvml.Return) @@ -994,6 +1219,9 @@ type Interface struct { // DeviceGetClockInfoFunc mocks the DeviceGetClockInfo method. DeviceGetClockInfoFunc func(device nvml.Device, clockType nvml.ClockType) (uint32, nvml.Return) + // DeviceGetClockOffsetsFunc mocks the DeviceGetClockOffsets method. + DeviceGetClockOffsetsFunc func(device nvml.Device) (nvml.ClockOffset, nvml.Return) + // DeviceGetComputeInstanceIdFunc mocks the DeviceGetComputeInstanceId method. DeviceGetComputeInstanceIdFunc func(device nvml.Device) (int, nvml.Return) @@ -1003,6 +1231,21 @@ type Interface struct { // DeviceGetComputeRunningProcessesFunc mocks the DeviceGetComputeRunningProcesses method. DeviceGetComputeRunningProcessesFunc func(device nvml.Device) ([]nvml.ProcessInfo, nvml.Return) + // DeviceGetConfComputeGpuAttestationReportFunc mocks the DeviceGetConfComputeGpuAttestationReport method. 
+ DeviceGetConfComputeGpuAttestationReportFunc func(device nvml.Device) (nvml.ConfComputeGpuAttestationReport, nvml.Return) + + // DeviceGetConfComputeGpuCertificateFunc mocks the DeviceGetConfComputeGpuCertificate method. + DeviceGetConfComputeGpuCertificateFunc func(device nvml.Device) (nvml.ConfComputeGpuCertificate, nvml.Return) + + // DeviceGetConfComputeMemSizeInfoFunc mocks the DeviceGetConfComputeMemSizeInfo method. + DeviceGetConfComputeMemSizeInfoFunc func(device nvml.Device) (nvml.ConfComputeMemSizeInfo, nvml.Return) + + // DeviceGetConfComputeProtectedMemoryUsageFunc mocks the DeviceGetConfComputeProtectedMemoryUsage method. + DeviceGetConfComputeProtectedMemoryUsageFunc func(device nvml.Device) (nvml.Memory, nvml.Return) + + // DeviceGetCoolerInfoFunc mocks the DeviceGetCoolerInfo method. + DeviceGetCoolerInfoFunc func(device nvml.Device) (nvml.CoolerInfo, nvml.Return) + // DeviceGetCountFunc mocks the DeviceGetCount method. DeviceGetCountFunc func() (int, nvml.Return) @@ -1024,6 +1267,12 @@ type Interface struct { // DeviceGetCurrPcieLinkWidthFunc mocks the DeviceGetCurrPcieLinkWidth method. DeviceGetCurrPcieLinkWidthFunc func(device nvml.Device) (int, nvml.Return) + // DeviceGetCurrentClockFreqsFunc mocks the DeviceGetCurrentClockFreqs method. + DeviceGetCurrentClockFreqsFunc func(device nvml.Device) (nvml.DeviceCurrentClockFreqs, nvml.Return) + + // DeviceGetCurrentClocksEventReasonsFunc mocks the DeviceGetCurrentClocksEventReasons method. + DeviceGetCurrentClocksEventReasonsFunc func(device nvml.Device) (uint64, nvml.Return) + // DeviceGetCurrentClocksThrottleReasonsFunc mocks the DeviceGetCurrentClocksThrottleReasons method. DeviceGetCurrentClocksThrottleReasonsFunc func(device nvml.Device) (uint64, nvml.Return) @@ -1048,9 +1297,15 @@ type Interface struct { // DeviceGetDisplayModeFunc mocks the DeviceGetDisplayMode method. 
DeviceGetDisplayModeFunc func(device nvml.Device) (nvml.EnableState, nvml.Return) + // DeviceGetDramEncryptionModeFunc mocks the DeviceGetDramEncryptionMode method. + DeviceGetDramEncryptionModeFunc func(device nvml.Device) (nvml.DramEncryptionInfo, nvml.DramEncryptionInfo, nvml.Return) + // DeviceGetDriverModelFunc mocks the DeviceGetDriverModel method. DeviceGetDriverModelFunc func(device nvml.Device) (nvml.DriverModel, nvml.DriverModel, nvml.Return) + // DeviceGetDriverModel_v2Func mocks the DeviceGetDriverModel_v2 method. + DeviceGetDriverModel_v2Func func(device nvml.Device) (nvml.DriverModel, nvml.DriverModel, nvml.Return) + // DeviceGetDynamicPstatesInfoFunc mocks the DeviceGetDynamicPstatesInfo method. DeviceGetDynamicPstatesInfoFunc func(device nvml.Device) (nvml.GpuDynamicPstatesInfo, nvml.Return) @@ -1084,6 +1339,9 @@ type Interface struct { // DeviceGetFanSpeedFunc mocks the DeviceGetFanSpeed method. DeviceGetFanSpeedFunc func(device nvml.Device) (uint32, nvml.Return) + // DeviceGetFanSpeedRPMFunc mocks the DeviceGetFanSpeedRPM method. + DeviceGetFanSpeedRPMFunc func(device nvml.Device) (nvml.FanSpeedInfo, nvml.Return) + // DeviceGetFanSpeed_v2Func mocks the DeviceGetFanSpeed_v2 method. DeviceGetFanSpeed_v2Func func(device nvml.Device, n int) (uint32, nvml.Return) @@ -1099,6 +1357,9 @@ type Interface struct { // DeviceGetGpuFabricInfoFunc mocks the DeviceGetGpuFabricInfo method. DeviceGetGpuFabricInfoFunc func(device nvml.Device) (nvml.GpuFabricInfo, nvml.Return) + // DeviceGetGpuFabricInfoVFunc mocks the DeviceGetGpuFabricInfoV method. + DeviceGetGpuFabricInfoVFunc func(device nvml.Device) nvml.GpuFabricInfoHandler + // DeviceGetGpuInstanceByIdFunc mocks the DeviceGetGpuInstanceById method. 
DeviceGetGpuInstanceByIdFunc func(device nvml.Device, n int) (nvml.GpuInstance, nvml.Return) @@ -1112,7 +1373,7 @@ type Interface struct { DeviceGetGpuInstanceProfileInfoFunc func(device nvml.Device, n int) (nvml.GpuInstanceProfileInfo, nvml.Return) // DeviceGetGpuInstanceProfileInfoVFunc mocks the DeviceGetGpuInstanceProfileInfoV method. - DeviceGetGpuInstanceProfileInfoVFunc func(device nvml.Device, n int) nvml.GpuInstanceProfileInfoV + DeviceGetGpuInstanceProfileInfoVFunc func(device nvml.Device, n int) nvml.GpuInstanceProfileInfoHandler // DeviceGetGpuInstanceRemainingCapacityFunc mocks the DeviceGetGpuInstanceRemainingCapacity method. DeviceGetGpuInstanceRemainingCapacityFunc func(device nvml.Device, gpuInstanceProfileInfo *nvml.GpuInstanceProfileInfo) (int, nvml.Return) @@ -1150,6 +1411,9 @@ type Interface struct { // DeviceGetHandleByUUIDFunc mocks the DeviceGetHandleByUUID method. DeviceGetHandleByUUIDFunc func(s string) (nvml.Device, nvml.Return) + // DeviceGetHandleByUUIDVFunc mocks the DeviceGetHandleByUUIDV method. + DeviceGetHandleByUUIDVFunc func(uUID *nvml.UUID) (nvml.Device, nvml.Return) + // DeviceGetHostVgpuModeFunc mocks the DeviceGetHostVgpuMode method. DeviceGetHostVgpuModeFunc func(device nvml.Device) (nvml.HostVgpuMode, nvml.Return) @@ -1168,9 +1432,18 @@ type Interface struct { // DeviceGetIrqNumFunc mocks the DeviceGetIrqNum method. DeviceGetIrqNumFunc func(device nvml.Device) (int, nvml.Return) + // DeviceGetJpgUtilizationFunc mocks the DeviceGetJpgUtilization method. + DeviceGetJpgUtilizationFunc func(device nvml.Device) (uint32, uint32, nvml.Return) + + // DeviceGetLastBBXFlushTimeFunc mocks the DeviceGetLastBBXFlushTime method. + DeviceGetLastBBXFlushTimeFunc func(device nvml.Device) (uint64, uint, nvml.Return) + // DeviceGetMPSComputeRunningProcessesFunc mocks the DeviceGetMPSComputeRunningProcesses method. 
DeviceGetMPSComputeRunningProcessesFunc func(device nvml.Device) ([]nvml.ProcessInfo, nvml.Return) + // DeviceGetMarginTemperatureFunc mocks the DeviceGetMarginTemperature method. + DeviceGetMarginTemperatureFunc func(device nvml.Device) (nvml.MarginTemperature, nvml.Return) + // DeviceGetMaxClockInfoFunc mocks the DeviceGetMaxClockInfo method. DeviceGetMaxClockInfoFunc func(device nvml.Device, clockType nvml.ClockType) (uint32, nvml.Return) @@ -1222,6 +1495,9 @@ type Interface struct { // DeviceGetMinorNumberFunc mocks the DeviceGetMinorNumber method. DeviceGetMinorNumberFunc func(device nvml.Device) (int, nvml.Return) + // DeviceGetModuleIdFunc mocks the DeviceGetModuleId method. + DeviceGetModuleIdFunc func(device nvml.Device) (int, nvml.Return) + // DeviceGetMultiGpuBoardFunc mocks the DeviceGetMultiGpuBoard method. DeviceGetMultiGpuBoardFunc func(device nvml.Device) (int, nvml.Return) @@ -1234,6 +1510,9 @@ type Interface struct { // DeviceGetNumGpuCoresFunc mocks the DeviceGetNumGpuCores method. DeviceGetNumGpuCoresFunc func(device nvml.Device) (int, nvml.Return) + // DeviceGetNumaNodeIdFunc mocks the DeviceGetNumaNodeId method. + DeviceGetNumaNodeIdFunc func(device nvml.Device) (int, nvml.Return) + // DeviceGetNvLinkCapabilityFunc mocks the DeviceGetNvLinkCapability method. DeviceGetNvLinkCapabilityFunc func(device nvml.Device, n int, nvLinkCapability nvml.NvLinkCapability) (uint32, nvml.Return) @@ -1258,12 +1537,24 @@ type Interface struct { // DeviceGetNvLinkVersionFunc mocks the DeviceGetNvLinkVersion method. DeviceGetNvLinkVersionFunc func(device nvml.Device, n int) (uint32, nvml.Return) + // DeviceGetNvlinkBwModeFunc mocks the DeviceGetNvlinkBwMode method. + DeviceGetNvlinkBwModeFunc func(device nvml.Device) (nvml.NvlinkGetBwMode, nvml.Return) + + // DeviceGetNvlinkSupportedBwModesFunc mocks the DeviceGetNvlinkSupportedBwModes method. 
+ DeviceGetNvlinkSupportedBwModesFunc func(device nvml.Device) (nvml.NvlinkSupportedBwModes, nvml.Return) + + // DeviceGetOfaUtilizationFunc mocks the DeviceGetOfaUtilization method. + DeviceGetOfaUtilizationFunc func(device nvml.Device) (uint32, uint32, nvml.Return) + // DeviceGetP2PStatusFunc mocks the DeviceGetP2PStatus method. DeviceGetP2PStatusFunc func(device1 nvml.Device, device2 nvml.Device, gpuP2PCapsIndex nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) // DeviceGetPciInfoFunc mocks the DeviceGetPciInfo method. DeviceGetPciInfoFunc func(device nvml.Device) (nvml.PciInfo, nvml.Return) + // DeviceGetPciInfoExtFunc mocks the DeviceGetPciInfoExt method. + DeviceGetPciInfoExtFunc func(device nvml.Device) (nvml.PciInfoExt, nvml.Return) + // DeviceGetPcieLinkMaxSpeedFunc mocks the DeviceGetPcieLinkMaxSpeed method. DeviceGetPcieLinkMaxSpeedFunc func(device nvml.Device) (uint32, nvml.Return) @@ -1276,6 +1567,9 @@ type Interface struct { // DeviceGetPcieThroughputFunc mocks the DeviceGetPcieThroughput method. DeviceGetPcieThroughputFunc func(device nvml.Device, pcieUtilCounter nvml.PcieUtilCounter) (uint32, nvml.Return) + // DeviceGetPerformanceModesFunc mocks the DeviceGetPerformanceModes method. + DeviceGetPerformanceModesFunc func(device nvml.Device) (nvml.DevicePerfModes, nvml.Return) + // DeviceGetPerformanceStateFunc mocks the DeviceGetPerformanceState method. DeviceGetPerformanceStateFunc func(device nvml.Device) (nvml.Pstates, nvml.Return) @@ -1285,6 +1579,9 @@ type Interface struct { // DeviceGetPgpuMetadataStringFunc mocks the DeviceGetPgpuMetadataString method. DeviceGetPgpuMetadataStringFunc func(device nvml.Device) (string, nvml.Return) + // DeviceGetPlatformInfoFunc mocks the DeviceGetPlatformInfo method. + DeviceGetPlatformInfoFunc func(device nvml.Device) (nvml.PlatformInfo, nvml.Return) + // DeviceGetPowerManagementDefaultLimitFunc mocks the DeviceGetPowerManagementDefaultLimit method. 
DeviceGetPowerManagementDefaultLimitFunc func(device nvml.Device) (uint32, nvml.Return) @@ -1309,6 +1606,9 @@ type Interface struct { // DeviceGetProcessUtilizationFunc mocks the DeviceGetProcessUtilization method. DeviceGetProcessUtilizationFunc func(device nvml.Device, v uint64) ([]nvml.ProcessUtilizationSample, nvml.Return) + // DeviceGetProcessesUtilizationInfoFunc mocks the DeviceGetProcessesUtilizationInfo method. + DeviceGetProcessesUtilizationInfoFunc func(device nvml.Device) (nvml.ProcessesUtilizationInfo, nvml.Return) + // DeviceGetRemappedRowsFunc mocks the DeviceGetRemappedRows method. DeviceGetRemappedRowsFunc func(device nvml.Device) (int, int, bool, bool, nvml.Return) @@ -1324,12 +1624,21 @@ type Interface struct { // DeviceGetRowRemapperHistogramFunc mocks the DeviceGetRowRemapperHistogram method. DeviceGetRowRemapperHistogramFunc func(device nvml.Device) (nvml.RowRemapperHistogramValues, nvml.Return) + // DeviceGetRunningProcessDetailListFunc mocks the DeviceGetRunningProcessDetailList method. + DeviceGetRunningProcessDetailListFunc func(device nvml.Device) (nvml.ProcessDetailList, nvml.Return) + // DeviceGetSamplesFunc mocks the DeviceGetSamples method. DeviceGetSamplesFunc func(device nvml.Device, samplingType nvml.SamplingType, v uint64) (nvml.ValueType, []nvml.Sample, nvml.Return) // DeviceGetSerialFunc mocks the DeviceGetSerial method. DeviceGetSerialFunc func(device nvml.Device) (string, nvml.Return) + // DeviceGetSramEccErrorStatusFunc mocks the DeviceGetSramEccErrorStatus method. + DeviceGetSramEccErrorStatusFunc func(device nvml.Device) (nvml.EccSramErrorStatus, nvml.Return) + + // DeviceGetSupportedClocksEventReasonsFunc mocks the DeviceGetSupportedClocksEventReasons method. + DeviceGetSupportedClocksEventReasonsFunc func(device nvml.Device) (uint64, nvml.Return) + // DeviceGetSupportedClocksThrottleReasonsFunc mocks the DeviceGetSupportedClocksThrottleReasons method. 
DeviceGetSupportedClocksThrottleReasonsFunc func(device nvml.Device) (uint64, nvml.Return) @@ -1357,6 +1666,9 @@ type Interface struct { // DeviceGetTemperatureThresholdFunc mocks the DeviceGetTemperatureThreshold method. DeviceGetTemperatureThresholdFunc func(device nvml.Device, temperatureThresholds nvml.TemperatureThresholds) (uint32, nvml.Return) + // DeviceGetTemperatureVFunc mocks the DeviceGetTemperatureV method. + DeviceGetTemperatureVFunc func(device nvml.Device) nvml.TemperatureHandler + // DeviceGetThermalSettingsFunc mocks the DeviceGetThermalSettings method. DeviceGetThermalSettingsFunc func(device nvml.Device, v uint32) (nvml.GpuThermalSettings, nvml.Return) @@ -1384,12 +1696,21 @@ type Interface struct { // DeviceGetVgpuCapabilitiesFunc mocks the DeviceGetVgpuCapabilities method. DeviceGetVgpuCapabilitiesFunc func(device nvml.Device, deviceVgpuCapability nvml.DeviceVgpuCapability) (bool, nvml.Return) + // DeviceGetVgpuHeterogeneousModeFunc mocks the DeviceGetVgpuHeterogeneousMode method. + DeviceGetVgpuHeterogeneousModeFunc func(device nvml.Device) (nvml.VgpuHeterogeneousMode, nvml.Return) + + // DeviceGetVgpuInstancesUtilizationInfoFunc mocks the DeviceGetVgpuInstancesUtilizationInfo method. + DeviceGetVgpuInstancesUtilizationInfoFunc func(device nvml.Device) (nvml.VgpuInstancesUtilizationInfo, nvml.Return) + // DeviceGetVgpuMetadataFunc mocks the DeviceGetVgpuMetadata method. DeviceGetVgpuMetadataFunc func(device nvml.Device) (nvml.VgpuPgpuMetadata, nvml.Return) // DeviceGetVgpuProcessUtilizationFunc mocks the DeviceGetVgpuProcessUtilization method. DeviceGetVgpuProcessUtilizationFunc func(device nvml.Device, v uint64) ([]nvml.VgpuProcessUtilizationSample, nvml.Return) + // DeviceGetVgpuProcessesUtilizationInfoFunc mocks the DeviceGetVgpuProcessesUtilizationInfo method. 
+ DeviceGetVgpuProcessesUtilizationInfoFunc func(device nvml.Device) (nvml.VgpuProcessesUtilizationInfo, nvml.Return) + // DeviceGetVgpuSchedulerCapabilitiesFunc mocks the DeviceGetVgpuSchedulerCapabilities method. DeviceGetVgpuSchedulerCapabilitiesFunc func(device nvml.Device) (nvml.VgpuSchedulerCapabilities, nvml.Return) @@ -1399,6 +1720,12 @@ type Interface struct { // DeviceGetVgpuSchedulerStateFunc mocks the DeviceGetVgpuSchedulerState method. DeviceGetVgpuSchedulerStateFunc func(device nvml.Device) (nvml.VgpuSchedulerGetState, nvml.Return) + // DeviceGetVgpuTypeCreatablePlacementsFunc mocks the DeviceGetVgpuTypeCreatablePlacements method. + DeviceGetVgpuTypeCreatablePlacementsFunc func(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) + + // DeviceGetVgpuTypeSupportedPlacementsFunc mocks the DeviceGetVgpuTypeSupportedPlacements method. + DeviceGetVgpuTypeSupportedPlacementsFunc func(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) + // DeviceGetVgpuUtilizationFunc mocks the DeviceGetVgpuUtilization method. DeviceGetVgpuUtilizationFunc func(device nvml.Device, v uint64) (nvml.ValueType, []nvml.VgpuInstanceUtilizationSample, nvml.Return) @@ -1417,6 +1744,15 @@ type Interface struct { // DeviceOnSameBoardFunc mocks the DeviceOnSameBoard method. DeviceOnSameBoardFunc func(device1 nvml.Device, device2 nvml.Device) (int, nvml.Return) + // DevicePowerSmoothingActivatePresetProfileFunc mocks the DevicePowerSmoothingActivatePresetProfile method. + DevicePowerSmoothingActivatePresetProfileFunc func(device nvml.Device, powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return + + // DevicePowerSmoothingSetStateFunc mocks the DevicePowerSmoothingSetState method. 
+ DevicePowerSmoothingSetStateFunc func(device nvml.Device, powerSmoothingState *nvml.PowerSmoothingState) nvml.Return + + // DevicePowerSmoothingUpdatePresetProfileParamFunc mocks the DevicePowerSmoothingUpdatePresetProfileParam method. + DevicePowerSmoothingUpdatePresetProfileParamFunc func(device nvml.Device, powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return + // DeviceQueryDrainStateFunc mocks the DeviceQueryDrainState method. DeviceQueryDrainStateFunc func(pciInfo *nvml.PciInfo) (nvml.EnableState, nvml.Return) @@ -1456,9 +1792,15 @@ type Interface struct { // DeviceSetAutoBoostedClocksEnabledFunc mocks the DeviceSetAutoBoostedClocksEnabled method. DeviceSetAutoBoostedClocksEnabledFunc func(device nvml.Device, enableState nvml.EnableState) nvml.Return + // DeviceSetClockOffsetsFunc mocks the DeviceSetClockOffsets method. + DeviceSetClockOffsetsFunc func(device nvml.Device, clockOffset nvml.ClockOffset) nvml.Return + // DeviceSetComputeModeFunc mocks the DeviceSetComputeMode method. DeviceSetComputeModeFunc func(device nvml.Device, computeMode nvml.ComputeMode) nvml.Return + // DeviceSetConfComputeUnprotectedMemSizeFunc mocks the DeviceSetConfComputeUnprotectedMemSize method. + DeviceSetConfComputeUnprotectedMemSizeFunc func(device nvml.Device, v uint64) nvml.Return + // DeviceSetCpuAffinityFunc mocks the DeviceSetCpuAffinity method. DeviceSetCpuAffinityFunc func(device nvml.Device) nvml.Return @@ -1468,6 +1810,9 @@ type Interface struct { // DeviceSetDefaultFanSpeed_v2Func mocks the DeviceSetDefaultFanSpeed_v2 method. DeviceSetDefaultFanSpeed_v2Func func(device nvml.Device, n int) nvml.Return + // DeviceSetDramEncryptionModeFunc mocks the DeviceSetDramEncryptionMode method. + DeviceSetDramEncryptionModeFunc func(device nvml.Device, dramEncryptionInfo *nvml.DramEncryptionInfo) nvml.Return + // DeviceSetDriverModelFunc mocks the DeviceSetDriverModel method. 
DeviceSetDriverModelFunc func(device nvml.Device, driverModel nvml.DriverModel, v uint32) nvml.Return @@ -1504,15 +1849,27 @@ type Interface struct { // DeviceSetNvLinkUtilizationControlFunc mocks the DeviceSetNvLinkUtilizationControl method. DeviceSetNvLinkUtilizationControlFunc func(device nvml.Device, n1 int, n2 int, nvLinkUtilizationControl *nvml.NvLinkUtilizationControl, b bool) nvml.Return + // DeviceSetNvlinkBwModeFunc mocks the DeviceSetNvlinkBwMode method. + DeviceSetNvlinkBwModeFunc func(device nvml.Device, nvlinkSetBwMode *nvml.NvlinkSetBwMode) nvml.Return + // DeviceSetPersistenceModeFunc mocks the DeviceSetPersistenceMode method. DeviceSetPersistenceModeFunc func(device nvml.Device, enableState nvml.EnableState) nvml.Return // DeviceSetPowerManagementLimitFunc mocks the DeviceSetPowerManagementLimit method. DeviceSetPowerManagementLimitFunc func(device nvml.Device, v uint32) nvml.Return + // DeviceSetPowerManagementLimit_v2Func mocks the DeviceSetPowerManagementLimit_v2 method. + DeviceSetPowerManagementLimit_v2Func func(device nvml.Device, powerValue_v2 *nvml.PowerValue_v2) nvml.Return + // DeviceSetTemperatureThresholdFunc mocks the DeviceSetTemperatureThreshold method. DeviceSetTemperatureThresholdFunc func(device nvml.Device, temperatureThresholds nvml.TemperatureThresholds, n int) nvml.Return + // DeviceSetVgpuCapabilitiesFunc mocks the DeviceSetVgpuCapabilities method. + DeviceSetVgpuCapabilitiesFunc func(device nvml.Device, deviceVgpuCapability nvml.DeviceVgpuCapability, enableState nvml.EnableState) nvml.Return + + // DeviceSetVgpuHeterogeneousModeFunc mocks the DeviceSetVgpuHeterogeneousMode method. + DeviceSetVgpuHeterogeneousModeFunc func(device nvml.Device, vgpuHeterogeneousMode nvml.VgpuHeterogeneousMode) nvml.Return + // DeviceSetVgpuSchedulerStateFunc mocks the DeviceSetVgpuSchedulerState method. 
DeviceSetVgpuSchedulerStateFunc func(device nvml.Device, vgpuSchedulerSetState *nvml.VgpuSchedulerSetState) nvml.Return @@ -1522,6 +1879,18 @@ type Interface struct { // DeviceValidateInforomFunc mocks the DeviceValidateInforom method. DeviceValidateInforomFunc func(device nvml.Device) nvml.Return + // DeviceWorkloadPowerProfileClearRequestedProfilesFunc mocks the DeviceWorkloadPowerProfileClearRequestedProfiles method. + DeviceWorkloadPowerProfileClearRequestedProfilesFunc func(device nvml.Device, workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return + + // DeviceWorkloadPowerProfileGetCurrentProfilesFunc mocks the DeviceWorkloadPowerProfileGetCurrentProfiles method. + DeviceWorkloadPowerProfileGetCurrentProfilesFunc func(device nvml.Device) (nvml.WorkloadPowerProfileCurrentProfiles, nvml.Return) + + // DeviceWorkloadPowerProfileGetProfilesInfoFunc mocks the DeviceWorkloadPowerProfileGetProfilesInfo method. + DeviceWorkloadPowerProfileGetProfilesInfoFunc func(device nvml.Device) (nvml.WorkloadPowerProfileProfilesInfo, nvml.Return) + + // DeviceWorkloadPowerProfileSetRequestedProfilesFunc mocks the DeviceWorkloadPowerProfileSetRequestedProfiles method. + DeviceWorkloadPowerProfileSetRequestedProfilesFunc func(device nvml.Device, workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return + // ErrorStringFunc mocks the ErrorString method. ErrorStringFunc func(returnMoqParam nvml.Return) string @@ -1567,6 +1936,9 @@ type Interface struct { // GpmQueryDeviceSupportVFunc mocks the GpmQueryDeviceSupportV method. GpmQueryDeviceSupportVFunc func(device nvml.Device) nvml.GpmSupportV + // GpmQueryIfStreamingEnabledFunc mocks the GpmQueryIfStreamingEnabled method. + GpmQueryIfStreamingEnabledFunc func(device nvml.Device) (uint32, nvml.Return) + // GpmSampleAllocFunc mocks the GpmSampleAlloc method. 
GpmSampleAllocFunc func() (nvml.GpmSample, nvml.Return) @@ -1576,6 +1948,9 @@ type Interface struct { // GpmSampleGetFunc mocks the GpmSampleGet method. GpmSampleGetFunc func(device nvml.Device, gpmSample nvml.GpmSample) nvml.Return + // GpmSetStreamingEnabledFunc mocks the GpmSetStreamingEnabled method. + GpmSetStreamingEnabledFunc func(device nvml.Device, v uint32) nvml.Return + // GpuInstanceCreateComputeInstanceFunc mocks the GpuInstanceCreateComputeInstance method. GpuInstanceCreateComputeInstanceFunc func(gpuInstance nvml.GpuInstance, computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) (nvml.ComputeInstance, nvml.Return) @@ -1585,6 +1960,9 @@ type Interface struct { // GpuInstanceDestroyFunc mocks the GpuInstanceDestroy method. GpuInstanceDestroyFunc func(gpuInstance nvml.GpuInstance) nvml.Return + // GpuInstanceGetActiveVgpusFunc mocks the GpuInstanceGetActiveVgpus method. + GpuInstanceGetActiveVgpusFunc func(gpuInstance nvml.GpuInstance) (nvml.ActiveVgpuInstanceInfo, nvml.Return) + // GpuInstanceGetComputeInstanceByIdFunc mocks the GpuInstanceGetComputeInstanceById method. GpuInstanceGetComputeInstanceByIdFunc func(gpuInstance nvml.GpuInstance, n int) (nvml.ComputeInstance, nvml.Return) @@ -1595,7 +1973,7 @@ type Interface struct { GpuInstanceGetComputeInstanceProfileInfoFunc func(gpuInstance nvml.GpuInstance, n1 int, n2 int) (nvml.ComputeInstanceProfileInfo, nvml.Return) // GpuInstanceGetComputeInstanceProfileInfoVFunc mocks the GpuInstanceGetComputeInstanceProfileInfoV method. - GpuInstanceGetComputeInstanceProfileInfoVFunc func(gpuInstance nvml.GpuInstance, n1 int, n2 int) nvml.ComputeInstanceProfileInfoV + GpuInstanceGetComputeInstanceProfileInfoVFunc func(gpuInstance nvml.GpuInstance, n1 int, n2 int) nvml.ComputeInstanceProfileInfoHandler // GpuInstanceGetComputeInstanceRemainingCapacityFunc mocks the GpuInstanceGetComputeInstanceRemainingCapacity method. 
GpuInstanceGetComputeInstanceRemainingCapacityFunc func(gpuInstance nvml.GpuInstance, computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) (int, nvml.Return) @@ -1603,9 +1981,30 @@ type Interface struct { // GpuInstanceGetComputeInstancesFunc mocks the GpuInstanceGetComputeInstances method. GpuInstanceGetComputeInstancesFunc func(gpuInstance nvml.GpuInstance, computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) ([]nvml.ComputeInstance, nvml.Return) + // GpuInstanceGetCreatableVgpusFunc mocks the GpuInstanceGetCreatableVgpus method. + GpuInstanceGetCreatableVgpusFunc func(gpuInstance nvml.GpuInstance) (nvml.VgpuTypeIdInfo, nvml.Return) + // GpuInstanceGetInfoFunc mocks the GpuInstanceGetInfo method. GpuInstanceGetInfoFunc func(gpuInstance nvml.GpuInstance) (nvml.GpuInstanceInfo, nvml.Return) + // GpuInstanceGetVgpuHeterogeneousModeFunc mocks the GpuInstanceGetVgpuHeterogeneousMode method. + GpuInstanceGetVgpuHeterogeneousModeFunc func(gpuInstance nvml.GpuInstance) (nvml.VgpuHeterogeneousMode, nvml.Return) + + // GpuInstanceGetVgpuSchedulerLogFunc mocks the GpuInstanceGetVgpuSchedulerLog method. + GpuInstanceGetVgpuSchedulerLogFunc func(gpuInstance nvml.GpuInstance) (nvml.VgpuSchedulerLogInfo, nvml.Return) + + // GpuInstanceGetVgpuSchedulerStateFunc mocks the GpuInstanceGetVgpuSchedulerState method. + GpuInstanceGetVgpuSchedulerStateFunc func(gpuInstance nvml.GpuInstance) (nvml.VgpuSchedulerStateInfo, nvml.Return) + + // GpuInstanceGetVgpuTypeCreatablePlacementsFunc mocks the GpuInstanceGetVgpuTypeCreatablePlacements method. + GpuInstanceGetVgpuTypeCreatablePlacementsFunc func(gpuInstance nvml.GpuInstance) (nvml.VgpuCreatablePlacementInfo, nvml.Return) + + // GpuInstanceSetVgpuHeterogeneousModeFunc mocks the GpuInstanceSetVgpuHeterogeneousMode method. 
+ GpuInstanceSetVgpuHeterogeneousModeFunc func(gpuInstance nvml.GpuInstance, vgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode) nvml.Return + + // GpuInstanceSetVgpuSchedulerStateFunc mocks the GpuInstanceSetVgpuSchedulerState method. + GpuInstanceSetVgpuSchedulerStateFunc func(gpuInstance nvml.GpuInstance, vgpuSchedulerState *nvml.VgpuSchedulerState) nvml.Return + // InitFunc mocks the Init method. InitFunc func() nvml.Return @@ -1618,12 +2017,39 @@ type Interface struct { // ShutdownFunc mocks the Shutdown method. ShutdownFunc func() nvml.Return + // SystemEventSetCreateFunc mocks the SystemEventSetCreate method. + SystemEventSetCreateFunc func(systemEventSetCreateRequest *nvml.SystemEventSetCreateRequest) nvml.Return + + // SystemEventSetFreeFunc mocks the SystemEventSetFree method. + SystemEventSetFreeFunc func(systemEventSetFreeRequest *nvml.SystemEventSetFreeRequest) nvml.Return + + // SystemEventSetWaitFunc mocks the SystemEventSetWait method. + SystemEventSetWaitFunc func(systemEventSetWaitRequest *nvml.SystemEventSetWaitRequest) nvml.Return + + // SystemGetConfComputeCapabilitiesFunc mocks the SystemGetConfComputeCapabilities method. + SystemGetConfComputeCapabilitiesFunc func() (nvml.ConfComputeSystemCaps, nvml.Return) + + // SystemGetConfComputeGpusReadyStateFunc mocks the SystemGetConfComputeGpusReadyState method. + SystemGetConfComputeGpusReadyStateFunc func() (uint32, nvml.Return) + + // SystemGetConfComputeKeyRotationThresholdInfoFunc mocks the SystemGetConfComputeKeyRotationThresholdInfo method. + SystemGetConfComputeKeyRotationThresholdInfoFunc func() (nvml.ConfComputeGetKeyRotationThresholdInfo, nvml.Return) + + // SystemGetConfComputeSettingsFunc mocks the SystemGetConfComputeSettings method. + SystemGetConfComputeSettingsFunc func() (nvml.SystemConfComputeSettings, nvml.Return) + + // SystemGetConfComputeStateFunc mocks the SystemGetConfComputeState method. 
+ SystemGetConfComputeStateFunc func() (nvml.ConfComputeSystemState, nvml.Return) + // SystemGetCudaDriverVersionFunc mocks the SystemGetCudaDriverVersion method. SystemGetCudaDriverVersionFunc func() (int, nvml.Return) // SystemGetCudaDriverVersion_v2Func mocks the SystemGetCudaDriverVersion_v2 method. SystemGetCudaDriverVersion_v2Func func() (int, nvml.Return) + // SystemGetDriverBranchFunc mocks the SystemGetDriverBranch method. + SystemGetDriverBranchFunc func() (nvml.SystemDriverBranchInfo, nvml.Return) + // SystemGetDriverVersionFunc mocks the SystemGetDriverVersion method. SystemGetDriverVersionFunc func() (string, nvml.Return) @@ -1633,12 +2059,27 @@ type Interface struct { // SystemGetNVMLVersionFunc mocks the SystemGetNVMLVersion method. SystemGetNVMLVersionFunc func() (string, nvml.Return) + // SystemGetNvlinkBwModeFunc mocks the SystemGetNvlinkBwMode method. + SystemGetNvlinkBwModeFunc func() (uint32, nvml.Return) + // SystemGetProcessNameFunc mocks the SystemGetProcessName method. SystemGetProcessNameFunc func(n int) (string, nvml.Return) // SystemGetTopologyGpuSetFunc mocks the SystemGetTopologyGpuSet method. SystemGetTopologyGpuSetFunc func(n int) ([]nvml.Device, nvml.Return) + // SystemRegisterEventsFunc mocks the SystemRegisterEvents method. + SystemRegisterEventsFunc func(systemRegisterEventRequest *nvml.SystemRegisterEventRequest) nvml.Return + + // SystemSetConfComputeGpusReadyStateFunc mocks the SystemSetConfComputeGpusReadyState method. + SystemSetConfComputeGpusReadyStateFunc func(v uint32) nvml.Return + + // SystemSetConfComputeKeyRotationThresholdInfoFunc mocks the SystemSetConfComputeKeyRotationThresholdInfo method. + SystemSetConfComputeKeyRotationThresholdInfoFunc func(confComputeSetKeyRotationThresholdInfo nvml.ConfComputeSetKeyRotationThresholdInfo) nvml.Return + + // SystemSetNvlinkBwModeFunc mocks the SystemSetNvlinkBwMode method. 
+ SystemSetNvlinkBwModeFunc func(v uint32) nvml.Return + // UnitGetCountFunc mocks the UnitGetCount method. UnitGetCountFunc func() (int, nvml.Return) @@ -1720,6 +2161,9 @@ type Interface struct { // VgpuInstanceGetMetadataFunc mocks the VgpuInstanceGetMetadata method. VgpuInstanceGetMetadataFunc func(vgpuInstance nvml.VgpuInstance) (nvml.VgpuMetadata, nvml.Return) + // VgpuInstanceGetRuntimeStateSizeFunc mocks the VgpuInstanceGetRuntimeStateSize method. + VgpuInstanceGetRuntimeStateSizeFunc func(vgpuInstance nvml.VgpuInstance) (nvml.VgpuRuntimeState, nvml.Return) + // VgpuInstanceGetTypeFunc mocks the VgpuInstanceGetType method. VgpuInstanceGetTypeFunc func(vgpuInstance nvml.VgpuInstance) (nvml.VgpuTypeId, nvml.Return) @@ -1735,6 +2179,9 @@ type Interface struct { // VgpuInstanceSetEncoderCapacityFunc mocks the VgpuInstanceSetEncoderCapacity method. VgpuInstanceSetEncoderCapacityFunc func(vgpuInstance nvml.VgpuInstance, n int) nvml.Return + // VgpuTypeGetBAR1InfoFunc mocks the VgpuTypeGetBAR1Info method. + VgpuTypeGetBAR1InfoFunc func(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuTypeBar1Info, nvml.Return) + // VgpuTypeGetCapabilitiesFunc mocks the VgpuTypeGetCapabilities method. VgpuTypeGetCapabilitiesFunc func(vgpuTypeId nvml.VgpuTypeId, vgpuCapability nvml.VgpuCapability) (bool, nvml.Return) @@ -1759,6 +2206,9 @@ type Interface struct { // VgpuTypeGetMaxInstancesFunc mocks the VgpuTypeGetMaxInstances method. VgpuTypeGetMaxInstancesFunc func(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (int, nvml.Return) + // VgpuTypeGetMaxInstancesPerGpuInstanceFunc mocks the VgpuTypeGetMaxInstancesPerGpuInstance method. + VgpuTypeGetMaxInstancesPerGpuInstanceFunc func(vgpuTypeMaxInstance *nvml.VgpuTypeMaxInstance) nvml.Return + // VgpuTypeGetMaxInstancesPerVmFunc mocks the VgpuTypeGetMaxInstancesPerVm method. 
VgpuTypeGetMaxInstancesPerVmFunc func(vgpuTypeId nvml.VgpuTypeId) (int, nvml.Return) @@ -1783,18 +2233,6 @@ type Interface struct { // ComputeInstance is the computeInstance argument value. ComputeInstance nvml.ComputeInstance } - // DeviceCcuGetStreamState holds details about calls to the DeviceCcuGetStreamState method. - DeviceCcuGetStreamState []struct { - // Device is the device argument value. - Device nvml.Device - } - // DeviceCcuSetStreamState holds details about calls to the DeviceCcuSetStreamState method. - DeviceCcuSetStreamState []struct { - // Device is the device argument value. - Device nvml.Device - // N is the n argument value. - N int - } // DeviceClearAccountingPids holds details about calls to the DeviceClearAccountingPids method. DeviceClearAccountingPids []struct { // Device is the device argument value. @@ -1940,6 +2378,16 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetC2cModeInfoV holds details about calls to the DeviceGetC2cModeInfoV method. + DeviceGetC2cModeInfoV []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetCapabilities holds details about calls to the DeviceGetCapabilities method. + DeviceGetCapabilities []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetClkMonStatus holds details about calls to the DeviceGetClkMonStatus method. DeviceGetClkMonStatus []struct { // Device is the device argument value. @@ -1961,6 +2409,11 @@ type Interface struct { // ClockType is the clockType argument value. ClockType nvml.ClockType } + // DeviceGetClockOffsets holds details about calls to the DeviceGetClockOffsets method. + DeviceGetClockOffsets []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetComputeInstanceId holds details about calls to the DeviceGetComputeInstanceId method. DeviceGetComputeInstanceId []struct { // Device is the device argument value. 
@@ -1976,6 +2429,31 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetConfComputeGpuAttestationReport holds details about calls to the DeviceGetConfComputeGpuAttestationReport method. + DeviceGetConfComputeGpuAttestationReport []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetConfComputeGpuCertificate holds details about calls to the DeviceGetConfComputeGpuCertificate method. + DeviceGetConfComputeGpuCertificate []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetConfComputeMemSizeInfo holds details about calls to the DeviceGetConfComputeMemSizeInfo method. + DeviceGetConfComputeMemSizeInfo []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetConfComputeProtectedMemoryUsage holds details about calls to the DeviceGetConfComputeProtectedMemoryUsage method. + DeviceGetConfComputeProtectedMemoryUsage []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetCoolerInfo holds details about calls to the DeviceGetCoolerInfo method. + DeviceGetCoolerInfo []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetCount holds details about calls to the DeviceGetCount method. DeviceGetCount []struct { } @@ -2015,6 +2493,16 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetCurrentClockFreqs holds details about calls to the DeviceGetCurrentClockFreqs method. + DeviceGetCurrentClockFreqs []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetCurrentClocksEventReasons holds details about calls to the DeviceGetCurrentClocksEventReasons method. + DeviceGetCurrentClocksEventReasons []struct { + // Device is the device argument value. 
+ Device nvml.Device + } // DeviceGetCurrentClocksThrottleReasons holds details about calls to the DeviceGetCurrentClocksThrottleReasons method. DeviceGetCurrentClocksThrottleReasons []struct { // Device is the device argument value. @@ -2061,11 +2549,21 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetDramEncryptionMode holds details about calls to the DeviceGetDramEncryptionMode method. + DeviceGetDramEncryptionMode []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetDriverModel holds details about calls to the DeviceGetDriverModel method. DeviceGetDriverModel []struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetDriverModel_v2 holds details about calls to the DeviceGetDriverModel_v2 method. + DeviceGetDriverModel_v2 []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetDynamicPstatesInfo holds details about calls to the DeviceGetDynamicPstatesInfo method. DeviceGetDynamicPstatesInfo []struct { // Device is the device argument value. @@ -2125,6 +2623,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetFanSpeedRPM holds details about calls to the DeviceGetFanSpeedRPM method. + DeviceGetFanSpeedRPM []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetFanSpeed_v2 holds details about calls to the DeviceGetFanSpeed_v2 method. DeviceGetFanSpeed_v2 []struct { // Device is the device argument value. @@ -2154,6 +2657,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetGpuFabricInfoV holds details about calls to the DeviceGetGpuFabricInfoV method. + DeviceGetGpuFabricInfoV []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetGpuInstanceById holds details about calls to the DeviceGetGpuInstanceById method. 
DeviceGetGpuInstanceById []struct { // Device is the device argument value. @@ -2251,6 +2759,11 @@ type Interface struct { // S is the s argument value. S string } + // DeviceGetHandleByUUIDV holds details about calls to the DeviceGetHandleByUUIDV method. + DeviceGetHandleByUUIDV []struct { + // UUID is the uUID argument value. + UUID *nvml.UUID + } // DeviceGetHostVgpuMode holds details about calls to the DeviceGetHostVgpuMode method. DeviceGetHostVgpuMode []struct { // Device is the device argument value. @@ -2283,11 +2796,26 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetJpgUtilization holds details about calls to the DeviceGetJpgUtilization method. + DeviceGetJpgUtilization []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetLastBBXFlushTime holds details about calls to the DeviceGetLastBBXFlushTime method. + DeviceGetLastBBXFlushTime []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetMPSComputeRunningProcesses holds details about calls to the DeviceGetMPSComputeRunningProcesses method. DeviceGetMPSComputeRunningProcesses []struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetMarginTemperature holds details about calls to the DeviceGetMarginTemperature method. + DeviceGetMarginTemperature []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetMaxClockInfo holds details about calls to the DeviceGetMaxClockInfo method. DeviceGetMaxClockInfo []struct { // Device is the device argument value. @@ -2393,6 +2921,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetModuleId holds details about calls to the DeviceGetModuleId method. + DeviceGetModuleId []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetMultiGpuBoard holds details about calls to the DeviceGetMultiGpuBoard method. 
DeviceGetMultiGpuBoard []struct { // Device is the device argument value. @@ -2413,6 +2946,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetNumaNodeId holds details about calls to the DeviceGetNumaNodeId method. + DeviceGetNumaNodeId []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetNvLinkCapability holds details about calls to the DeviceGetNvLinkCapability method. DeviceGetNvLinkCapability []struct { // Device is the device argument value. @@ -2477,6 +3015,21 @@ type Interface struct { // N is the n argument value. N int } + // DeviceGetNvlinkBwMode holds details about calls to the DeviceGetNvlinkBwMode method. + DeviceGetNvlinkBwMode []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetNvlinkSupportedBwModes holds details about calls to the DeviceGetNvlinkSupportedBwModes method. + DeviceGetNvlinkSupportedBwModes []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetOfaUtilization holds details about calls to the DeviceGetOfaUtilization method. + DeviceGetOfaUtilization []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetP2PStatus holds details about calls to the DeviceGetP2PStatus method. DeviceGetP2PStatus []struct { // Device1 is the device1 argument value. @@ -2491,6 +3044,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetPciInfoExt holds details about calls to the DeviceGetPciInfoExt method. + DeviceGetPciInfoExt []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetPcieLinkMaxSpeed holds details about calls to the DeviceGetPcieLinkMaxSpeed method. DeviceGetPcieLinkMaxSpeed []struct { // Device is the device argument value. @@ -2513,6 +3071,11 @@ type Interface struct { // PcieUtilCounter is the pcieUtilCounter argument value. 
PcieUtilCounter nvml.PcieUtilCounter } + // DeviceGetPerformanceModes holds details about calls to the DeviceGetPerformanceModes method. + DeviceGetPerformanceModes []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetPerformanceState holds details about calls to the DeviceGetPerformanceState method. DeviceGetPerformanceState []struct { // Device is the device argument value. @@ -2528,6 +3091,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetPlatformInfo holds details about calls to the DeviceGetPlatformInfo method. + DeviceGetPlatformInfo []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetPowerManagementDefaultLimit holds details about calls to the DeviceGetPowerManagementDefaultLimit method. DeviceGetPowerManagementDefaultLimit []struct { // Device is the device argument value. @@ -2570,6 +3138,11 @@ type Interface struct { // V is the v argument value. V uint64 } + // DeviceGetProcessesUtilizationInfo holds details about calls to the DeviceGetProcessesUtilizationInfo method. + DeviceGetProcessesUtilizationInfo []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetRemappedRows holds details about calls to the DeviceGetRemappedRows method. DeviceGetRemappedRows []struct { // Device is the device argument value. @@ -2599,6 +3172,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetRunningProcessDetailList holds details about calls to the DeviceGetRunningProcessDetailList method. + DeviceGetRunningProcessDetailList []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetSamples holds details about calls to the DeviceGetSamples method. DeviceGetSamples []struct { // Device is the device argument value. @@ -2613,6 +3191,16 @@ type Interface struct { // Device is the device argument value. 
Device nvml.Device } + // DeviceGetSramEccErrorStatus holds details about calls to the DeviceGetSramEccErrorStatus method. + DeviceGetSramEccErrorStatus []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetSupportedClocksEventReasons holds details about calls to the DeviceGetSupportedClocksEventReasons method. + DeviceGetSupportedClocksEventReasons []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetSupportedClocksThrottleReasons holds details about calls to the DeviceGetSupportedClocksThrottleReasons method. DeviceGetSupportedClocksThrottleReasons []struct { // Device is the device argument value. @@ -2666,8 +3254,13 @@ type Interface struct { // TemperatureThresholds is the temperatureThresholds argument value. TemperatureThresholds nvml.TemperatureThresholds } - // DeviceGetThermalSettings holds details about calls to the DeviceGetThermalSettings method. - DeviceGetThermalSettings []struct { + // DeviceGetTemperatureV holds details about calls to the DeviceGetTemperatureV method. + DeviceGetTemperatureV []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetThermalSettings holds details about calls to the DeviceGetThermalSettings method. + DeviceGetThermalSettings []struct { // Device is the device argument value. Device nvml.Device // V is the v argument value. @@ -2723,6 +3316,16 @@ type Interface struct { // DeviceVgpuCapability is the deviceVgpuCapability argument value. DeviceVgpuCapability nvml.DeviceVgpuCapability } + // DeviceGetVgpuHeterogeneousMode holds details about calls to the DeviceGetVgpuHeterogeneousMode method. + DeviceGetVgpuHeterogeneousMode []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceGetVgpuInstancesUtilizationInfo holds details about calls to the DeviceGetVgpuInstancesUtilizationInfo method. + DeviceGetVgpuInstancesUtilizationInfo []struct { + // Device is the device argument value. 
+ Device nvml.Device + } // DeviceGetVgpuMetadata holds details about calls to the DeviceGetVgpuMetadata method. DeviceGetVgpuMetadata []struct { // Device is the device argument value. @@ -2735,6 +3338,11 @@ type Interface struct { // V is the v argument value. V uint64 } + // DeviceGetVgpuProcessesUtilizationInfo holds details about calls to the DeviceGetVgpuProcessesUtilizationInfo method. + DeviceGetVgpuProcessesUtilizationInfo []struct { + // Device is the device argument value. + Device nvml.Device + } // DeviceGetVgpuSchedulerCapabilities holds details about calls to the DeviceGetVgpuSchedulerCapabilities method. DeviceGetVgpuSchedulerCapabilities []struct { // Device is the device argument value. @@ -2750,6 +3358,20 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceGetVgpuTypeCreatablePlacements holds details about calls to the DeviceGetVgpuTypeCreatablePlacements method. + DeviceGetVgpuTypeCreatablePlacements []struct { + // Device is the device argument value. + Device nvml.Device + // VgpuTypeId is the vgpuTypeId argument value. + VgpuTypeId nvml.VgpuTypeId + } + // DeviceGetVgpuTypeSupportedPlacements holds details about calls to the DeviceGetVgpuTypeSupportedPlacements method. + DeviceGetVgpuTypeSupportedPlacements []struct { + // Device is the device argument value. + Device nvml.Device + // VgpuTypeId is the vgpuTypeId argument value. + VgpuTypeId nvml.VgpuTypeId + } // DeviceGetVgpuUtilization holds details about calls to the DeviceGetVgpuUtilization method. DeviceGetVgpuUtilization []struct { // Device is the device argument value. @@ -2788,6 +3410,27 @@ type Interface struct { // Device2 is the device2 argument value. Device2 nvml.Device } + // DevicePowerSmoothingActivatePresetProfile holds details about calls to the DevicePowerSmoothingActivatePresetProfile method. + DevicePowerSmoothingActivatePresetProfile []struct { + // Device is the device argument value. 
+ Device nvml.Device + // PowerSmoothingProfile is the powerSmoothingProfile argument value. + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } + // DevicePowerSmoothingSetState holds details about calls to the DevicePowerSmoothingSetState method. + DevicePowerSmoothingSetState []struct { + // Device is the device argument value. + Device nvml.Device + // PowerSmoothingState is the powerSmoothingState argument value. + PowerSmoothingState *nvml.PowerSmoothingState + } + // DevicePowerSmoothingUpdatePresetProfileParam holds details about calls to the DevicePowerSmoothingUpdatePresetProfileParam method. + DevicePowerSmoothingUpdatePresetProfileParam []struct { + // Device is the device argument value. + Device nvml.Device + // PowerSmoothingProfile is the powerSmoothingProfile argument value. + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } // DeviceQueryDrainState holds details about calls to the DeviceQueryDrainState method. DeviceQueryDrainState []struct { // PciInfo is the pciInfo argument value. @@ -2879,6 +3522,13 @@ type Interface struct { // EnableState is the enableState argument value. EnableState nvml.EnableState } + // DeviceSetClockOffsets holds details about calls to the DeviceSetClockOffsets method. + DeviceSetClockOffsets []struct { + // Device is the device argument value. + Device nvml.Device + // ClockOffset is the clockOffset argument value. + ClockOffset nvml.ClockOffset + } // DeviceSetComputeMode holds details about calls to the DeviceSetComputeMode method. DeviceSetComputeMode []struct { // Device is the device argument value. @@ -2886,6 +3536,13 @@ type Interface struct { // ComputeMode is the computeMode argument value. ComputeMode nvml.ComputeMode } + // DeviceSetConfComputeUnprotectedMemSize holds details about calls to the DeviceSetConfComputeUnprotectedMemSize method. + DeviceSetConfComputeUnprotectedMemSize []struct { + // Device is the device argument value. + Device nvml.Device + // V is the v argument value. 
+ V uint64 + } // DeviceSetCpuAffinity holds details about calls to the DeviceSetCpuAffinity method. DeviceSetCpuAffinity []struct { // Device is the device argument value. @@ -2907,6 +3564,13 @@ type Interface struct { // N is the n argument value. N int } + // DeviceSetDramEncryptionMode holds details about calls to the DeviceSetDramEncryptionMode method. + DeviceSetDramEncryptionMode []struct { + // Device is the device argument value. + Device nvml.Device + // DramEncryptionInfo is the dramEncryptionInfo argument value. + DramEncryptionInfo *nvml.DramEncryptionInfo + } // DeviceSetDriverModel holds details about calls to the DeviceSetDriverModel method. DeviceSetDriverModel []struct { // Device is the device argument value. @@ -3007,6 +3671,13 @@ type Interface struct { // B is the b argument value. B bool } + // DeviceSetNvlinkBwMode holds details about calls to the DeviceSetNvlinkBwMode method. + DeviceSetNvlinkBwMode []struct { + // Device is the device argument value. + Device nvml.Device + // NvlinkSetBwMode is the nvlinkSetBwMode argument value. + NvlinkSetBwMode *nvml.NvlinkSetBwMode + } // DeviceSetPersistenceMode holds details about calls to the DeviceSetPersistenceMode method. DeviceSetPersistenceMode []struct { // Device is the device argument value. @@ -3021,6 +3692,13 @@ type Interface struct { // V is the v argument value. V uint32 } + // DeviceSetPowerManagementLimit_v2 holds details about calls to the DeviceSetPowerManagementLimit_v2 method. + DeviceSetPowerManagementLimit_v2 []struct { + // Device is the device argument value. + Device nvml.Device + // PowerValue_v2 is the powerValue_v2 argument value. + PowerValue_v2 *nvml.PowerValue_v2 + } // DeviceSetTemperatureThreshold holds details about calls to the DeviceSetTemperatureThreshold method. DeviceSetTemperatureThreshold []struct { // Device is the device argument value. @@ -3030,6 +3708,22 @@ type Interface struct { // N is the n argument value. 
N int } + // DeviceSetVgpuCapabilities holds details about calls to the DeviceSetVgpuCapabilities method. + DeviceSetVgpuCapabilities []struct { + // Device is the device argument value. + Device nvml.Device + // DeviceVgpuCapability is the deviceVgpuCapability argument value. + DeviceVgpuCapability nvml.DeviceVgpuCapability + // EnableState is the enableState argument value. + EnableState nvml.EnableState + } + // DeviceSetVgpuHeterogeneousMode holds details about calls to the DeviceSetVgpuHeterogeneousMode method. + DeviceSetVgpuHeterogeneousMode []struct { + // Device is the device argument value. + Device nvml.Device + // VgpuHeterogeneousMode is the vgpuHeterogeneousMode argument value. + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode + } // DeviceSetVgpuSchedulerState holds details about calls to the DeviceSetVgpuSchedulerState method. DeviceSetVgpuSchedulerState []struct { // Device is the device argument value. @@ -3049,6 +3743,30 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // DeviceWorkloadPowerProfileClearRequestedProfiles holds details about calls to the DeviceWorkloadPowerProfileClearRequestedProfiles method. + DeviceWorkloadPowerProfileClearRequestedProfiles []struct { + // Device is the device argument value. + Device nvml.Device + // WorkloadPowerProfileRequestedProfiles is the workloadPowerProfileRequestedProfiles argument value. + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } + // DeviceWorkloadPowerProfileGetCurrentProfiles holds details about calls to the DeviceWorkloadPowerProfileGetCurrentProfiles method. + DeviceWorkloadPowerProfileGetCurrentProfiles []struct { + // Device is the device argument value. + Device nvml.Device + } + // DeviceWorkloadPowerProfileGetProfilesInfo holds details about calls to the DeviceWorkloadPowerProfileGetProfilesInfo method. + DeviceWorkloadPowerProfileGetProfilesInfo []struct { + // Device is the device argument value. 
+ Device nvml.Device + } + // DeviceWorkloadPowerProfileSetRequestedProfiles holds details about calls to the DeviceWorkloadPowerProfileSetRequestedProfiles method. + DeviceWorkloadPowerProfileSetRequestedProfiles []struct { + // Device is the device argument value. + Device nvml.Device + // WorkloadPowerProfileRequestedProfiles is the workloadPowerProfileRequestedProfiles argument value. + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } // ErrorString holds details about calls to the ErrorString method. ErrorString []struct { // ReturnMoqParam is the returnMoqParam argument value. @@ -3124,6 +3842,11 @@ type Interface struct { // Device is the device argument value. Device nvml.Device } + // GpmQueryIfStreamingEnabled holds details about calls to the GpmQueryIfStreamingEnabled method. + GpmQueryIfStreamingEnabled []struct { + // Device is the device argument value. + Device nvml.Device + } // GpmSampleAlloc holds details about calls to the GpmSampleAlloc method. GpmSampleAlloc []struct { } @@ -3139,6 +3862,13 @@ type Interface struct { // GpmSample is the gpmSample argument value. GpmSample nvml.GpmSample } + // GpmSetStreamingEnabled holds details about calls to the GpmSetStreamingEnabled method. + GpmSetStreamingEnabled []struct { + // Device is the device argument value. + Device nvml.Device + // V is the v argument value. + V uint32 + } // GpuInstanceCreateComputeInstance holds details about calls to the GpuInstanceCreateComputeInstance method. GpuInstanceCreateComputeInstance []struct { // GpuInstance is the gpuInstance argument value. @@ -3160,6 +3890,11 @@ type Interface struct { // GpuInstance is the gpuInstance argument value. GpuInstance nvml.GpuInstance } + // GpuInstanceGetActiveVgpus holds details about calls to the GpuInstanceGetActiveVgpus method. + GpuInstanceGetActiveVgpus []struct { + // GpuInstance is the gpuInstance argument value. 
+ GpuInstance nvml.GpuInstance + } // GpuInstanceGetComputeInstanceById holds details about calls to the GpuInstanceGetComputeInstanceById method. GpuInstanceGetComputeInstanceById []struct { // GpuInstance is the gpuInstance argument value. @@ -3206,11 +3941,50 @@ type Interface struct { // ComputeInstanceProfileInfo is the computeInstanceProfileInfo argument value. ComputeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo } + // GpuInstanceGetCreatableVgpus holds details about calls to the GpuInstanceGetCreatableVgpus method. + GpuInstanceGetCreatableVgpus []struct { + // GpuInstance is the gpuInstance argument value. + GpuInstance nvml.GpuInstance + } // GpuInstanceGetInfo holds details about calls to the GpuInstanceGetInfo method. GpuInstanceGetInfo []struct { // GpuInstance is the gpuInstance argument value. GpuInstance nvml.GpuInstance } + // GpuInstanceGetVgpuHeterogeneousMode holds details about calls to the GpuInstanceGetVgpuHeterogeneousMode method. + GpuInstanceGetVgpuHeterogeneousMode []struct { + // GpuInstance is the gpuInstance argument value. + GpuInstance nvml.GpuInstance + } + // GpuInstanceGetVgpuSchedulerLog holds details about calls to the GpuInstanceGetVgpuSchedulerLog method. + GpuInstanceGetVgpuSchedulerLog []struct { + // GpuInstance is the gpuInstance argument value. + GpuInstance nvml.GpuInstance + } + // GpuInstanceGetVgpuSchedulerState holds details about calls to the GpuInstanceGetVgpuSchedulerState method. + GpuInstanceGetVgpuSchedulerState []struct { + // GpuInstance is the gpuInstance argument value. + GpuInstance nvml.GpuInstance + } + // GpuInstanceGetVgpuTypeCreatablePlacements holds details about calls to the GpuInstanceGetVgpuTypeCreatablePlacements method. + GpuInstanceGetVgpuTypeCreatablePlacements []struct { + // GpuInstance is the gpuInstance argument value. + GpuInstance nvml.GpuInstance + } + // GpuInstanceSetVgpuHeterogeneousMode holds details about calls to the GpuInstanceSetVgpuHeterogeneousMode method. 
+ GpuInstanceSetVgpuHeterogeneousMode []struct { + // GpuInstance is the gpuInstance argument value. + GpuInstance nvml.GpuInstance + // VgpuHeterogeneousMode is the vgpuHeterogeneousMode argument value. + VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode + } + // GpuInstanceSetVgpuSchedulerState holds details about calls to the GpuInstanceSetVgpuSchedulerState method. + GpuInstanceSetVgpuSchedulerState []struct { + // GpuInstance is the gpuInstance argument value. + GpuInstance nvml.GpuInstance + // VgpuSchedulerState is the vgpuSchedulerState argument value. + VgpuSchedulerState *nvml.VgpuSchedulerState + } // Init holds details about calls to the Init method. Init []struct { } @@ -3227,12 +4001,45 @@ type Interface struct { // Shutdown holds details about calls to the Shutdown method. Shutdown []struct { } + // SystemEventSetCreate holds details about calls to the SystemEventSetCreate method. + SystemEventSetCreate []struct { + // SystemEventSetCreateRequest is the systemEventSetCreateRequest argument value. + SystemEventSetCreateRequest *nvml.SystemEventSetCreateRequest + } + // SystemEventSetFree holds details about calls to the SystemEventSetFree method. + SystemEventSetFree []struct { + // SystemEventSetFreeRequest is the systemEventSetFreeRequest argument value. + SystemEventSetFreeRequest *nvml.SystemEventSetFreeRequest + } + // SystemEventSetWait holds details about calls to the SystemEventSetWait method. + SystemEventSetWait []struct { + // SystemEventSetWaitRequest is the systemEventSetWaitRequest argument value. + SystemEventSetWaitRequest *nvml.SystemEventSetWaitRequest + } + // SystemGetConfComputeCapabilities holds details about calls to the SystemGetConfComputeCapabilities method. + SystemGetConfComputeCapabilities []struct { + } + // SystemGetConfComputeGpusReadyState holds details about calls to the SystemGetConfComputeGpusReadyState method. 
+ SystemGetConfComputeGpusReadyState []struct { + } + // SystemGetConfComputeKeyRotationThresholdInfo holds details about calls to the SystemGetConfComputeKeyRotationThresholdInfo method. + SystemGetConfComputeKeyRotationThresholdInfo []struct { + } + // SystemGetConfComputeSettings holds details about calls to the SystemGetConfComputeSettings method. + SystemGetConfComputeSettings []struct { + } + // SystemGetConfComputeState holds details about calls to the SystemGetConfComputeState method. + SystemGetConfComputeState []struct { + } // SystemGetCudaDriverVersion holds details about calls to the SystemGetCudaDriverVersion method. SystemGetCudaDriverVersion []struct { } // SystemGetCudaDriverVersion_v2 holds details about calls to the SystemGetCudaDriverVersion_v2 method. SystemGetCudaDriverVersion_v2 []struct { } + // SystemGetDriverBranch holds details about calls to the SystemGetDriverBranch method. + SystemGetDriverBranch []struct { + } // SystemGetDriverVersion holds details about calls to the SystemGetDriverVersion method. SystemGetDriverVersion []struct { } @@ -3242,6 +4049,9 @@ type Interface struct { // SystemGetNVMLVersion holds details about calls to the SystemGetNVMLVersion method. SystemGetNVMLVersion []struct { } + // SystemGetNvlinkBwMode holds details about calls to the SystemGetNvlinkBwMode method. + SystemGetNvlinkBwMode []struct { + } // SystemGetProcessName holds details about calls to the SystemGetProcessName method. SystemGetProcessName []struct { // N is the n argument value. @@ -3252,6 +4062,26 @@ type Interface struct { // N is the n argument value. N int } + // SystemRegisterEvents holds details about calls to the SystemRegisterEvents method. + SystemRegisterEvents []struct { + // SystemRegisterEventRequest is the systemRegisterEventRequest argument value. + SystemRegisterEventRequest *nvml.SystemRegisterEventRequest + } + // SystemSetConfComputeGpusReadyState holds details about calls to the SystemSetConfComputeGpusReadyState method. 
+ SystemSetConfComputeGpusReadyState []struct { + // V is the v argument value. + V uint32 + } + // SystemSetConfComputeKeyRotationThresholdInfo holds details about calls to the SystemSetConfComputeKeyRotationThresholdInfo method. + SystemSetConfComputeKeyRotationThresholdInfo []struct { + // ConfComputeSetKeyRotationThresholdInfo is the confComputeSetKeyRotationThresholdInfo argument value. + ConfComputeSetKeyRotationThresholdInfo nvml.ConfComputeSetKeyRotationThresholdInfo + } + // SystemSetNvlinkBwMode holds details about calls to the SystemSetNvlinkBwMode method. + SystemSetNvlinkBwMode []struct { + // V is the v argument value. + V uint32 + } // UnitGetCount holds details about calls to the UnitGetCount method. UnitGetCount []struct { } @@ -3391,6 +4221,11 @@ type Interface struct { // VgpuInstance is the vgpuInstance argument value. VgpuInstance nvml.VgpuInstance } + // VgpuInstanceGetRuntimeStateSize holds details about calls to the VgpuInstanceGetRuntimeStateSize method. + VgpuInstanceGetRuntimeStateSize []struct { + // VgpuInstance is the vgpuInstance argument value. + VgpuInstance nvml.VgpuInstance + } // VgpuInstanceGetType holds details about calls to the VgpuInstanceGetType method. VgpuInstanceGetType []struct { // VgpuInstance is the vgpuInstance argument value. @@ -3418,6 +4253,11 @@ type Interface struct { // N is the n argument value. N int } + // VgpuTypeGetBAR1Info holds details about calls to the VgpuTypeGetBAR1Info method. + VgpuTypeGetBAR1Info []struct { + // VgpuTypeId is the vgpuTypeId argument value. + VgpuTypeId nvml.VgpuTypeId + } // VgpuTypeGetCapabilities holds details about calls to the VgpuTypeGetCapabilities method. VgpuTypeGetCapabilities []struct { // VgpuTypeId is the vgpuTypeId argument value. @@ -3462,6 +4302,11 @@ type Interface struct { // VgpuTypeId is the vgpuTypeId argument value. 
VgpuTypeId nvml.VgpuTypeId } + // VgpuTypeGetMaxInstancesPerGpuInstance holds details about calls to the VgpuTypeGetMaxInstancesPerGpuInstance method. + VgpuTypeGetMaxInstancesPerGpuInstance []struct { + // VgpuTypeMaxInstance is the vgpuTypeMaxInstance argument value. + VgpuTypeMaxInstance *nvml.VgpuTypeMaxInstance + } // VgpuTypeGetMaxInstancesPerVm holds details about calls to the VgpuTypeGetMaxInstancesPerVm method. VgpuTypeGetMaxInstancesPerVm []struct { // VgpuTypeId is the vgpuTypeId argument value. @@ -3485,297 +4330,372 @@ type Interface struct { N int } } - lockComputeInstanceDestroy sync.RWMutex - lockComputeInstanceGetInfo sync.RWMutex - lockDeviceCcuGetStreamState sync.RWMutex - lockDeviceCcuSetStreamState sync.RWMutex - lockDeviceClearAccountingPids sync.RWMutex - lockDeviceClearCpuAffinity sync.RWMutex - lockDeviceClearEccErrorCounts sync.RWMutex - lockDeviceClearFieldValues sync.RWMutex - lockDeviceCreateGpuInstance sync.RWMutex - lockDeviceCreateGpuInstanceWithPlacement sync.RWMutex - lockDeviceDiscoverGpus sync.RWMutex - lockDeviceFreezeNvLinkUtilizationCounter sync.RWMutex - lockDeviceGetAPIRestriction sync.RWMutex - lockDeviceGetAccountingBufferSize sync.RWMutex - lockDeviceGetAccountingMode sync.RWMutex - lockDeviceGetAccountingPids sync.RWMutex - lockDeviceGetAccountingStats sync.RWMutex - lockDeviceGetActiveVgpus sync.RWMutex - lockDeviceGetAdaptiveClockInfoStatus sync.RWMutex - lockDeviceGetApplicationsClock sync.RWMutex - lockDeviceGetArchitecture sync.RWMutex - lockDeviceGetAttributes sync.RWMutex - lockDeviceGetAutoBoostedClocksEnabled sync.RWMutex - lockDeviceGetBAR1MemoryInfo sync.RWMutex - lockDeviceGetBoardId sync.RWMutex - lockDeviceGetBoardPartNumber sync.RWMutex - lockDeviceGetBrand sync.RWMutex - lockDeviceGetBridgeChipInfo sync.RWMutex - lockDeviceGetBusType sync.RWMutex - lockDeviceGetClkMonStatus sync.RWMutex - lockDeviceGetClock sync.RWMutex - lockDeviceGetClockInfo sync.RWMutex - lockDeviceGetComputeInstanceId sync.RWMutex - 
lockDeviceGetComputeMode sync.RWMutex - lockDeviceGetComputeRunningProcesses sync.RWMutex - lockDeviceGetCount sync.RWMutex - lockDeviceGetCpuAffinity sync.RWMutex - lockDeviceGetCpuAffinityWithinScope sync.RWMutex - lockDeviceGetCreatableVgpus sync.RWMutex - lockDeviceGetCudaComputeCapability sync.RWMutex - lockDeviceGetCurrPcieLinkGeneration sync.RWMutex - lockDeviceGetCurrPcieLinkWidth sync.RWMutex - lockDeviceGetCurrentClocksThrottleReasons sync.RWMutex - lockDeviceGetDecoderUtilization sync.RWMutex - lockDeviceGetDefaultApplicationsClock sync.RWMutex - lockDeviceGetDefaultEccMode sync.RWMutex - lockDeviceGetDetailedEccErrors sync.RWMutex - lockDeviceGetDeviceHandleFromMigDeviceHandle sync.RWMutex - lockDeviceGetDisplayActive sync.RWMutex - lockDeviceGetDisplayMode sync.RWMutex - lockDeviceGetDriverModel sync.RWMutex - lockDeviceGetDynamicPstatesInfo sync.RWMutex - lockDeviceGetEccMode sync.RWMutex - lockDeviceGetEncoderCapacity sync.RWMutex - lockDeviceGetEncoderSessions sync.RWMutex - lockDeviceGetEncoderStats sync.RWMutex - lockDeviceGetEncoderUtilization sync.RWMutex - lockDeviceGetEnforcedPowerLimit sync.RWMutex - lockDeviceGetFBCSessions sync.RWMutex - lockDeviceGetFBCStats sync.RWMutex - lockDeviceGetFanControlPolicy_v2 sync.RWMutex - lockDeviceGetFanSpeed sync.RWMutex - lockDeviceGetFanSpeed_v2 sync.RWMutex - lockDeviceGetFieldValues sync.RWMutex - lockDeviceGetGpcClkMinMaxVfOffset sync.RWMutex - lockDeviceGetGpcClkVfOffset sync.RWMutex - lockDeviceGetGpuFabricInfo sync.RWMutex - lockDeviceGetGpuInstanceById sync.RWMutex - lockDeviceGetGpuInstanceId sync.RWMutex - lockDeviceGetGpuInstancePossiblePlacements sync.RWMutex - lockDeviceGetGpuInstanceProfileInfo sync.RWMutex - lockDeviceGetGpuInstanceProfileInfoV sync.RWMutex - lockDeviceGetGpuInstanceRemainingCapacity sync.RWMutex - lockDeviceGetGpuInstances sync.RWMutex - lockDeviceGetGpuMaxPcieLinkGeneration sync.RWMutex - lockDeviceGetGpuOperationMode sync.RWMutex - lockDeviceGetGraphicsRunningProcesses 
sync.RWMutex - lockDeviceGetGridLicensableFeatures sync.RWMutex - lockDeviceGetGspFirmwareMode sync.RWMutex - lockDeviceGetGspFirmwareVersion sync.RWMutex - lockDeviceGetHandleByIndex sync.RWMutex - lockDeviceGetHandleByPciBusId sync.RWMutex - lockDeviceGetHandleBySerial sync.RWMutex - lockDeviceGetHandleByUUID sync.RWMutex - lockDeviceGetHostVgpuMode sync.RWMutex - lockDeviceGetIndex sync.RWMutex - lockDeviceGetInforomConfigurationChecksum sync.RWMutex - lockDeviceGetInforomImageVersion sync.RWMutex - lockDeviceGetInforomVersion sync.RWMutex - lockDeviceGetIrqNum sync.RWMutex - lockDeviceGetMPSComputeRunningProcesses sync.RWMutex - lockDeviceGetMaxClockInfo sync.RWMutex - lockDeviceGetMaxCustomerBoostClock sync.RWMutex - lockDeviceGetMaxMigDeviceCount sync.RWMutex - lockDeviceGetMaxPcieLinkGeneration sync.RWMutex - lockDeviceGetMaxPcieLinkWidth sync.RWMutex - lockDeviceGetMemClkMinMaxVfOffset sync.RWMutex - lockDeviceGetMemClkVfOffset sync.RWMutex - lockDeviceGetMemoryAffinity sync.RWMutex - lockDeviceGetMemoryBusWidth sync.RWMutex - lockDeviceGetMemoryErrorCounter sync.RWMutex - lockDeviceGetMemoryInfo sync.RWMutex - lockDeviceGetMemoryInfo_v2 sync.RWMutex - lockDeviceGetMigDeviceHandleByIndex sync.RWMutex - lockDeviceGetMigMode sync.RWMutex - lockDeviceGetMinMaxClockOfPState sync.RWMutex - lockDeviceGetMinMaxFanSpeed sync.RWMutex - lockDeviceGetMinorNumber sync.RWMutex - lockDeviceGetMultiGpuBoard sync.RWMutex - lockDeviceGetName sync.RWMutex - lockDeviceGetNumFans sync.RWMutex - lockDeviceGetNumGpuCores sync.RWMutex - lockDeviceGetNvLinkCapability sync.RWMutex - lockDeviceGetNvLinkErrorCounter sync.RWMutex - lockDeviceGetNvLinkRemoteDeviceType sync.RWMutex - lockDeviceGetNvLinkRemotePciInfo sync.RWMutex - lockDeviceGetNvLinkState sync.RWMutex - lockDeviceGetNvLinkUtilizationControl sync.RWMutex - lockDeviceGetNvLinkUtilizationCounter sync.RWMutex - lockDeviceGetNvLinkVersion sync.RWMutex - lockDeviceGetP2PStatus sync.RWMutex - lockDeviceGetPciInfo sync.RWMutex 
- lockDeviceGetPcieLinkMaxSpeed sync.RWMutex - lockDeviceGetPcieReplayCounter sync.RWMutex - lockDeviceGetPcieSpeed sync.RWMutex - lockDeviceGetPcieThroughput sync.RWMutex - lockDeviceGetPerformanceState sync.RWMutex - lockDeviceGetPersistenceMode sync.RWMutex - lockDeviceGetPgpuMetadataString sync.RWMutex - lockDeviceGetPowerManagementDefaultLimit sync.RWMutex - lockDeviceGetPowerManagementLimit sync.RWMutex - lockDeviceGetPowerManagementLimitConstraints sync.RWMutex - lockDeviceGetPowerManagementMode sync.RWMutex - lockDeviceGetPowerSource sync.RWMutex - lockDeviceGetPowerState sync.RWMutex - lockDeviceGetPowerUsage sync.RWMutex - lockDeviceGetProcessUtilization sync.RWMutex - lockDeviceGetRemappedRows sync.RWMutex - lockDeviceGetRetiredPages sync.RWMutex - lockDeviceGetRetiredPagesPendingStatus sync.RWMutex - lockDeviceGetRetiredPages_v2 sync.RWMutex - lockDeviceGetRowRemapperHistogram sync.RWMutex - lockDeviceGetSamples sync.RWMutex - lockDeviceGetSerial sync.RWMutex - lockDeviceGetSupportedClocksThrottleReasons sync.RWMutex - lockDeviceGetSupportedEventTypes sync.RWMutex - lockDeviceGetSupportedGraphicsClocks sync.RWMutex - lockDeviceGetSupportedMemoryClocks sync.RWMutex - lockDeviceGetSupportedPerformanceStates sync.RWMutex - lockDeviceGetSupportedVgpus sync.RWMutex - lockDeviceGetTargetFanSpeed sync.RWMutex - lockDeviceGetTemperature sync.RWMutex - lockDeviceGetTemperatureThreshold sync.RWMutex - lockDeviceGetThermalSettings sync.RWMutex - lockDeviceGetTopologyCommonAncestor sync.RWMutex - lockDeviceGetTopologyNearestGpus sync.RWMutex - lockDeviceGetTotalEccErrors sync.RWMutex - lockDeviceGetTotalEnergyConsumption sync.RWMutex - lockDeviceGetUUID sync.RWMutex - lockDeviceGetUtilizationRates sync.RWMutex - lockDeviceGetVbiosVersion sync.RWMutex - lockDeviceGetVgpuCapabilities sync.RWMutex - lockDeviceGetVgpuMetadata sync.RWMutex - lockDeviceGetVgpuProcessUtilization sync.RWMutex - lockDeviceGetVgpuSchedulerCapabilities sync.RWMutex - 
lockDeviceGetVgpuSchedulerLog sync.RWMutex - lockDeviceGetVgpuSchedulerState sync.RWMutex - lockDeviceGetVgpuUtilization sync.RWMutex - lockDeviceGetViolationStatus sync.RWMutex - lockDeviceGetVirtualizationMode sync.RWMutex - lockDeviceIsMigDeviceHandle sync.RWMutex - lockDeviceModifyDrainState sync.RWMutex - lockDeviceOnSameBoard sync.RWMutex - lockDeviceQueryDrainState sync.RWMutex - lockDeviceRegisterEvents sync.RWMutex - lockDeviceRemoveGpu sync.RWMutex - lockDeviceRemoveGpu_v2 sync.RWMutex - lockDeviceResetApplicationsClocks sync.RWMutex - lockDeviceResetGpuLockedClocks sync.RWMutex - lockDeviceResetMemoryLockedClocks sync.RWMutex - lockDeviceResetNvLinkErrorCounters sync.RWMutex - lockDeviceResetNvLinkUtilizationCounter sync.RWMutex - lockDeviceSetAPIRestriction sync.RWMutex - lockDeviceSetAccountingMode sync.RWMutex - lockDeviceSetApplicationsClocks sync.RWMutex - lockDeviceSetAutoBoostedClocksEnabled sync.RWMutex - lockDeviceSetComputeMode sync.RWMutex - lockDeviceSetCpuAffinity sync.RWMutex - lockDeviceSetDefaultAutoBoostedClocksEnabled sync.RWMutex - lockDeviceSetDefaultFanSpeed_v2 sync.RWMutex - lockDeviceSetDriverModel sync.RWMutex - lockDeviceSetEccMode sync.RWMutex - lockDeviceSetFanControlPolicy sync.RWMutex - lockDeviceSetFanSpeed_v2 sync.RWMutex - lockDeviceSetGpcClkVfOffset sync.RWMutex - lockDeviceSetGpuLockedClocks sync.RWMutex - lockDeviceSetGpuOperationMode sync.RWMutex - lockDeviceSetMemClkVfOffset sync.RWMutex - lockDeviceSetMemoryLockedClocks sync.RWMutex - lockDeviceSetMigMode sync.RWMutex - lockDeviceSetNvLinkDeviceLowPowerThreshold sync.RWMutex - lockDeviceSetNvLinkUtilizationControl sync.RWMutex - lockDeviceSetPersistenceMode sync.RWMutex - lockDeviceSetPowerManagementLimit sync.RWMutex - lockDeviceSetTemperatureThreshold sync.RWMutex - lockDeviceSetVgpuSchedulerState sync.RWMutex - lockDeviceSetVirtualizationMode sync.RWMutex - lockDeviceValidateInforom sync.RWMutex - lockErrorString sync.RWMutex - lockEventSetCreate sync.RWMutex - 
lockEventSetFree sync.RWMutex - lockEventSetWait sync.RWMutex - lockExtensions sync.RWMutex - lockGetExcludedDeviceCount sync.RWMutex - lockGetExcludedDeviceInfoByIndex sync.RWMutex - lockGetVgpuCompatibility sync.RWMutex - lockGetVgpuDriverCapabilities sync.RWMutex - lockGetVgpuVersion sync.RWMutex - lockGpmMetricsGet sync.RWMutex - lockGpmMetricsGetV sync.RWMutex - lockGpmMigSampleGet sync.RWMutex - lockGpmQueryDeviceSupport sync.RWMutex - lockGpmQueryDeviceSupportV sync.RWMutex - lockGpmSampleAlloc sync.RWMutex - lockGpmSampleFree sync.RWMutex - lockGpmSampleGet sync.RWMutex - lockGpuInstanceCreateComputeInstance sync.RWMutex - lockGpuInstanceCreateComputeInstanceWithPlacement sync.RWMutex - lockGpuInstanceDestroy sync.RWMutex - lockGpuInstanceGetComputeInstanceById sync.RWMutex - lockGpuInstanceGetComputeInstancePossiblePlacements sync.RWMutex - lockGpuInstanceGetComputeInstanceProfileInfo sync.RWMutex - lockGpuInstanceGetComputeInstanceProfileInfoV sync.RWMutex - lockGpuInstanceGetComputeInstanceRemainingCapacity sync.RWMutex - lockGpuInstanceGetComputeInstances sync.RWMutex - lockGpuInstanceGetInfo sync.RWMutex - lockInit sync.RWMutex - lockInitWithFlags sync.RWMutex - lockSetVgpuVersion sync.RWMutex - lockShutdown sync.RWMutex - lockSystemGetCudaDriverVersion sync.RWMutex - lockSystemGetCudaDriverVersion_v2 sync.RWMutex - lockSystemGetDriverVersion sync.RWMutex - lockSystemGetHicVersion sync.RWMutex - lockSystemGetNVMLVersion sync.RWMutex - lockSystemGetProcessName sync.RWMutex - lockSystemGetTopologyGpuSet sync.RWMutex - lockUnitGetCount sync.RWMutex - lockUnitGetDevices sync.RWMutex - lockUnitGetFanSpeedInfo sync.RWMutex - lockUnitGetHandleByIndex sync.RWMutex - lockUnitGetLedState sync.RWMutex - lockUnitGetPsuInfo sync.RWMutex - lockUnitGetTemperature sync.RWMutex - lockUnitGetUnitInfo sync.RWMutex - lockUnitSetLedState sync.RWMutex - lockVgpuInstanceClearAccountingPids sync.RWMutex - lockVgpuInstanceGetAccountingMode sync.RWMutex - 
lockVgpuInstanceGetAccountingPids sync.RWMutex - lockVgpuInstanceGetAccountingStats sync.RWMutex - lockVgpuInstanceGetEccMode sync.RWMutex - lockVgpuInstanceGetEncoderCapacity sync.RWMutex - lockVgpuInstanceGetEncoderSessions sync.RWMutex - lockVgpuInstanceGetEncoderStats sync.RWMutex - lockVgpuInstanceGetFBCSessions sync.RWMutex - lockVgpuInstanceGetFBCStats sync.RWMutex - lockVgpuInstanceGetFbUsage sync.RWMutex - lockVgpuInstanceGetFrameRateLimit sync.RWMutex - lockVgpuInstanceGetGpuInstanceId sync.RWMutex - lockVgpuInstanceGetGpuPciId sync.RWMutex - lockVgpuInstanceGetLicenseInfo sync.RWMutex - lockVgpuInstanceGetLicenseStatus sync.RWMutex - lockVgpuInstanceGetMdevUUID sync.RWMutex - lockVgpuInstanceGetMetadata sync.RWMutex - lockVgpuInstanceGetType sync.RWMutex - lockVgpuInstanceGetUUID sync.RWMutex - lockVgpuInstanceGetVmDriverVersion sync.RWMutex - lockVgpuInstanceGetVmID sync.RWMutex - lockVgpuInstanceSetEncoderCapacity sync.RWMutex - lockVgpuTypeGetCapabilities sync.RWMutex - lockVgpuTypeGetClass sync.RWMutex - lockVgpuTypeGetDeviceID sync.RWMutex - lockVgpuTypeGetFrameRateLimit sync.RWMutex - lockVgpuTypeGetFramebufferSize sync.RWMutex - lockVgpuTypeGetGpuInstanceProfileId sync.RWMutex - lockVgpuTypeGetLicense sync.RWMutex - lockVgpuTypeGetMaxInstances sync.RWMutex - lockVgpuTypeGetMaxInstancesPerVm sync.RWMutex - lockVgpuTypeGetName sync.RWMutex - lockVgpuTypeGetNumDisplayHeads sync.RWMutex - lockVgpuTypeGetResolution sync.RWMutex + lockComputeInstanceDestroy sync.RWMutex + lockComputeInstanceGetInfo sync.RWMutex + lockDeviceClearAccountingPids sync.RWMutex + lockDeviceClearCpuAffinity sync.RWMutex + lockDeviceClearEccErrorCounts sync.RWMutex + lockDeviceClearFieldValues sync.RWMutex + lockDeviceCreateGpuInstance sync.RWMutex + lockDeviceCreateGpuInstanceWithPlacement sync.RWMutex + lockDeviceDiscoverGpus sync.RWMutex + lockDeviceFreezeNvLinkUtilizationCounter sync.RWMutex + lockDeviceGetAPIRestriction sync.RWMutex + lockDeviceGetAccountingBufferSize 
sync.RWMutex + lockDeviceGetAccountingMode sync.RWMutex + lockDeviceGetAccountingPids sync.RWMutex + lockDeviceGetAccountingStats sync.RWMutex + lockDeviceGetActiveVgpus sync.RWMutex + lockDeviceGetAdaptiveClockInfoStatus sync.RWMutex + lockDeviceGetApplicationsClock sync.RWMutex + lockDeviceGetArchitecture sync.RWMutex + lockDeviceGetAttributes sync.RWMutex + lockDeviceGetAutoBoostedClocksEnabled sync.RWMutex + lockDeviceGetBAR1MemoryInfo sync.RWMutex + lockDeviceGetBoardId sync.RWMutex + lockDeviceGetBoardPartNumber sync.RWMutex + lockDeviceGetBrand sync.RWMutex + lockDeviceGetBridgeChipInfo sync.RWMutex + lockDeviceGetBusType sync.RWMutex + lockDeviceGetC2cModeInfoV sync.RWMutex + lockDeviceGetCapabilities sync.RWMutex + lockDeviceGetClkMonStatus sync.RWMutex + lockDeviceGetClock sync.RWMutex + lockDeviceGetClockInfo sync.RWMutex + lockDeviceGetClockOffsets sync.RWMutex + lockDeviceGetComputeInstanceId sync.RWMutex + lockDeviceGetComputeMode sync.RWMutex + lockDeviceGetComputeRunningProcesses sync.RWMutex + lockDeviceGetConfComputeGpuAttestationReport sync.RWMutex + lockDeviceGetConfComputeGpuCertificate sync.RWMutex + lockDeviceGetConfComputeMemSizeInfo sync.RWMutex + lockDeviceGetConfComputeProtectedMemoryUsage sync.RWMutex + lockDeviceGetCoolerInfo sync.RWMutex + lockDeviceGetCount sync.RWMutex + lockDeviceGetCpuAffinity sync.RWMutex + lockDeviceGetCpuAffinityWithinScope sync.RWMutex + lockDeviceGetCreatableVgpus sync.RWMutex + lockDeviceGetCudaComputeCapability sync.RWMutex + lockDeviceGetCurrPcieLinkGeneration sync.RWMutex + lockDeviceGetCurrPcieLinkWidth sync.RWMutex + lockDeviceGetCurrentClockFreqs sync.RWMutex + lockDeviceGetCurrentClocksEventReasons sync.RWMutex + lockDeviceGetCurrentClocksThrottleReasons sync.RWMutex + lockDeviceGetDecoderUtilization sync.RWMutex + lockDeviceGetDefaultApplicationsClock sync.RWMutex + lockDeviceGetDefaultEccMode sync.RWMutex + lockDeviceGetDetailedEccErrors sync.RWMutex + lockDeviceGetDeviceHandleFromMigDeviceHandle 
sync.RWMutex + lockDeviceGetDisplayActive sync.RWMutex + lockDeviceGetDisplayMode sync.RWMutex + lockDeviceGetDramEncryptionMode sync.RWMutex + lockDeviceGetDriverModel sync.RWMutex + lockDeviceGetDriverModel_v2 sync.RWMutex + lockDeviceGetDynamicPstatesInfo sync.RWMutex + lockDeviceGetEccMode sync.RWMutex + lockDeviceGetEncoderCapacity sync.RWMutex + lockDeviceGetEncoderSessions sync.RWMutex + lockDeviceGetEncoderStats sync.RWMutex + lockDeviceGetEncoderUtilization sync.RWMutex + lockDeviceGetEnforcedPowerLimit sync.RWMutex + lockDeviceGetFBCSessions sync.RWMutex + lockDeviceGetFBCStats sync.RWMutex + lockDeviceGetFanControlPolicy_v2 sync.RWMutex + lockDeviceGetFanSpeed sync.RWMutex + lockDeviceGetFanSpeedRPM sync.RWMutex + lockDeviceGetFanSpeed_v2 sync.RWMutex + lockDeviceGetFieldValues sync.RWMutex + lockDeviceGetGpcClkMinMaxVfOffset sync.RWMutex + lockDeviceGetGpcClkVfOffset sync.RWMutex + lockDeviceGetGpuFabricInfo sync.RWMutex + lockDeviceGetGpuFabricInfoV sync.RWMutex + lockDeviceGetGpuInstanceById sync.RWMutex + lockDeviceGetGpuInstanceId sync.RWMutex + lockDeviceGetGpuInstancePossiblePlacements sync.RWMutex + lockDeviceGetGpuInstanceProfileInfo sync.RWMutex + lockDeviceGetGpuInstanceProfileInfoV sync.RWMutex + lockDeviceGetGpuInstanceRemainingCapacity sync.RWMutex + lockDeviceGetGpuInstances sync.RWMutex + lockDeviceGetGpuMaxPcieLinkGeneration sync.RWMutex + lockDeviceGetGpuOperationMode sync.RWMutex + lockDeviceGetGraphicsRunningProcesses sync.RWMutex + lockDeviceGetGridLicensableFeatures sync.RWMutex + lockDeviceGetGspFirmwareMode sync.RWMutex + lockDeviceGetGspFirmwareVersion sync.RWMutex + lockDeviceGetHandleByIndex sync.RWMutex + lockDeviceGetHandleByPciBusId sync.RWMutex + lockDeviceGetHandleBySerial sync.RWMutex + lockDeviceGetHandleByUUID sync.RWMutex + lockDeviceGetHandleByUUIDV sync.RWMutex + lockDeviceGetHostVgpuMode sync.RWMutex + lockDeviceGetIndex sync.RWMutex + lockDeviceGetInforomConfigurationChecksum sync.RWMutex + 
lockDeviceGetInforomImageVersion sync.RWMutex + lockDeviceGetInforomVersion sync.RWMutex + lockDeviceGetIrqNum sync.RWMutex + lockDeviceGetJpgUtilization sync.RWMutex + lockDeviceGetLastBBXFlushTime sync.RWMutex + lockDeviceGetMPSComputeRunningProcesses sync.RWMutex + lockDeviceGetMarginTemperature sync.RWMutex + lockDeviceGetMaxClockInfo sync.RWMutex + lockDeviceGetMaxCustomerBoostClock sync.RWMutex + lockDeviceGetMaxMigDeviceCount sync.RWMutex + lockDeviceGetMaxPcieLinkGeneration sync.RWMutex + lockDeviceGetMaxPcieLinkWidth sync.RWMutex + lockDeviceGetMemClkMinMaxVfOffset sync.RWMutex + lockDeviceGetMemClkVfOffset sync.RWMutex + lockDeviceGetMemoryAffinity sync.RWMutex + lockDeviceGetMemoryBusWidth sync.RWMutex + lockDeviceGetMemoryErrorCounter sync.RWMutex + lockDeviceGetMemoryInfo sync.RWMutex + lockDeviceGetMemoryInfo_v2 sync.RWMutex + lockDeviceGetMigDeviceHandleByIndex sync.RWMutex + lockDeviceGetMigMode sync.RWMutex + lockDeviceGetMinMaxClockOfPState sync.RWMutex + lockDeviceGetMinMaxFanSpeed sync.RWMutex + lockDeviceGetMinorNumber sync.RWMutex + lockDeviceGetModuleId sync.RWMutex + lockDeviceGetMultiGpuBoard sync.RWMutex + lockDeviceGetName sync.RWMutex + lockDeviceGetNumFans sync.RWMutex + lockDeviceGetNumGpuCores sync.RWMutex + lockDeviceGetNumaNodeId sync.RWMutex + lockDeviceGetNvLinkCapability sync.RWMutex + lockDeviceGetNvLinkErrorCounter sync.RWMutex + lockDeviceGetNvLinkRemoteDeviceType sync.RWMutex + lockDeviceGetNvLinkRemotePciInfo sync.RWMutex + lockDeviceGetNvLinkState sync.RWMutex + lockDeviceGetNvLinkUtilizationControl sync.RWMutex + lockDeviceGetNvLinkUtilizationCounter sync.RWMutex + lockDeviceGetNvLinkVersion sync.RWMutex + lockDeviceGetNvlinkBwMode sync.RWMutex + lockDeviceGetNvlinkSupportedBwModes sync.RWMutex + lockDeviceGetOfaUtilization sync.RWMutex + lockDeviceGetP2PStatus sync.RWMutex + lockDeviceGetPciInfo sync.RWMutex + lockDeviceGetPciInfoExt sync.RWMutex + lockDeviceGetPcieLinkMaxSpeed sync.RWMutex + 
lockDeviceGetPcieReplayCounter sync.RWMutex + lockDeviceGetPcieSpeed sync.RWMutex + lockDeviceGetPcieThroughput sync.RWMutex + lockDeviceGetPerformanceModes sync.RWMutex + lockDeviceGetPerformanceState sync.RWMutex + lockDeviceGetPersistenceMode sync.RWMutex + lockDeviceGetPgpuMetadataString sync.RWMutex + lockDeviceGetPlatformInfo sync.RWMutex + lockDeviceGetPowerManagementDefaultLimit sync.RWMutex + lockDeviceGetPowerManagementLimit sync.RWMutex + lockDeviceGetPowerManagementLimitConstraints sync.RWMutex + lockDeviceGetPowerManagementMode sync.RWMutex + lockDeviceGetPowerSource sync.RWMutex + lockDeviceGetPowerState sync.RWMutex + lockDeviceGetPowerUsage sync.RWMutex + lockDeviceGetProcessUtilization sync.RWMutex + lockDeviceGetProcessesUtilizationInfo sync.RWMutex + lockDeviceGetRemappedRows sync.RWMutex + lockDeviceGetRetiredPages sync.RWMutex + lockDeviceGetRetiredPagesPendingStatus sync.RWMutex + lockDeviceGetRetiredPages_v2 sync.RWMutex + lockDeviceGetRowRemapperHistogram sync.RWMutex + lockDeviceGetRunningProcessDetailList sync.RWMutex + lockDeviceGetSamples sync.RWMutex + lockDeviceGetSerial sync.RWMutex + lockDeviceGetSramEccErrorStatus sync.RWMutex + lockDeviceGetSupportedClocksEventReasons sync.RWMutex + lockDeviceGetSupportedClocksThrottleReasons sync.RWMutex + lockDeviceGetSupportedEventTypes sync.RWMutex + lockDeviceGetSupportedGraphicsClocks sync.RWMutex + lockDeviceGetSupportedMemoryClocks sync.RWMutex + lockDeviceGetSupportedPerformanceStates sync.RWMutex + lockDeviceGetSupportedVgpus sync.RWMutex + lockDeviceGetTargetFanSpeed sync.RWMutex + lockDeviceGetTemperature sync.RWMutex + lockDeviceGetTemperatureThreshold sync.RWMutex + lockDeviceGetTemperatureV sync.RWMutex + lockDeviceGetThermalSettings sync.RWMutex + lockDeviceGetTopologyCommonAncestor sync.RWMutex + lockDeviceGetTopologyNearestGpus sync.RWMutex + lockDeviceGetTotalEccErrors sync.RWMutex + lockDeviceGetTotalEnergyConsumption sync.RWMutex + lockDeviceGetUUID sync.RWMutex + 
lockDeviceGetUtilizationRates sync.RWMutex + lockDeviceGetVbiosVersion sync.RWMutex + lockDeviceGetVgpuCapabilities sync.RWMutex + lockDeviceGetVgpuHeterogeneousMode sync.RWMutex + lockDeviceGetVgpuInstancesUtilizationInfo sync.RWMutex + lockDeviceGetVgpuMetadata sync.RWMutex + lockDeviceGetVgpuProcessUtilization sync.RWMutex + lockDeviceGetVgpuProcessesUtilizationInfo sync.RWMutex + lockDeviceGetVgpuSchedulerCapabilities sync.RWMutex + lockDeviceGetVgpuSchedulerLog sync.RWMutex + lockDeviceGetVgpuSchedulerState sync.RWMutex + lockDeviceGetVgpuTypeCreatablePlacements sync.RWMutex + lockDeviceGetVgpuTypeSupportedPlacements sync.RWMutex + lockDeviceGetVgpuUtilization sync.RWMutex + lockDeviceGetViolationStatus sync.RWMutex + lockDeviceGetVirtualizationMode sync.RWMutex + lockDeviceIsMigDeviceHandle sync.RWMutex + lockDeviceModifyDrainState sync.RWMutex + lockDeviceOnSameBoard sync.RWMutex + lockDevicePowerSmoothingActivatePresetProfile sync.RWMutex + lockDevicePowerSmoothingSetState sync.RWMutex + lockDevicePowerSmoothingUpdatePresetProfileParam sync.RWMutex + lockDeviceQueryDrainState sync.RWMutex + lockDeviceRegisterEvents sync.RWMutex + lockDeviceRemoveGpu sync.RWMutex + lockDeviceRemoveGpu_v2 sync.RWMutex + lockDeviceResetApplicationsClocks sync.RWMutex + lockDeviceResetGpuLockedClocks sync.RWMutex + lockDeviceResetMemoryLockedClocks sync.RWMutex + lockDeviceResetNvLinkErrorCounters sync.RWMutex + lockDeviceResetNvLinkUtilizationCounter sync.RWMutex + lockDeviceSetAPIRestriction sync.RWMutex + lockDeviceSetAccountingMode sync.RWMutex + lockDeviceSetApplicationsClocks sync.RWMutex + lockDeviceSetAutoBoostedClocksEnabled sync.RWMutex + lockDeviceSetClockOffsets sync.RWMutex + lockDeviceSetComputeMode sync.RWMutex + lockDeviceSetConfComputeUnprotectedMemSize sync.RWMutex + lockDeviceSetCpuAffinity sync.RWMutex + lockDeviceSetDefaultAutoBoostedClocksEnabled sync.RWMutex + lockDeviceSetDefaultFanSpeed_v2 sync.RWMutex + lockDeviceSetDramEncryptionMode sync.RWMutex + 
lockDeviceSetDriverModel sync.RWMutex + lockDeviceSetEccMode sync.RWMutex + lockDeviceSetFanControlPolicy sync.RWMutex + lockDeviceSetFanSpeed_v2 sync.RWMutex + lockDeviceSetGpcClkVfOffset sync.RWMutex + lockDeviceSetGpuLockedClocks sync.RWMutex + lockDeviceSetGpuOperationMode sync.RWMutex + lockDeviceSetMemClkVfOffset sync.RWMutex + lockDeviceSetMemoryLockedClocks sync.RWMutex + lockDeviceSetMigMode sync.RWMutex + lockDeviceSetNvLinkDeviceLowPowerThreshold sync.RWMutex + lockDeviceSetNvLinkUtilizationControl sync.RWMutex + lockDeviceSetNvlinkBwMode sync.RWMutex + lockDeviceSetPersistenceMode sync.RWMutex + lockDeviceSetPowerManagementLimit sync.RWMutex + lockDeviceSetPowerManagementLimit_v2 sync.RWMutex + lockDeviceSetTemperatureThreshold sync.RWMutex + lockDeviceSetVgpuCapabilities sync.RWMutex + lockDeviceSetVgpuHeterogeneousMode sync.RWMutex + lockDeviceSetVgpuSchedulerState sync.RWMutex + lockDeviceSetVirtualizationMode sync.RWMutex + lockDeviceValidateInforom sync.RWMutex + lockDeviceWorkloadPowerProfileClearRequestedProfiles sync.RWMutex + lockDeviceWorkloadPowerProfileGetCurrentProfiles sync.RWMutex + lockDeviceWorkloadPowerProfileGetProfilesInfo sync.RWMutex + lockDeviceWorkloadPowerProfileSetRequestedProfiles sync.RWMutex + lockErrorString sync.RWMutex + lockEventSetCreate sync.RWMutex + lockEventSetFree sync.RWMutex + lockEventSetWait sync.RWMutex + lockExtensions sync.RWMutex + lockGetExcludedDeviceCount sync.RWMutex + lockGetExcludedDeviceInfoByIndex sync.RWMutex + lockGetVgpuCompatibility sync.RWMutex + lockGetVgpuDriverCapabilities sync.RWMutex + lockGetVgpuVersion sync.RWMutex + lockGpmMetricsGet sync.RWMutex + lockGpmMetricsGetV sync.RWMutex + lockGpmMigSampleGet sync.RWMutex + lockGpmQueryDeviceSupport sync.RWMutex + lockGpmQueryDeviceSupportV sync.RWMutex + lockGpmQueryIfStreamingEnabled sync.RWMutex + lockGpmSampleAlloc sync.RWMutex + lockGpmSampleFree sync.RWMutex + lockGpmSampleGet sync.RWMutex + lockGpmSetStreamingEnabled sync.RWMutex + 
lockGpuInstanceCreateComputeInstance sync.RWMutex + lockGpuInstanceCreateComputeInstanceWithPlacement sync.RWMutex + lockGpuInstanceDestroy sync.RWMutex + lockGpuInstanceGetActiveVgpus sync.RWMutex + lockGpuInstanceGetComputeInstanceById sync.RWMutex + lockGpuInstanceGetComputeInstancePossiblePlacements sync.RWMutex + lockGpuInstanceGetComputeInstanceProfileInfo sync.RWMutex + lockGpuInstanceGetComputeInstanceProfileInfoV sync.RWMutex + lockGpuInstanceGetComputeInstanceRemainingCapacity sync.RWMutex + lockGpuInstanceGetComputeInstances sync.RWMutex + lockGpuInstanceGetCreatableVgpus sync.RWMutex + lockGpuInstanceGetInfo sync.RWMutex + lockGpuInstanceGetVgpuHeterogeneousMode sync.RWMutex + lockGpuInstanceGetVgpuSchedulerLog sync.RWMutex + lockGpuInstanceGetVgpuSchedulerState sync.RWMutex + lockGpuInstanceGetVgpuTypeCreatablePlacements sync.RWMutex + lockGpuInstanceSetVgpuHeterogeneousMode sync.RWMutex + lockGpuInstanceSetVgpuSchedulerState sync.RWMutex + lockInit sync.RWMutex + lockInitWithFlags sync.RWMutex + lockSetVgpuVersion sync.RWMutex + lockShutdown sync.RWMutex + lockSystemEventSetCreate sync.RWMutex + lockSystemEventSetFree sync.RWMutex + lockSystemEventSetWait sync.RWMutex + lockSystemGetConfComputeCapabilities sync.RWMutex + lockSystemGetConfComputeGpusReadyState sync.RWMutex + lockSystemGetConfComputeKeyRotationThresholdInfo sync.RWMutex + lockSystemGetConfComputeSettings sync.RWMutex + lockSystemGetConfComputeState sync.RWMutex + lockSystemGetCudaDriverVersion sync.RWMutex + lockSystemGetCudaDriverVersion_v2 sync.RWMutex + lockSystemGetDriverBranch sync.RWMutex + lockSystemGetDriverVersion sync.RWMutex + lockSystemGetHicVersion sync.RWMutex + lockSystemGetNVMLVersion sync.RWMutex + lockSystemGetNvlinkBwMode sync.RWMutex + lockSystemGetProcessName sync.RWMutex + lockSystemGetTopologyGpuSet sync.RWMutex + lockSystemRegisterEvents sync.RWMutex + lockSystemSetConfComputeGpusReadyState sync.RWMutex + lockSystemSetConfComputeKeyRotationThresholdInfo 
sync.RWMutex + lockSystemSetNvlinkBwMode sync.RWMutex + lockUnitGetCount sync.RWMutex + lockUnitGetDevices sync.RWMutex + lockUnitGetFanSpeedInfo sync.RWMutex + lockUnitGetHandleByIndex sync.RWMutex + lockUnitGetLedState sync.RWMutex + lockUnitGetPsuInfo sync.RWMutex + lockUnitGetTemperature sync.RWMutex + lockUnitGetUnitInfo sync.RWMutex + lockUnitSetLedState sync.RWMutex + lockVgpuInstanceClearAccountingPids sync.RWMutex + lockVgpuInstanceGetAccountingMode sync.RWMutex + lockVgpuInstanceGetAccountingPids sync.RWMutex + lockVgpuInstanceGetAccountingStats sync.RWMutex + lockVgpuInstanceGetEccMode sync.RWMutex + lockVgpuInstanceGetEncoderCapacity sync.RWMutex + lockVgpuInstanceGetEncoderSessions sync.RWMutex + lockVgpuInstanceGetEncoderStats sync.RWMutex + lockVgpuInstanceGetFBCSessions sync.RWMutex + lockVgpuInstanceGetFBCStats sync.RWMutex + lockVgpuInstanceGetFbUsage sync.RWMutex + lockVgpuInstanceGetFrameRateLimit sync.RWMutex + lockVgpuInstanceGetGpuInstanceId sync.RWMutex + lockVgpuInstanceGetGpuPciId sync.RWMutex + lockVgpuInstanceGetLicenseInfo sync.RWMutex + lockVgpuInstanceGetLicenseStatus sync.RWMutex + lockVgpuInstanceGetMdevUUID sync.RWMutex + lockVgpuInstanceGetMetadata sync.RWMutex + lockVgpuInstanceGetRuntimeStateSize sync.RWMutex + lockVgpuInstanceGetType sync.RWMutex + lockVgpuInstanceGetUUID sync.RWMutex + lockVgpuInstanceGetVmDriverVersion sync.RWMutex + lockVgpuInstanceGetVmID sync.RWMutex + lockVgpuInstanceSetEncoderCapacity sync.RWMutex + lockVgpuTypeGetBAR1Info sync.RWMutex + lockVgpuTypeGetCapabilities sync.RWMutex + lockVgpuTypeGetClass sync.RWMutex + lockVgpuTypeGetDeviceID sync.RWMutex + lockVgpuTypeGetFrameRateLimit sync.RWMutex + lockVgpuTypeGetFramebufferSize sync.RWMutex + lockVgpuTypeGetGpuInstanceProfileId sync.RWMutex + lockVgpuTypeGetLicense sync.RWMutex + lockVgpuTypeGetMaxInstances sync.RWMutex + lockVgpuTypeGetMaxInstancesPerGpuInstance sync.RWMutex + lockVgpuTypeGetMaxInstancesPerVm sync.RWMutex + lockVgpuTypeGetName 
sync.RWMutex + lockVgpuTypeGetNumDisplayHeads sync.RWMutex + lockVgpuTypeGetResolution sync.RWMutex } // ComputeInstanceDestroy calls ComputeInstanceDestroyFunc. @@ -3842,74 +4762,6 @@ func (mock *Interface) ComputeInstanceGetInfoCalls() []struct { return calls } -// DeviceCcuGetStreamState calls DeviceCcuGetStreamStateFunc. -func (mock *Interface) DeviceCcuGetStreamState(device nvml.Device) (int, nvml.Return) { - if mock.DeviceCcuGetStreamStateFunc == nil { - panic("Interface.DeviceCcuGetStreamStateFunc: method is nil but Interface.DeviceCcuGetStreamState was just called") - } - callInfo := struct { - Device nvml.Device - }{ - Device: device, - } - mock.lockDeviceCcuGetStreamState.Lock() - mock.calls.DeviceCcuGetStreamState = append(mock.calls.DeviceCcuGetStreamState, callInfo) - mock.lockDeviceCcuGetStreamState.Unlock() - return mock.DeviceCcuGetStreamStateFunc(device) -} - -// DeviceCcuGetStreamStateCalls gets all the calls that were made to DeviceCcuGetStreamState. -// Check the length with: -// -// len(mockedInterface.DeviceCcuGetStreamStateCalls()) -func (mock *Interface) DeviceCcuGetStreamStateCalls() []struct { - Device nvml.Device -} { - var calls []struct { - Device nvml.Device - } - mock.lockDeviceCcuGetStreamState.RLock() - calls = mock.calls.DeviceCcuGetStreamState - mock.lockDeviceCcuGetStreamState.RUnlock() - return calls -} - -// DeviceCcuSetStreamState calls DeviceCcuSetStreamStateFunc. 
-func (mock *Interface) DeviceCcuSetStreamState(device nvml.Device, n int) nvml.Return { - if mock.DeviceCcuSetStreamStateFunc == nil { - panic("Interface.DeviceCcuSetStreamStateFunc: method is nil but Interface.DeviceCcuSetStreamState was just called") - } - callInfo := struct { - Device nvml.Device - N int - }{ - Device: device, - N: n, - } - mock.lockDeviceCcuSetStreamState.Lock() - mock.calls.DeviceCcuSetStreamState = append(mock.calls.DeviceCcuSetStreamState, callInfo) - mock.lockDeviceCcuSetStreamState.Unlock() - return mock.DeviceCcuSetStreamStateFunc(device, n) -} - -// DeviceCcuSetStreamStateCalls gets all the calls that were made to DeviceCcuSetStreamState. -// Check the length with: -// -// len(mockedInterface.DeviceCcuSetStreamStateCalls()) -func (mock *Interface) DeviceCcuSetStreamStateCalls() []struct { - Device nvml.Device - N int -} { - var calls []struct { - Device nvml.Device - N int - } - mock.lockDeviceCcuSetStreamState.RLock() - calls = mock.calls.DeviceCcuSetStreamState - mock.lockDeviceCcuSetStreamState.RUnlock() - return calls -} - // DeviceClearAccountingPids calls DeviceClearAccountingPidsFunc. func (mock *Interface) DeviceClearAccountingPids(device nvml.Device) nvml.Return { if mock.DeviceClearAccountingPidsFunc == nil { @@ -4749,6 +5601,70 @@ func (mock *Interface) DeviceGetBusTypeCalls() []struct { return calls } +// DeviceGetC2cModeInfoV calls DeviceGetC2cModeInfoVFunc. 
+func (mock *Interface) DeviceGetC2cModeInfoV(device nvml.Device) nvml.C2cModeInfoHandler { + if mock.DeviceGetC2cModeInfoVFunc == nil { + panic("Interface.DeviceGetC2cModeInfoVFunc: method is nil but Interface.DeviceGetC2cModeInfoV was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetC2cModeInfoV.Lock() + mock.calls.DeviceGetC2cModeInfoV = append(mock.calls.DeviceGetC2cModeInfoV, callInfo) + mock.lockDeviceGetC2cModeInfoV.Unlock() + return mock.DeviceGetC2cModeInfoVFunc(device) +} + +// DeviceGetC2cModeInfoVCalls gets all the calls that were made to DeviceGetC2cModeInfoV. +// Check the length with: +// +// len(mockedInterface.DeviceGetC2cModeInfoVCalls()) +func (mock *Interface) DeviceGetC2cModeInfoVCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetC2cModeInfoV.RLock() + calls = mock.calls.DeviceGetC2cModeInfoV + mock.lockDeviceGetC2cModeInfoV.RUnlock() + return calls +} + +// DeviceGetCapabilities calls DeviceGetCapabilitiesFunc. +func (mock *Interface) DeviceGetCapabilities(device nvml.Device) (nvml.DeviceCapabilities, nvml.Return) { + if mock.DeviceGetCapabilitiesFunc == nil { + panic("Interface.DeviceGetCapabilitiesFunc: method is nil but Interface.DeviceGetCapabilities was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetCapabilities.Lock() + mock.calls.DeviceGetCapabilities = append(mock.calls.DeviceGetCapabilities, callInfo) + mock.lockDeviceGetCapabilities.Unlock() + return mock.DeviceGetCapabilitiesFunc(device) +} + +// DeviceGetCapabilitiesCalls gets all the calls that were made to DeviceGetCapabilities. 
+// Check the length with: +// +// len(mockedInterface.DeviceGetCapabilitiesCalls()) +func (mock *Interface) DeviceGetCapabilitiesCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetCapabilities.RLock() + calls = mock.calls.DeviceGetCapabilities + mock.lockDeviceGetCapabilities.RUnlock() + return calls +} + // DeviceGetClkMonStatus calls DeviceGetClkMonStatusFunc. func (mock *Interface) DeviceGetClkMonStatus(device nvml.Device) (nvml.ClkMonStatus, nvml.Return) { if mock.DeviceGetClkMonStatusFunc == nil { @@ -4857,6 +5773,38 @@ func (mock *Interface) DeviceGetClockInfoCalls() []struct { return calls } +// DeviceGetClockOffsets calls DeviceGetClockOffsetsFunc. +func (mock *Interface) DeviceGetClockOffsets(device nvml.Device) (nvml.ClockOffset, nvml.Return) { + if mock.DeviceGetClockOffsetsFunc == nil { + panic("Interface.DeviceGetClockOffsetsFunc: method is nil but Interface.DeviceGetClockOffsets was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetClockOffsets.Lock() + mock.calls.DeviceGetClockOffsets = append(mock.calls.DeviceGetClockOffsets, callInfo) + mock.lockDeviceGetClockOffsets.Unlock() + return mock.DeviceGetClockOffsetsFunc(device) +} + +// DeviceGetClockOffsetsCalls gets all the calls that were made to DeviceGetClockOffsets. +// Check the length with: +// +// len(mockedInterface.DeviceGetClockOffsetsCalls()) +func (mock *Interface) DeviceGetClockOffsetsCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetClockOffsets.RLock() + calls = mock.calls.DeviceGetClockOffsets + mock.lockDeviceGetClockOffsets.RUnlock() + return calls +} + // DeviceGetComputeInstanceId calls DeviceGetComputeInstanceIdFunc. 
func (mock *Interface) DeviceGetComputeInstanceId(device nvml.Device) (int, nvml.Return) { if mock.DeviceGetComputeInstanceIdFunc == nil { @@ -4953,55 +5901,215 @@ func (mock *Interface) DeviceGetComputeRunningProcessesCalls() []struct { return calls } -// DeviceGetCount calls DeviceGetCountFunc. -func (mock *Interface) DeviceGetCount() (int, nvml.Return) { - if mock.DeviceGetCountFunc == nil { - panic("Interface.DeviceGetCountFunc: method is nil but Interface.DeviceGetCount was just called") +// DeviceGetConfComputeGpuAttestationReport calls DeviceGetConfComputeGpuAttestationReportFunc. +func (mock *Interface) DeviceGetConfComputeGpuAttestationReport(device nvml.Device) (nvml.ConfComputeGpuAttestationReport, nvml.Return) { + if mock.DeviceGetConfComputeGpuAttestationReportFunc == nil { + panic("Interface.DeviceGetConfComputeGpuAttestationReportFunc: method is nil but Interface.DeviceGetConfComputeGpuAttestationReport was just called") } callInfo := struct { - }{} - mock.lockDeviceGetCount.Lock() - mock.calls.DeviceGetCount = append(mock.calls.DeviceGetCount, callInfo) - mock.lockDeviceGetCount.Unlock() - return mock.DeviceGetCountFunc() + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetConfComputeGpuAttestationReport.Lock() + mock.calls.DeviceGetConfComputeGpuAttestationReport = append(mock.calls.DeviceGetConfComputeGpuAttestationReport, callInfo) + mock.lockDeviceGetConfComputeGpuAttestationReport.Unlock() + return mock.DeviceGetConfComputeGpuAttestationReportFunc(device) } -// DeviceGetCountCalls gets all the calls that were made to DeviceGetCount. +// DeviceGetConfComputeGpuAttestationReportCalls gets all the calls that were made to DeviceGetConfComputeGpuAttestationReport. 
// Check the length with: // -// len(mockedInterface.DeviceGetCountCalls()) -func (mock *Interface) DeviceGetCountCalls() []struct { +// len(mockedInterface.DeviceGetConfComputeGpuAttestationReportCalls()) +func (mock *Interface) DeviceGetConfComputeGpuAttestationReportCalls() []struct { + Device nvml.Device } { var calls []struct { + Device nvml.Device } - mock.lockDeviceGetCount.RLock() - calls = mock.calls.DeviceGetCount - mock.lockDeviceGetCount.RUnlock() + mock.lockDeviceGetConfComputeGpuAttestationReport.RLock() + calls = mock.calls.DeviceGetConfComputeGpuAttestationReport + mock.lockDeviceGetConfComputeGpuAttestationReport.RUnlock() return calls } -// DeviceGetCpuAffinity calls DeviceGetCpuAffinityFunc. -func (mock *Interface) DeviceGetCpuAffinity(device nvml.Device, n int) ([]uint, nvml.Return) { - if mock.DeviceGetCpuAffinityFunc == nil { - panic("Interface.DeviceGetCpuAffinityFunc: method is nil but Interface.DeviceGetCpuAffinity was just called") +// DeviceGetConfComputeGpuCertificate calls DeviceGetConfComputeGpuCertificateFunc. 
+func (mock *Interface) DeviceGetConfComputeGpuCertificate(device nvml.Device) (nvml.ConfComputeGpuCertificate, nvml.Return) { + if mock.DeviceGetConfComputeGpuCertificateFunc == nil { + panic("Interface.DeviceGetConfComputeGpuCertificateFunc: method is nil but Interface.DeviceGetConfComputeGpuCertificate was just called") } callInfo := struct { Device nvml.Device - N int }{ Device: device, - N: n, } - mock.lockDeviceGetCpuAffinity.Lock() - mock.calls.DeviceGetCpuAffinity = append(mock.calls.DeviceGetCpuAffinity, callInfo) - mock.lockDeviceGetCpuAffinity.Unlock() - return mock.DeviceGetCpuAffinityFunc(device, n) + mock.lockDeviceGetConfComputeGpuCertificate.Lock() + mock.calls.DeviceGetConfComputeGpuCertificate = append(mock.calls.DeviceGetConfComputeGpuCertificate, callInfo) + mock.lockDeviceGetConfComputeGpuCertificate.Unlock() + return mock.DeviceGetConfComputeGpuCertificateFunc(device) } -// DeviceGetCpuAffinityCalls gets all the calls that were made to DeviceGetCpuAffinity. +// DeviceGetConfComputeGpuCertificateCalls gets all the calls that were made to DeviceGetConfComputeGpuCertificate. // Check the length with: // -// len(mockedInterface.DeviceGetCpuAffinityCalls()) +// len(mockedInterface.DeviceGetConfComputeGpuCertificateCalls()) +func (mock *Interface) DeviceGetConfComputeGpuCertificateCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetConfComputeGpuCertificate.RLock() + calls = mock.calls.DeviceGetConfComputeGpuCertificate + mock.lockDeviceGetConfComputeGpuCertificate.RUnlock() + return calls +} + +// DeviceGetConfComputeMemSizeInfo calls DeviceGetConfComputeMemSizeInfoFunc. 
+func (mock *Interface) DeviceGetConfComputeMemSizeInfo(device nvml.Device) (nvml.ConfComputeMemSizeInfo, nvml.Return) { + if mock.DeviceGetConfComputeMemSizeInfoFunc == nil { + panic("Interface.DeviceGetConfComputeMemSizeInfoFunc: method is nil but Interface.DeviceGetConfComputeMemSizeInfo was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetConfComputeMemSizeInfo.Lock() + mock.calls.DeviceGetConfComputeMemSizeInfo = append(mock.calls.DeviceGetConfComputeMemSizeInfo, callInfo) + mock.lockDeviceGetConfComputeMemSizeInfo.Unlock() + return mock.DeviceGetConfComputeMemSizeInfoFunc(device) +} + +// DeviceGetConfComputeMemSizeInfoCalls gets all the calls that were made to DeviceGetConfComputeMemSizeInfo. +// Check the length with: +// +// len(mockedInterface.DeviceGetConfComputeMemSizeInfoCalls()) +func (mock *Interface) DeviceGetConfComputeMemSizeInfoCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetConfComputeMemSizeInfo.RLock() + calls = mock.calls.DeviceGetConfComputeMemSizeInfo + mock.lockDeviceGetConfComputeMemSizeInfo.RUnlock() + return calls +} + +// DeviceGetConfComputeProtectedMemoryUsage calls DeviceGetConfComputeProtectedMemoryUsageFunc. 
+func (mock *Interface) DeviceGetConfComputeProtectedMemoryUsage(device nvml.Device) (nvml.Memory, nvml.Return) { + if mock.DeviceGetConfComputeProtectedMemoryUsageFunc == nil { + panic("Interface.DeviceGetConfComputeProtectedMemoryUsageFunc: method is nil but Interface.DeviceGetConfComputeProtectedMemoryUsage was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetConfComputeProtectedMemoryUsage.Lock() + mock.calls.DeviceGetConfComputeProtectedMemoryUsage = append(mock.calls.DeviceGetConfComputeProtectedMemoryUsage, callInfo) + mock.lockDeviceGetConfComputeProtectedMemoryUsage.Unlock() + return mock.DeviceGetConfComputeProtectedMemoryUsageFunc(device) +} + +// DeviceGetConfComputeProtectedMemoryUsageCalls gets all the calls that were made to DeviceGetConfComputeProtectedMemoryUsage. +// Check the length with: +// +// len(mockedInterface.DeviceGetConfComputeProtectedMemoryUsageCalls()) +func (mock *Interface) DeviceGetConfComputeProtectedMemoryUsageCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetConfComputeProtectedMemoryUsage.RLock() + calls = mock.calls.DeviceGetConfComputeProtectedMemoryUsage + mock.lockDeviceGetConfComputeProtectedMemoryUsage.RUnlock() + return calls +} + +// DeviceGetCoolerInfo calls DeviceGetCoolerInfoFunc. +func (mock *Interface) DeviceGetCoolerInfo(device nvml.Device) (nvml.CoolerInfo, nvml.Return) { + if mock.DeviceGetCoolerInfoFunc == nil { + panic("Interface.DeviceGetCoolerInfoFunc: method is nil but Interface.DeviceGetCoolerInfo was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetCoolerInfo.Lock() + mock.calls.DeviceGetCoolerInfo = append(mock.calls.DeviceGetCoolerInfo, callInfo) + mock.lockDeviceGetCoolerInfo.Unlock() + return mock.DeviceGetCoolerInfoFunc(device) +} + +// DeviceGetCoolerInfoCalls gets all the calls that were made to DeviceGetCoolerInfo. 
+// Check the length with: +// +// len(mockedInterface.DeviceGetCoolerInfoCalls()) +func (mock *Interface) DeviceGetCoolerInfoCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetCoolerInfo.RLock() + calls = mock.calls.DeviceGetCoolerInfo + mock.lockDeviceGetCoolerInfo.RUnlock() + return calls +} + +// DeviceGetCount calls DeviceGetCountFunc. +func (mock *Interface) DeviceGetCount() (int, nvml.Return) { + if mock.DeviceGetCountFunc == nil { + panic("Interface.DeviceGetCountFunc: method is nil but Interface.DeviceGetCount was just called") + } + callInfo := struct { + }{} + mock.lockDeviceGetCount.Lock() + mock.calls.DeviceGetCount = append(mock.calls.DeviceGetCount, callInfo) + mock.lockDeviceGetCount.Unlock() + return mock.DeviceGetCountFunc() +} + +// DeviceGetCountCalls gets all the calls that were made to DeviceGetCount. +// Check the length with: +// +// len(mockedInterface.DeviceGetCountCalls()) +func (mock *Interface) DeviceGetCountCalls() []struct { +} { + var calls []struct { + } + mock.lockDeviceGetCount.RLock() + calls = mock.calls.DeviceGetCount + mock.lockDeviceGetCount.RUnlock() + return calls +} + +// DeviceGetCpuAffinity calls DeviceGetCpuAffinityFunc. +func (mock *Interface) DeviceGetCpuAffinity(device nvml.Device, n int) ([]uint, nvml.Return) { + if mock.DeviceGetCpuAffinityFunc == nil { + panic("Interface.DeviceGetCpuAffinityFunc: method is nil but Interface.DeviceGetCpuAffinity was just called") + } + callInfo := struct { + Device nvml.Device + N int + }{ + Device: device, + N: n, + } + mock.lockDeviceGetCpuAffinity.Lock() + mock.calls.DeviceGetCpuAffinity = append(mock.calls.DeviceGetCpuAffinity, callInfo) + mock.lockDeviceGetCpuAffinity.Unlock() + return mock.DeviceGetCpuAffinityFunc(device, n) +} + +// DeviceGetCpuAffinityCalls gets all the calls that were made to DeviceGetCpuAffinity. 
+// Check the length with: +// +// len(mockedInterface.DeviceGetCpuAffinityCalls()) func (mock *Interface) DeviceGetCpuAffinityCalls() []struct { Device nvml.Device N int @@ -5184,6 +6292,70 @@ func (mock *Interface) DeviceGetCurrPcieLinkWidthCalls() []struct { return calls } +// DeviceGetCurrentClockFreqs calls DeviceGetCurrentClockFreqsFunc. +func (mock *Interface) DeviceGetCurrentClockFreqs(device nvml.Device) (nvml.DeviceCurrentClockFreqs, nvml.Return) { + if mock.DeviceGetCurrentClockFreqsFunc == nil { + panic("Interface.DeviceGetCurrentClockFreqsFunc: method is nil but Interface.DeviceGetCurrentClockFreqs was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetCurrentClockFreqs.Lock() + mock.calls.DeviceGetCurrentClockFreqs = append(mock.calls.DeviceGetCurrentClockFreqs, callInfo) + mock.lockDeviceGetCurrentClockFreqs.Unlock() + return mock.DeviceGetCurrentClockFreqsFunc(device) +} + +// DeviceGetCurrentClockFreqsCalls gets all the calls that were made to DeviceGetCurrentClockFreqs. +// Check the length with: +// +// len(mockedInterface.DeviceGetCurrentClockFreqsCalls()) +func (mock *Interface) DeviceGetCurrentClockFreqsCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetCurrentClockFreqs.RLock() + calls = mock.calls.DeviceGetCurrentClockFreqs + mock.lockDeviceGetCurrentClockFreqs.RUnlock() + return calls +} + +// DeviceGetCurrentClocksEventReasons calls DeviceGetCurrentClocksEventReasonsFunc. 
+func (mock *Interface) DeviceGetCurrentClocksEventReasons(device nvml.Device) (uint64, nvml.Return) { + if mock.DeviceGetCurrentClocksEventReasonsFunc == nil { + panic("Interface.DeviceGetCurrentClocksEventReasonsFunc: method is nil but Interface.DeviceGetCurrentClocksEventReasons was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetCurrentClocksEventReasons.Lock() + mock.calls.DeviceGetCurrentClocksEventReasons = append(mock.calls.DeviceGetCurrentClocksEventReasons, callInfo) + mock.lockDeviceGetCurrentClocksEventReasons.Unlock() + return mock.DeviceGetCurrentClocksEventReasonsFunc(device) +} + +// DeviceGetCurrentClocksEventReasonsCalls gets all the calls that were made to DeviceGetCurrentClocksEventReasons. +// Check the length with: +// +// len(mockedInterface.DeviceGetCurrentClocksEventReasonsCalls()) +func (mock *Interface) DeviceGetCurrentClocksEventReasonsCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetCurrentClocksEventReasons.RLock() + calls = mock.calls.DeviceGetCurrentClocksEventReasons + mock.lockDeviceGetCurrentClocksEventReasons.RUnlock() + return calls +} + // DeviceGetCurrentClocksThrottleReasons calls DeviceGetCurrentClocksThrottleReasonsFunc. func (mock *Interface) DeviceGetCurrentClocksThrottleReasons(device nvml.Device) (uint64, nvml.Return) { if mock.DeviceGetCurrentClocksThrottleReasonsFunc == nil { @@ -5452,6 +6624,38 @@ func (mock *Interface) DeviceGetDisplayModeCalls() []struct { return calls } +// DeviceGetDramEncryptionMode calls DeviceGetDramEncryptionModeFunc. 
+func (mock *Interface) DeviceGetDramEncryptionMode(device nvml.Device) (nvml.DramEncryptionInfo, nvml.DramEncryptionInfo, nvml.Return) { + if mock.DeviceGetDramEncryptionModeFunc == nil { + panic("Interface.DeviceGetDramEncryptionModeFunc: method is nil but Interface.DeviceGetDramEncryptionMode was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetDramEncryptionMode.Lock() + mock.calls.DeviceGetDramEncryptionMode = append(mock.calls.DeviceGetDramEncryptionMode, callInfo) + mock.lockDeviceGetDramEncryptionMode.Unlock() + return mock.DeviceGetDramEncryptionModeFunc(device) +} + +// DeviceGetDramEncryptionModeCalls gets all the calls that were made to DeviceGetDramEncryptionMode. +// Check the length with: +// +// len(mockedInterface.DeviceGetDramEncryptionModeCalls()) +func (mock *Interface) DeviceGetDramEncryptionModeCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetDramEncryptionMode.RLock() + calls = mock.calls.DeviceGetDramEncryptionMode + mock.lockDeviceGetDramEncryptionMode.RUnlock() + return calls +} + // DeviceGetDriverModel calls DeviceGetDriverModelFunc. func (mock *Interface) DeviceGetDriverModel(device nvml.Device) (nvml.DriverModel, nvml.DriverModel, nvml.Return) { if mock.DeviceGetDriverModelFunc == nil { @@ -5484,6 +6688,38 @@ func (mock *Interface) DeviceGetDriverModelCalls() []struct { return calls } +// DeviceGetDriverModel_v2 calls DeviceGetDriverModel_v2Func. 
+func (mock *Interface) DeviceGetDriverModel_v2(device nvml.Device) (nvml.DriverModel, nvml.DriverModel, nvml.Return) { + if mock.DeviceGetDriverModel_v2Func == nil { + panic("Interface.DeviceGetDriverModel_v2Func: method is nil but Interface.DeviceGetDriverModel_v2 was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetDriverModel_v2.Lock() + mock.calls.DeviceGetDriverModel_v2 = append(mock.calls.DeviceGetDriverModel_v2, callInfo) + mock.lockDeviceGetDriverModel_v2.Unlock() + return mock.DeviceGetDriverModel_v2Func(device) +} + +// DeviceGetDriverModel_v2Calls gets all the calls that were made to DeviceGetDriverModel_v2. +// Check the length with: +// +// len(mockedInterface.DeviceGetDriverModel_v2Calls()) +func (mock *Interface) DeviceGetDriverModel_v2Calls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetDriverModel_v2.RLock() + calls = mock.calls.DeviceGetDriverModel_v2 + mock.lockDeviceGetDriverModel_v2.RUnlock() + return calls +} + // DeviceGetDynamicPstatesInfo calls DeviceGetDynamicPstatesInfoFunc. func (mock *Interface) DeviceGetDynamicPstatesInfo(device nvml.Device) (nvml.GpuDynamicPstatesInfo, nvml.Return) { if mock.DeviceGetDynamicPstatesInfoFunc == nil { @@ -5844,6 +7080,38 @@ func (mock *Interface) DeviceGetFanSpeedCalls() []struct { return calls } +// DeviceGetFanSpeedRPM calls DeviceGetFanSpeedRPMFunc. 
+func (mock *Interface) DeviceGetFanSpeedRPM(device nvml.Device) (nvml.FanSpeedInfo, nvml.Return) { + if mock.DeviceGetFanSpeedRPMFunc == nil { + panic("Interface.DeviceGetFanSpeedRPMFunc: method is nil but Interface.DeviceGetFanSpeedRPM was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetFanSpeedRPM.Lock() + mock.calls.DeviceGetFanSpeedRPM = append(mock.calls.DeviceGetFanSpeedRPM, callInfo) + mock.lockDeviceGetFanSpeedRPM.Unlock() + return mock.DeviceGetFanSpeedRPMFunc(device) +} + +// DeviceGetFanSpeedRPMCalls gets all the calls that were made to DeviceGetFanSpeedRPM. +// Check the length with: +// +// len(mockedInterface.DeviceGetFanSpeedRPMCalls()) +func (mock *Interface) DeviceGetFanSpeedRPMCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetFanSpeedRPM.RLock() + calls = mock.calls.DeviceGetFanSpeedRPM + mock.lockDeviceGetFanSpeedRPM.RUnlock() + return calls +} + // DeviceGetFanSpeed_v2 calls DeviceGetFanSpeed_v2Func. func (mock *Interface) DeviceGetFanSpeed_v2(device nvml.Device, n int) (uint32, nvml.Return) { if mock.DeviceGetFanSpeed_v2Func == nil { @@ -6012,6 +7280,38 @@ func (mock *Interface) DeviceGetGpuFabricInfoCalls() []struct { return calls } +// DeviceGetGpuFabricInfoV calls DeviceGetGpuFabricInfoVFunc. 
+func (mock *Interface) DeviceGetGpuFabricInfoV(device nvml.Device) nvml.GpuFabricInfoHandler { + if mock.DeviceGetGpuFabricInfoVFunc == nil { + panic("Interface.DeviceGetGpuFabricInfoVFunc: method is nil but Interface.DeviceGetGpuFabricInfoV was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetGpuFabricInfoV.Lock() + mock.calls.DeviceGetGpuFabricInfoV = append(mock.calls.DeviceGetGpuFabricInfoV, callInfo) + mock.lockDeviceGetGpuFabricInfoV.Unlock() + return mock.DeviceGetGpuFabricInfoVFunc(device) +} + +// DeviceGetGpuFabricInfoVCalls gets all the calls that were made to DeviceGetGpuFabricInfoV. +// Check the length with: +// +// len(mockedInterface.DeviceGetGpuFabricInfoVCalls()) +func (mock *Interface) DeviceGetGpuFabricInfoVCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetGpuFabricInfoV.RLock() + calls = mock.calls.DeviceGetGpuFabricInfoV + mock.lockDeviceGetGpuFabricInfoV.RUnlock() + return calls +} + // DeviceGetGpuInstanceById calls DeviceGetGpuInstanceByIdFunc. func (mock *Interface) DeviceGetGpuInstanceById(device nvml.Device, n int) (nvml.GpuInstance, nvml.Return) { if mock.DeviceGetGpuInstanceByIdFunc == nil { @@ -6153,7 +7453,7 @@ func (mock *Interface) DeviceGetGpuInstanceProfileInfoCalls() []struct { } // DeviceGetGpuInstanceProfileInfoV calls DeviceGetGpuInstanceProfileInfoVFunc. 
-func (mock *Interface) DeviceGetGpuInstanceProfileInfoV(device nvml.Device, n int) nvml.GpuInstanceProfileInfoV { +func (mock *Interface) DeviceGetGpuInstanceProfileInfoV(device nvml.Device, n int) nvml.GpuInstanceProfileInfoHandler { if mock.DeviceGetGpuInstanceProfileInfoVFunc == nil { panic("Interface.DeviceGetGpuInstanceProfileInfoVFunc: method is nil but Interface.DeviceGetGpuInstanceProfileInfoV was just called") } @@ -6580,6 +7880,38 @@ func (mock *Interface) DeviceGetHandleByUUIDCalls() []struct { return calls } +// DeviceGetHandleByUUIDV calls DeviceGetHandleByUUIDVFunc. +func (mock *Interface) DeviceGetHandleByUUIDV(uUID *nvml.UUID) (nvml.Device, nvml.Return) { + if mock.DeviceGetHandleByUUIDVFunc == nil { + panic("Interface.DeviceGetHandleByUUIDVFunc: method is nil but Interface.DeviceGetHandleByUUIDV was just called") + } + callInfo := struct { + UUID *nvml.UUID + }{ + UUID: uUID, + } + mock.lockDeviceGetHandleByUUIDV.Lock() + mock.calls.DeviceGetHandleByUUIDV = append(mock.calls.DeviceGetHandleByUUIDV, callInfo) + mock.lockDeviceGetHandleByUUIDV.Unlock() + return mock.DeviceGetHandleByUUIDVFunc(uUID) +} + +// DeviceGetHandleByUUIDVCalls gets all the calls that were made to DeviceGetHandleByUUIDV. +// Check the length with: +// +// len(mockedInterface.DeviceGetHandleByUUIDVCalls()) +func (mock *Interface) DeviceGetHandleByUUIDVCalls() []struct { + UUID *nvml.UUID +} { + var calls []struct { + UUID *nvml.UUID + } + mock.lockDeviceGetHandleByUUIDV.RLock() + calls = mock.calls.DeviceGetHandleByUUIDV + mock.lockDeviceGetHandleByUUIDV.RUnlock() + return calls +} + // DeviceGetHostVgpuMode calls DeviceGetHostVgpuModeFunc. func (mock *Interface) DeviceGetHostVgpuMode(device nvml.Device) (nvml.HostVgpuMode, nvml.Return) { if mock.DeviceGetHostVgpuModeFunc == nil { @@ -6776,49 +8108,145 @@ func (mock *Interface) DeviceGetIrqNumCalls() []struct { return calls } -// DeviceGetMPSComputeRunningProcesses calls DeviceGetMPSComputeRunningProcessesFunc. 
-func (mock *Interface) DeviceGetMPSComputeRunningProcesses(device nvml.Device) ([]nvml.ProcessInfo, nvml.Return) { - if mock.DeviceGetMPSComputeRunningProcessesFunc == nil { - panic("Interface.DeviceGetMPSComputeRunningProcessesFunc: method is nil but Interface.DeviceGetMPSComputeRunningProcesses was just called") +// DeviceGetJpgUtilization calls DeviceGetJpgUtilizationFunc. +func (mock *Interface) DeviceGetJpgUtilization(device nvml.Device) (uint32, uint32, nvml.Return) { + if mock.DeviceGetJpgUtilizationFunc == nil { + panic("Interface.DeviceGetJpgUtilizationFunc: method is nil but Interface.DeviceGetJpgUtilization was just called") } callInfo := struct { Device nvml.Device }{ Device: device, } - mock.lockDeviceGetMPSComputeRunningProcesses.Lock() - mock.calls.DeviceGetMPSComputeRunningProcesses = append(mock.calls.DeviceGetMPSComputeRunningProcesses, callInfo) - mock.lockDeviceGetMPSComputeRunningProcesses.Unlock() - return mock.DeviceGetMPSComputeRunningProcessesFunc(device) + mock.lockDeviceGetJpgUtilization.Lock() + mock.calls.DeviceGetJpgUtilization = append(mock.calls.DeviceGetJpgUtilization, callInfo) + mock.lockDeviceGetJpgUtilization.Unlock() + return mock.DeviceGetJpgUtilizationFunc(device) } -// DeviceGetMPSComputeRunningProcessesCalls gets all the calls that were made to DeviceGetMPSComputeRunningProcesses. +// DeviceGetJpgUtilizationCalls gets all the calls that were made to DeviceGetJpgUtilization. 
// Check the length with: // -// len(mockedInterface.DeviceGetMPSComputeRunningProcessesCalls()) -func (mock *Interface) DeviceGetMPSComputeRunningProcessesCalls() []struct { +// len(mockedInterface.DeviceGetJpgUtilizationCalls()) +func (mock *Interface) DeviceGetJpgUtilizationCalls() []struct { Device nvml.Device } { var calls []struct { Device nvml.Device } - mock.lockDeviceGetMPSComputeRunningProcesses.RLock() - calls = mock.calls.DeviceGetMPSComputeRunningProcesses - mock.lockDeviceGetMPSComputeRunningProcesses.RUnlock() + mock.lockDeviceGetJpgUtilization.RLock() + calls = mock.calls.DeviceGetJpgUtilization + mock.lockDeviceGetJpgUtilization.RUnlock() return calls } -// DeviceGetMaxClockInfo calls DeviceGetMaxClockInfoFunc. -func (mock *Interface) DeviceGetMaxClockInfo(device nvml.Device, clockType nvml.ClockType) (uint32, nvml.Return) { - if mock.DeviceGetMaxClockInfoFunc == nil { - panic("Interface.DeviceGetMaxClockInfoFunc: method is nil but Interface.DeviceGetMaxClockInfo was just called") +// DeviceGetLastBBXFlushTime calls DeviceGetLastBBXFlushTimeFunc. +func (mock *Interface) DeviceGetLastBBXFlushTime(device nvml.Device) (uint64, uint, nvml.Return) { + if mock.DeviceGetLastBBXFlushTimeFunc == nil { + panic("Interface.DeviceGetLastBBXFlushTimeFunc: method is nil but Interface.DeviceGetLastBBXFlushTime was just called") } callInfo := struct { - Device nvml.Device - ClockType nvml.ClockType + Device nvml.Device }{ - Device: device, - ClockType: clockType, + Device: device, + } + mock.lockDeviceGetLastBBXFlushTime.Lock() + mock.calls.DeviceGetLastBBXFlushTime = append(mock.calls.DeviceGetLastBBXFlushTime, callInfo) + mock.lockDeviceGetLastBBXFlushTime.Unlock() + return mock.DeviceGetLastBBXFlushTimeFunc(device) +} + +// DeviceGetLastBBXFlushTimeCalls gets all the calls that were made to DeviceGetLastBBXFlushTime. 
+// Check the length with: +// +// len(mockedInterface.DeviceGetLastBBXFlushTimeCalls()) +func (mock *Interface) DeviceGetLastBBXFlushTimeCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetLastBBXFlushTime.RLock() + calls = mock.calls.DeviceGetLastBBXFlushTime + mock.lockDeviceGetLastBBXFlushTime.RUnlock() + return calls +} + +// DeviceGetMPSComputeRunningProcesses calls DeviceGetMPSComputeRunningProcessesFunc. +func (mock *Interface) DeviceGetMPSComputeRunningProcesses(device nvml.Device) ([]nvml.ProcessInfo, nvml.Return) { + if mock.DeviceGetMPSComputeRunningProcessesFunc == nil { + panic("Interface.DeviceGetMPSComputeRunningProcessesFunc: method is nil but Interface.DeviceGetMPSComputeRunningProcesses was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetMPSComputeRunningProcesses.Lock() + mock.calls.DeviceGetMPSComputeRunningProcesses = append(mock.calls.DeviceGetMPSComputeRunningProcesses, callInfo) + mock.lockDeviceGetMPSComputeRunningProcesses.Unlock() + return mock.DeviceGetMPSComputeRunningProcessesFunc(device) +} + +// DeviceGetMPSComputeRunningProcessesCalls gets all the calls that were made to DeviceGetMPSComputeRunningProcesses. +// Check the length with: +// +// len(mockedInterface.DeviceGetMPSComputeRunningProcessesCalls()) +func (mock *Interface) DeviceGetMPSComputeRunningProcessesCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetMPSComputeRunningProcesses.RLock() + calls = mock.calls.DeviceGetMPSComputeRunningProcesses + mock.lockDeviceGetMPSComputeRunningProcesses.RUnlock() + return calls +} + +// DeviceGetMarginTemperature calls DeviceGetMarginTemperatureFunc. 
+func (mock *Interface) DeviceGetMarginTemperature(device nvml.Device) (nvml.MarginTemperature, nvml.Return) { + if mock.DeviceGetMarginTemperatureFunc == nil { + panic("Interface.DeviceGetMarginTemperatureFunc: method is nil but Interface.DeviceGetMarginTemperature was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetMarginTemperature.Lock() + mock.calls.DeviceGetMarginTemperature = append(mock.calls.DeviceGetMarginTemperature, callInfo) + mock.lockDeviceGetMarginTemperature.Unlock() + return mock.DeviceGetMarginTemperatureFunc(device) +} + +// DeviceGetMarginTemperatureCalls gets all the calls that were made to DeviceGetMarginTemperature. +// Check the length with: +// +// len(mockedInterface.DeviceGetMarginTemperatureCalls()) +func (mock *Interface) DeviceGetMarginTemperatureCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetMarginTemperature.RLock() + calls = mock.calls.DeviceGetMarginTemperature + mock.lockDeviceGetMarginTemperature.RUnlock() + return calls +} + +// DeviceGetMaxClockInfo calls DeviceGetMaxClockInfoFunc. +func (mock *Interface) DeviceGetMaxClockInfo(device nvml.Device, clockType nvml.ClockType) (uint32, nvml.Return) { + if mock.DeviceGetMaxClockInfoFunc == nil { + panic("Interface.DeviceGetMaxClockInfoFunc: method is nil but Interface.DeviceGetMaxClockInfo was just called") + } + callInfo := struct { + Device nvml.Device + ClockType nvml.ClockType + }{ + Device: device, + ClockType: clockType, } mock.lockDeviceGetMaxClockInfo.Lock() mock.calls.DeviceGetMaxClockInfo = append(mock.calls.DeviceGetMaxClockInfo, callInfo) @@ -7392,6 +8820,38 @@ func (mock *Interface) DeviceGetMinorNumberCalls() []struct { return calls } +// DeviceGetModuleId calls DeviceGetModuleIdFunc. 
+func (mock *Interface) DeviceGetModuleId(device nvml.Device) (int, nvml.Return) { + if mock.DeviceGetModuleIdFunc == nil { + panic("Interface.DeviceGetModuleIdFunc: method is nil but Interface.DeviceGetModuleId was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetModuleId.Lock() + mock.calls.DeviceGetModuleId = append(mock.calls.DeviceGetModuleId, callInfo) + mock.lockDeviceGetModuleId.Unlock() + return mock.DeviceGetModuleIdFunc(device) +} + +// DeviceGetModuleIdCalls gets all the calls that were made to DeviceGetModuleId. +// Check the length with: +// +// len(mockedInterface.DeviceGetModuleIdCalls()) +func (mock *Interface) DeviceGetModuleIdCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetModuleId.RLock() + calls = mock.calls.DeviceGetModuleId + mock.lockDeviceGetModuleId.RUnlock() + return calls +} + // DeviceGetMultiGpuBoard calls DeviceGetMultiGpuBoardFunc. func (mock *Interface) DeviceGetMultiGpuBoard(device nvml.Device) (int, nvml.Return) { if mock.DeviceGetMultiGpuBoardFunc == nil { @@ -7520,6 +8980,38 @@ func (mock *Interface) DeviceGetNumGpuCoresCalls() []struct { return calls } +// DeviceGetNumaNodeId calls DeviceGetNumaNodeIdFunc. +func (mock *Interface) DeviceGetNumaNodeId(device nvml.Device) (int, nvml.Return) { + if mock.DeviceGetNumaNodeIdFunc == nil { + panic("Interface.DeviceGetNumaNodeIdFunc: method is nil but Interface.DeviceGetNumaNodeId was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetNumaNodeId.Lock() + mock.calls.DeviceGetNumaNodeId = append(mock.calls.DeviceGetNumaNodeId, callInfo) + mock.lockDeviceGetNumaNodeId.Unlock() + return mock.DeviceGetNumaNodeIdFunc(device) +} + +// DeviceGetNumaNodeIdCalls gets all the calls that were made to DeviceGetNumaNodeId. 
+// Check the length with: +// +// len(mockedInterface.DeviceGetNumaNodeIdCalls()) +func (mock *Interface) DeviceGetNumaNodeIdCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetNumaNodeId.RLock() + calls = mock.calls.DeviceGetNumaNodeId + mock.lockDeviceGetNumaNodeId.RUnlock() + return calls +} + // DeviceGetNvLinkCapability calls DeviceGetNvLinkCapabilityFunc. func (mock *Interface) DeviceGetNvLinkCapability(device nvml.Device, n int, nvLinkCapability nvml.NvLinkCapability) (uint32, nvml.Return) { if mock.DeviceGetNvLinkCapabilityFunc == nil { @@ -7824,6 +9316,102 @@ func (mock *Interface) DeviceGetNvLinkVersionCalls() []struct { return calls } +// DeviceGetNvlinkBwMode calls DeviceGetNvlinkBwModeFunc. +func (mock *Interface) DeviceGetNvlinkBwMode(device nvml.Device) (nvml.NvlinkGetBwMode, nvml.Return) { + if mock.DeviceGetNvlinkBwModeFunc == nil { + panic("Interface.DeviceGetNvlinkBwModeFunc: method is nil but Interface.DeviceGetNvlinkBwMode was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetNvlinkBwMode.Lock() + mock.calls.DeviceGetNvlinkBwMode = append(mock.calls.DeviceGetNvlinkBwMode, callInfo) + mock.lockDeviceGetNvlinkBwMode.Unlock() + return mock.DeviceGetNvlinkBwModeFunc(device) +} + +// DeviceGetNvlinkBwModeCalls gets all the calls that were made to DeviceGetNvlinkBwMode. +// Check the length with: +// +// len(mockedInterface.DeviceGetNvlinkBwModeCalls()) +func (mock *Interface) DeviceGetNvlinkBwModeCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetNvlinkBwMode.RLock() + calls = mock.calls.DeviceGetNvlinkBwMode + mock.lockDeviceGetNvlinkBwMode.RUnlock() + return calls +} + +// DeviceGetNvlinkSupportedBwModes calls DeviceGetNvlinkSupportedBwModesFunc. 
+func (mock *Interface) DeviceGetNvlinkSupportedBwModes(device nvml.Device) (nvml.NvlinkSupportedBwModes, nvml.Return) { + if mock.DeviceGetNvlinkSupportedBwModesFunc == nil { + panic("Interface.DeviceGetNvlinkSupportedBwModesFunc: method is nil but Interface.DeviceGetNvlinkSupportedBwModes was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetNvlinkSupportedBwModes.Lock() + mock.calls.DeviceGetNvlinkSupportedBwModes = append(mock.calls.DeviceGetNvlinkSupportedBwModes, callInfo) + mock.lockDeviceGetNvlinkSupportedBwModes.Unlock() + return mock.DeviceGetNvlinkSupportedBwModesFunc(device) +} + +// DeviceGetNvlinkSupportedBwModesCalls gets all the calls that were made to DeviceGetNvlinkSupportedBwModes. +// Check the length with: +// +// len(mockedInterface.DeviceGetNvlinkSupportedBwModesCalls()) +func (mock *Interface) DeviceGetNvlinkSupportedBwModesCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetNvlinkSupportedBwModes.RLock() + calls = mock.calls.DeviceGetNvlinkSupportedBwModes + mock.lockDeviceGetNvlinkSupportedBwModes.RUnlock() + return calls +} + +// DeviceGetOfaUtilization calls DeviceGetOfaUtilizationFunc. +func (mock *Interface) DeviceGetOfaUtilization(device nvml.Device) (uint32, uint32, nvml.Return) { + if mock.DeviceGetOfaUtilizationFunc == nil { + panic("Interface.DeviceGetOfaUtilizationFunc: method is nil but Interface.DeviceGetOfaUtilization was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetOfaUtilization.Lock() + mock.calls.DeviceGetOfaUtilization = append(mock.calls.DeviceGetOfaUtilization, callInfo) + mock.lockDeviceGetOfaUtilization.Unlock() + return mock.DeviceGetOfaUtilizationFunc(device) +} + +// DeviceGetOfaUtilizationCalls gets all the calls that were made to DeviceGetOfaUtilization. 
+// Check the length with: +// +// len(mockedInterface.DeviceGetOfaUtilizationCalls()) +func (mock *Interface) DeviceGetOfaUtilizationCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetOfaUtilization.RLock() + calls = mock.calls.DeviceGetOfaUtilization + mock.lockDeviceGetOfaUtilization.RUnlock() + return calls +} + // DeviceGetP2PStatus calls DeviceGetP2PStatusFunc. func (mock *Interface) DeviceGetP2PStatus(device1 nvml.Device, device2 nvml.Device, gpuP2PCapsIndex nvml.GpuP2PCapsIndex) (nvml.GpuP2PStatus, nvml.Return) { if mock.DeviceGetP2PStatusFunc == nil { @@ -7896,6 +9484,38 @@ func (mock *Interface) DeviceGetPciInfoCalls() []struct { return calls } +// DeviceGetPciInfoExt calls DeviceGetPciInfoExtFunc. +func (mock *Interface) DeviceGetPciInfoExt(device nvml.Device) (nvml.PciInfoExt, nvml.Return) { + if mock.DeviceGetPciInfoExtFunc == nil { + panic("Interface.DeviceGetPciInfoExtFunc: method is nil but Interface.DeviceGetPciInfoExt was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetPciInfoExt.Lock() + mock.calls.DeviceGetPciInfoExt = append(mock.calls.DeviceGetPciInfoExt, callInfo) + mock.lockDeviceGetPciInfoExt.Unlock() + return mock.DeviceGetPciInfoExtFunc(device) +} + +// DeviceGetPciInfoExtCalls gets all the calls that were made to DeviceGetPciInfoExt. +// Check the length with: +// +// len(mockedInterface.DeviceGetPciInfoExtCalls()) +func (mock *Interface) DeviceGetPciInfoExtCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetPciInfoExt.RLock() + calls = mock.calls.DeviceGetPciInfoExt + mock.lockDeviceGetPciInfoExt.RUnlock() + return calls +} + // DeviceGetPcieLinkMaxSpeed calls DeviceGetPcieLinkMaxSpeedFunc. 
func (mock *Interface) DeviceGetPcieLinkMaxSpeed(device nvml.Device) (uint32, nvml.Return) { if mock.DeviceGetPcieLinkMaxSpeedFunc == nil { @@ -8028,6 +9648,38 @@ func (mock *Interface) DeviceGetPcieThroughputCalls() []struct { return calls } +// DeviceGetPerformanceModes calls DeviceGetPerformanceModesFunc. +func (mock *Interface) DeviceGetPerformanceModes(device nvml.Device) (nvml.DevicePerfModes, nvml.Return) { + if mock.DeviceGetPerformanceModesFunc == nil { + panic("Interface.DeviceGetPerformanceModesFunc: method is nil but Interface.DeviceGetPerformanceModes was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetPerformanceModes.Lock() + mock.calls.DeviceGetPerformanceModes = append(mock.calls.DeviceGetPerformanceModes, callInfo) + mock.lockDeviceGetPerformanceModes.Unlock() + return mock.DeviceGetPerformanceModesFunc(device) +} + +// DeviceGetPerformanceModesCalls gets all the calls that were made to DeviceGetPerformanceModes. +// Check the length with: +// +// len(mockedInterface.DeviceGetPerformanceModesCalls()) +func (mock *Interface) DeviceGetPerformanceModesCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetPerformanceModes.RLock() + calls = mock.calls.DeviceGetPerformanceModes + mock.lockDeviceGetPerformanceModes.RUnlock() + return calls +} + // DeviceGetPerformanceState calls DeviceGetPerformanceStateFunc. func (mock *Interface) DeviceGetPerformanceState(device nvml.Device) (nvml.Pstates, nvml.Return) { if mock.DeviceGetPerformanceStateFunc == nil { @@ -8124,6 +9776,38 @@ func (mock *Interface) DeviceGetPgpuMetadataStringCalls() []struct { return calls } +// DeviceGetPlatformInfo calls DeviceGetPlatformInfoFunc. 
+func (mock *Interface) DeviceGetPlatformInfo(device nvml.Device) (nvml.PlatformInfo, nvml.Return) { + if mock.DeviceGetPlatformInfoFunc == nil { + panic("Interface.DeviceGetPlatformInfoFunc: method is nil but Interface.DeviceGetPlatformInfo was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetPlatformInfo.Lock() + mock.calls.DeviceGetPlatformInfo = append(mock.calls.DeviceGetPlatformInfo, callInfo) + mock.lockDeviceGetPlatformInfo.Unlock() + return mock.DeviceGetPlatformInfoFunc(device) +} + +// DeviceGetPlatformInfoCalls gets all the calls that were made to DeviceGetPlatformInfo. +// Check the length with: +// +// len(mockedInterface.DeviceGetPlatformInfoCalls()) +func (mock *Interface) DeviceGetPlatformInfoCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetPlatformInfo.RLock() + calls = mock.calls.DeviceGetPlatformInfo + mock.lockDeviceGetPlatformInfo.RUnlock() + return calls +} + // DeviceGetPowerManagementDefaultLimit calls DeviceGetPowerManagementDefaultLimitFunc. func (mock *Interface) DeviceGetPowerManagementDefaultLimit(device nvml.Device) (uint32, nvml.Return) { if mock.DeviceGetPowerManagementDefaultLimitFunc == nil { @@ -8384,6 +10068,38 @@ func (mock *Interface) DeviceGetProcessUtilizationCalls() []struct { return calls } +// DeviceGetProcessesUtilizationInfo calls DeviceGetProcessesUtilizationInfoFunc. 
+func (mock *Interface) DeviceGetProcessesUtilizationInfo(device nvml.Device) (nvml.ProcessesUtilizationInfo, nvml.Return) { + if mock.DeviceGetProcessesUtilizationInfoFunc == nil { + panic("Interface.DeviceGetProcessesUtilizationInfoFunc: method is nil but Interface.DeviceGetProcessesUtilizationInfo was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetProcessesUtilizationInfo.Lock() + mock.calls.DeviceGetProcessesUtilizationInfo = append(mock.calls.DeviceGetProcessesUtilizationInfo, callInfo) + mock.lockDeviceGetProcessesUtilizationInfo.Unlock() + return mock.DeviceGetProcessesUtilizationInfoFunc(device) +} + +// DeviceGetProcessesUtilizationInfoCalls gets all the calls that were made to DeviceGetProcessesUtilizationInfo. +// Check the length with: +// +// len(mockedInterface.DeviceGetProcessesUtilizationInfoCalls()) +func (mock *Interface) DeviceGetProcessesUtilizationInfoCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetProcessesUtilizationInfo.RLock() + calls = mock.calls.DeviceGetProcessesUtilizationInfo + mock.lockDeviceGetProcessesUtilizationInfo.RUnlock() + return calls +} + // DeviceGetRemappedRows calls DeviceGetRemappedRowsFunc. func (mock *Interface) DeviceGetRemappedRows(device nvml.Device) (int, int, bool, bool, nvml.Return) { if mock.DeviceGetRemappedRowsFunc == nil { @@ -8552,6 +10268,38 @@ func (mock *Interface) DeviceGetRowRemapperHistogramCalls() []struct { return calls } +// DeviceGetRunningProcessDetailList calls DeviceGetRunningProcessDetailListFunc. 
+func (mock *Interface) DeviceGetRunningProcessDetailList(device nvml.Device) (nvml.ProcessDetailList, nvml.Return) { + if mock.DeviceGetRunningProcessDetailListFunc == nil { + panic("Interface.DeviceGetRunningProcessDetailListFunc: method is nil but Interface.DeviceGetRunningProcessDetailList was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetRunningProcessDetailList.Lock() + mock.calls.DeviceGetRunningProcessDetailList = append(mock.calls.DeviceGetRunningProcessDetailList, callInfo) + mock.lockDeviceGetRunningProcessDetailList.Unlock() + return mock.DeviceGetRunningProcessDetailListFunc(device) +} + +// DeviceGetRunningProcessDetailListCalls gets all the calls that were made to DeviceGetRunningProcessDetailList. +// Check the length with: +// +// len(mockedInterface.DeviceGetRunningProcessDetailListCalls()) +func (mock *Interface) DeviceGetRunningProcessDetailListCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetRunningProcessDetailList.RLock() + calls = mock.calls.DeviceGetRunningProcessDetailList + mock.lockDeviceGetRunningProcessDetailList.RUnlock() + return calls +} + // DeviceGetSamples calls DeviceGetSamplesFunc. func (mock *Interface) DeviceGetSamples(device nvml.Device, samplingType nvml.SamplingType, v uint64) (nvml.ValueType, []nvml.Sample, nvml.Return) { if mock.DeviceGetSamplesFunc == nil { @@ -8624,7 +10372,71 @@ func (mock *Interface) DeviceGetSerialCalls() []struct { return calls } -// DeviceGetSupportedClocksThrottleReasons calls DeviceGetSupportedClocksThrottleReasonsFunc. +// DeviceGetSramEccErrorStatus calls DeviceGetSramEccErrorStatusFunc. 
+func (mock *Interface) DeviceGetSramEccErrorStatus(device nvml.Device) (nvml.EccSramErrorStatus, nvml.Return) { + if mock.DeviceGetSramEccErrorStatusFunc == nil { + panic("Interface.DeviceGetSramEccErrorStatusFunc: method is nil but Interface.DeviceGetSramEccErrorStatus was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetSramEccErrorStatus.Lock() + mock.calls.DeviceGetSramEccErrorStatus = append(mock.calls.DeviceGetSramEccErrorStatus, callInfo) + mock.lockDeviceGetSramEccErrorStatus.Unlock() + return mock.DeviceGetSramEccErrorStatusFunc(device) +} + +// DeviceGetSramEccErrorStatusCalls gets all the calls that were made to DeviceGetSramEccErrorStatus. +// Check the length with: +// +// len(mockedInterface.DeviceGetSramEccErrorStatusCalls()) +func (mock *Interface) DeviceGetSramEccErrorStatusCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetSramEccErrorStatus.RLock() + calls = mock.calls.DeviceGetSramEccErrorStatus + mock.lockDeviceGetSramEccErrorStatus.RUnlock() + return calls +} + +// DeviceGetSupportedClocksEventReasons calls DeviceGetSupportedClocksEventReasonsFunc. 
+func (mock *Interface) DeviceGetSupportedClocksEventReasons(device nvml.Device) (uint64, nvml.Return) { + if mock.DeviceGetSupportedClocksEventReasonsFunc == nil { + panic("Interface.DeviceGetSupportedClocksEventReasonsFunc: method is nil but Interface.DeviceGetSupportedClocksEventReasons was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetSupportedClocksEventReasons.Lock() + mock.calls.DeviceGetSupportedClocksEventReasons = append(mock.calls.DeviceGetSupportedClocksEventReasons, callInfo) + mock.lockDeviceGetSupportedClocksEventReasons.Unlock() + return mock.DeviceGetSupportedClocksEventReasonsFunc(device) +} + +// DeviceGetSupportedClocksEventReasonsCalls gets all the calls that were made to DeviceGetSupportedClocksEventReasons. +// Check the length with: +// +// len(mockedInterface.DeviceGetSupportedClocksEventReasonsCalls()) +func (mock *Interface) DeviceGetSupportedClocksEventReasonsCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetSupportedClocksEventReasons.RLock() + calls = mock.calls.DeviceGetSupportedClocksEventReasons + mock.lockDeviceGetSupportedClocksEventReasons.RUnlock() + return calls +} + +// DeviceGetSupportedClocksThrottleReasons calls DeviceGetSupportedClocksThrottleReasonsFunc. func (mock *Interface) DeviceGetSupportedClocksThrottleReasons(device nvml.Device) (uint64, nvml.Return) { if mock.DeviceGetSupportedClocksThrottleReasonsFunc == nil { panic("Interface.DeviceGetSupportedClocksThrottleReasonsFunc: method is nil but Interface.DeviceGetSupportedClocksThrottleReasons was just called") @@ -8928,6 +10740,38 @@ func (mock *Interface) DeviceGetTemperatureThresholdCalls() []struct { return calls } +// DeviceGetTemperatureV calls DeviceGetTemperatureVFunc. 
+func (mock *Interface) DeviceGetTemperatureV(device nvml.Device) nvml.TemperatureHandler { + if mock.DeviceGetTemperatureVFunc == nil { + panic("Interface.DeviceGetTemperatureVFunc: method is nil but Interface.DeviceGetTemperatureV was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetTemperatureV.Lock() + mock.calls.DeviceGetTemperatureV = append(mock.calls.DeviceGetTemperatureV, callInfo) + mock.lockDeviceGetTemperatureV.Unlock() + return mock.DeviceGetTemperatureVFunc(device) +} + +// DeviceGetTemperatureVCalls gets all the calls that were made to DeviceGetTemperatureV. +// Check the length with: +// +// len(mockedInterface.DeviceGetTemperatureVCalls()) +func (mock *Interface) DeviceGetTemperatureVCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetTemperatureV.RLock() + calls = mock.calls.DeviceGetTemperatureV + mock.lockDeviceGetTemperatureV.RUnlock() + return calls +} + // DeviceGetThermalSettings calls DeviceGetThermalSettingsFunc. func (mock *Interface) DeviceGetThermalSettings(device nvml.Device, v uint32) (nvml.GpuThermalSettings, nvml.Return) { if mock.DeviceGetThermalSettingsFunc == nil { @@ -9240,6 +11084,70 @@ func (mock *Interface) DeviceGetVgpuCapabilitiesCalls() []struct { return calls } +// DeviceGetVgpuHeterogeneousMode calls DeviceGetVgpuHeterogeneousModeFunc. 
+func (mock *Interface) DeviceGetVgpuHeterogeneousMode(device nvml.Device) (nvml.VgpuHeterogeneousMode, nvml.Return) { + if mock.DeviceGetVgpuHeterogeneousModeFunc == nil { + panic("Interface.DeviceGetVgpuHeterogeneousModeFunc: method is nil but Interface.DeviceGetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetVgpuHeterogeneousMode.Lock() + mock.calls.DeviceGetVgpuHeterogeneousMode = append(mock.calls.DeviceGetVgpuHeterogeneousMode, callInfo) + mock.lockDeviceGetVgpuHeterogeneousMode.Unlock() + return mock.DeviceGetVgpuHeterogeneousModeFunc(device) +} + +// DeviceGetVgpuHeterogeneousModeCalls gets all the calls that were made to DeviceGetVgpuHeterogeneousMode. +// Check the length with: +// +// len(mockedInterface.DeviceGetVgpuHeterogeneousModeCalls()) +func (mock *Interface) DeviceGetVgpuHeterogeneousModeCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetVgpuHeterogeneousMode.RLock() + calls = mock.calls.DeviceGetVgpuHeterogeneousMode + mock.lockDeviceGetVgpuHeterogeneousMode.RUnlock() + return calls +} + +// DeviceGetVgpuInstancesUtilizationInfo calls DeviceGetVgpuInstancesUtilizationInfoFunc. 
+func (mock *Interface) DeviceGetVgpuInstancesUtilizationInfo(device nvml.Device) (nvml.VgpuInstancesUtilizationInfo, nvml.Return) { + if mock.DeviceGetVgpuInstancesUtilizationInfoFunc == nil { + panic("Interface.DeviceGetVgpuInstancesUtilizationInfoFunc: method is nil but Interface.DeviceGetVgpuInstancesUtilizationInfo was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetVgpuInstancesUtilizationInfo.Lock() + mock.calls.DeviceGetVgpuInstancesUtilizationInfo = append(mock.calls.DeviceGetVgpuInstancesUtilizationInfo, callInfo) + mock.lockDeviceGetVgpuInstancesUtilizationInfo.Unlock() + return mock.DeviceGetVgpuInstancesUtilizationInfoFunc(device) +} + +// DeviceGetVgpuInstancesUtilizationInfoCalls gets all the calls that were made to DeviceGetVgpuInstancesUtilizationInfo. +// Check the length with: +// +// len(mockedInterface.DeviceGetVgpuInstancesUtilizationInfoCalls()) +func (mock *Interface) DeviceGetVgpuInstancesUtilizationInfoCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetVgpuInstancesUtilizationInfo.RLock() + calls = mock.calls.DeviceGetVgpuInstancesUtilizationInfo + mock.lockDeviceGetVgpuInstancesUtilizationInfo.RUnlock() + return calls +} + // DeviceGetVgpuMetadata calls DeviceGetVgpuMetadataFunc. func (mock *Interface) DeviceGetVgpuMetadata(device nvml.Device) (nvml.VgpuPgpuMetadata, nvml.Return) { if mock.DeviceGetVgpuMetadataFunc == nil { @@ -9308,6 +11216,38 @@ func (mock *Interface) DeviceGetVgpuProcessUtilizationCalls() []struct { return calls } +// DeviceGetVgpuProcessesUtilizationInfo calls DeviceGetVgpuProcessesUtilizationInfoFunc. 
+func (mock *Interface) DeviceGetVgpuProcessesUtilizationInfo(device nvml.Device) (nvml.VgpuProcessesUtilizationInfo, nvml.Return) { + if mock.DeviceGetVgpuProcessesUtilizationInfoFunc == nil { + panic("Interface.DeviceGetVgpuProcessesUtilizationInfoFunc: method is nil but Interface.DeviceGetVgpuProcessesUtilizationInfo was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceGetVgpuProcessesUtilizationInfo.Lock() + mock.calls.DeviceGetVgpuProcessesUtilizationInfo = append(mock.calls.DeviceGetVgpuProcessesUtilizationInfo, callInfo) + mock.lockDeviceGetVgpuProcessesUtilizationInfo.Unlock() + return mock.DeviceGetVgpuProcessesUtilizationInfoFunc(device) +} + +// DeviceGetVgpuProcessesUtilizationInfoCalls gets all the calls that were made to DeviceGetVgpuProcessesUtilizationInfo. +// Check the length with: +// +// len(mockedInterface.DeviceGetVgpuProcessesUtilizationInfoCalls()) +func (mock *Interface) DeviceGetVgpuProcessesUtilizationInfoCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceGetVgpuProcessesUtilizationInfo.RLock() + calls = mock.calls.DeviceGetVgpuProcessesUtilizationInfo + mock.lockDeviceGetVgpuProcessesUtilizationInfo.RUnlock() + return calls +} + // DeviceGetVgpuSchedulerCapabilities calls DeviceGetVgpuSchedulerCapabilitiesFunc. func (mock *Interface) DeviceGetVgpuSchedulerCapabilities(device nvml.Device) (nvml.VgpuSchedulerCapabilities, nvml.Return) { if mock.DeviceGetVgpuSchedulerCapabilitiesFunc == nil { @@ -9404,6 +11344,78 @@ func (mock *Interface) DeviceGetVgpuSchedulerStateCalls() []struct { return calls } +// DeviceGetVgpuTypeCreatablePlacements calls DeviceGetVgpuTypeCreatablePlacementsFunc. 
+func (mock *Interface) DeviceGetVgpuTypeCreatablePlacements(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { + if mock.DeviceGetVgpuTypeCreatablePlacementsFunc == nil { + panic("Interface.DeviceGetVgpuTypeCreatablePlacementsFunc: method is nil but Interface.DeviceGetVgpuTypeCreatablePlacements was just called") + } + callInfo := struct { + Device nvml.Device + VgpuTypeId nvml.VgpuTypeId + }{ + Device: device, + VgpuTypeId: vgpuTypeId, + } + mock.lockDeviceGetVgpuTypeCreatablePlacements.Lock() + mock.calls.DeviceGetVgpuTypeCreatablePlacements = append(mock.calls.DeviceGetVgpuTypeCreatablePlacements, callInfo) + mock.lockDeviceGetVgpuTypeCreatablePlacements.Unlock() + return mock.DeviceGetVgpuTypeCreatablePlacementsFunc(device, vgpuTypeId) +} + +// DeviceGetVgpuTypeCreatablePlacementsCalls gets all the calls that were made to DeviceGetVgpuTypeCreatablePlacements. +// Check the length with: +// +// len(mockedInterface.DeviceGetVgpuTypeCreatablePlacementsCalls()) +func (mock *Interface) DeviceGetVgpuTypeCreatablePlacementsCalls() []struct { + Device nvml.Device + VgpuTypeId nvml.VgpuTypeId +} { + var calls []struct { + Device nvml.Device + VgpuTypeId nvml.VgpuTypeId + } + mock.lockDeviceGetVgpuTypeCreatablePlacements.RLock() + calls = mock.calls.DeviceGetVgpuTypeCreatablePlacements + mock.lockDeviceGetVgpuTypeCreatablePlacements.RUnlock() + return calls +} + +// DeviceGetVgpuTypeSupportedPlacements calls DeviceGetVgpuTypeSupportedPlacementsFunc. 
+func (mock *Interface) DeviceGetVgpuTypeSupportedPlacements(device nvml.Device, vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuPlacementList, nvml.Return) { + if mock.DeviceGetVgpuTypeSupportedPlacementsFunc == nil { + panic("Interface.DeviceGetVgpuTypeSupportedPlacementsFunc: method is nil but Interface.DeviceGetVgpuTypeSupportedPlacements was just called") + } + callInfo := struct { + Device nvml.Device + VgpuTypeId nvml.VgpuTypeId + }{ + Device: device, + VgpuTypeId: vgpuTypeId, + } + mock.lockDeviceGetVgpuTypeSupportedPlacements.Lock() + mock.calls.DeviceGetVgpuTypeSupportedPlacements = append(mock.calls.DeviceGetVgpuTypeSupportedPlacements, callInfo) + mock.lockDeviceGetVgpuTypeSupportedPlacements.Unlock() + return mock.DeviceGetVgpuTypeSupportedPlacementsFunc(device, vgpuTypeId) +} + +// DeviceGetVgpuTypeSupportedPlacementsCalls gets all the calls that were made to DeviceGetVgpuTypeSupportedPlacements. +// Check the length with: +// +// len(mockedInterface.DeviceGetVgpuTypeSupportedPlacementsCalls()) +func (mock *Interface) DeviceGetVgpuTypeSupportedPlacementsCalls() []struct { + Device nvml.Device + VgpuTypeId nvml.VgpuTypeId +} { + var calls []struct { + Device nvml.Device + VgpuTypeId nvml.VgpuTypeId + } + mock.lockDeviceGetVgpuTypeSupportedPlacements.RLock() + calls = mock.calls.DeviceGetVgpuTypeSupportedPlacements + mock.lockDeviceGetVgpuTypeSupportedPlacements.RUnlock() + return calls +} + // DeviceGetVgpuUtilization calls DeviceGetVgpuUtilizationFunc. func (mock *Interface) DeviceGetVgpuUtilization(device nvml.Device, v uint64) (nvml.ValueType, []nvml.VgpuInstanceUtilizationSample, nvml.Return) { if mock.DeviceGetVgpuUtilizationFunc == nil { @@ -9612,6 +11624,114 @@ func (mock *Interface) DeviceOnSameBoardCalls() []struct { return calls } +// DevicePowerSmoothingActivatePresetProfile calls DevicePowerSmoothingActivatePresetProfileFunc. 
+func (mock *Interface) DevicePowerSmoothingActivatePresetProfile(device nvml.Device, powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { + if mock.DevicePowerSmoothingActivatePresetProfileFunc == nil { + panic("Interface.DevicePowerSmoothingActivatePresetProfileFunc: method is nil but Interface.DevicePowerSmoothingActivatePresetProfile was just called") + } + callInfo := struct { + Device nvml.Device + PowerSmoothingProfile *nvml.PowerSmoothingProfile + }{ + Device: device, + PowerSmoothingProfile: powerSmoothingProfile, + } + mock.lockDevicePowerSmoothingActivatePresetProfile.Lock() + mock.calls.DevicePowerSmoothingActivatePresetProfile = append(mock.calls.DevicePowerSmoothingActivatePresetProfile, callInfo) + mock.lockDevicePowerSmoothingActivatePresetProfile.Unlock() + return mock.DevicePowerSmoothingActivatePresetProfileFunc(device, powerSmoothingProfile) +} + +// DevicePowerSmoothingActivatePresetProfileCalls gets all the calls that were made to DevicePowerSmoothingActivatePresetProfile. +// Check the length with: +// +// len(mockedInterface.DevicePowerSmoothingActivatePresetProfileCalls()) +func (mock *Interface) DevicePowerSmoothingActivatePresetProfileCalls() []struct { + Device nvml.Device + PowerSmoothingProfile *nvml.PowerSmoothingProfile +} { + var calls []struct { + Device nvml.Device + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } + mock.lockDevicePowerSmoothingActivatePresetProfile.RLock() + calls = mock.calls.DevicePowerSmoothingActivatePresetProfile + mock.lockDevicePowerSmoothingActivatePresetProfile.RUnlock() + return calls +} + +// DevicePowerSmoothingSetState calls DevicePowerSmoothingSetStateFunc. 
+func (mock *Interface) DevicePowerSmoothingSetState(device nvml.Device, powerSmoothingState *nvml.PowerSmoothingState) nvml.Return { + if mock.DevicePowerSmoothingSetStateFunc == nil { + panic("Interface.DevicePowerSmoothingSetStateFunc: method is nil but Interface.DevicePowerSmoothingSetState was just called") + } + callInfo := struct { + Device nvml.Device + PowerSmoothingState *nvml.PowerSmoothingState + }{ + Device: device, + PowerSmoothingState: powerSmoothingState, + } + mock.lockDevicePowerSmoothingSetState.Lock() + mock.calls.DevicePowerSmoothingSetState = append(mock.calls.DevicePowerSmoothingSetState, callInfo) + mock.lockDevicePowerSmoothingSetState.Unlock() + return mock.DevicePowerSmoothingSetStateFunc(device, powerSmoothingState) +} + +// DevicePowerSmoothingSetStateCalls gets all the calls that were made to DevicePowerSmoothingSetState. +// Check the length with: +// +// len(mockedInterface.DevicePowerSmoothingSetStateCalls()) +func (mock *Interface) DevicePowerSmoothingSetStateCalls() []struct { + Device nvml.Device + PowerSmoothingState *nvml.PowerSmoothingState +} { + var calls []struct { + Device nvml.Device + PowerSmoothingState *nvml.PowerSmoothingState + } + mock.lockDevicePowerSmoothingSetState.RLock() + calls = mock.calls.DevicePowerSmoothingSetState + mock.lockDevicePowerSmoothingSetState.RUnlock() + return calls +} + +// DevicePowerSmoothingUpdatePresetProfileParam calls DevicePowerSmoothingUpdatePresetProfileParamFunc. 
+func (mock *Interface) DevicePowerSmoothingUpdatePresetProfileParam(device nvml.Device, powerSmoothingProfile *nvml.PowerSmoothingProfile) nvml.Return { + if mock.DevicePowerSmoothingUpdatePresetProfileParamFunc == nil { + panic("Interface.DevicePowerSmoothingUpdatePresetProfileParamFunc: method is nil but Interface.DevicePowerSmoothingUpdatePresetProfileParam was just called") + } + callInfo := struct { + Device nvml.Device + PowerSmoothingProfile *nvml.PowerSmoothingProfile + }{ + Device: device, + PowerSmoothingProfile: powerSmoothingProfile, + } + mock.lockDevicePowerSmoothingUpdatePresetProfileParam.Lock() + mock.calls.DevicePowerSmoothingUpdatePresetProfileParam = append(mock.calls.DevicePowerSmoothingUpdatePresetProfileParam, callInfo) + mock.lockDevicePowerSmoothingUpdatePresetProfileParam.Unlock() + return mock.DevicePowerSmoothingUpdatePresetProfileParamFunc(device, powerSmoothingProfile) +} + +// DevicePowerSmoothingUpdatePresetProfileParamCalls gets all the calls that were made to DevicePowerSmoothingUpdatePresetProfileParam. +// Check the length with: +// +// len(mockedInterface.DevicePowerSmoothingUpdatePresetProfileParamCalls()) +func (mock *Interface) DevicePowerSmoothingUpdatePresetProfileParamCalls() []struct { + Device nvml.Device + PowerSmoothingProfile *nvml.PowerSmoothingProfile +} { + var calls []struct { + Device nvml.Device + PowerSmoothingProfile *nvml.PowerSmoothingProfile + } + mock.lockDevicePowerSmoothingUpdatePresetProfileParam.RLock() + calls = mock.calls.DevicePowerSmoothingUpdatePresetProfileParam + mock.lockDevicePowerSmoothingUpdatePresetProfileParam.RUnlock() + return calls +} + // DeviceQueryDrainState calls DeviceQueryDrainStateFunc. 
func (mock *Interface) DeviceQueryDrainState(pciInfo *nvml.PciInfo) (nvml.EnableState, nvml.Return) { if mock.DeviceQueryDrainStateFunc == nil { @@ -10080,6 +12200,42 @@ func (mock *Interface) DeviceSetAutoBoostedClocksEnabledCalls() []struct { return calls } +// DeviceSetClockOffsets calls DeviceSetClockOffsetsFunc. +func (mock *Interface) DeviceSetClockOffsets(device nvml.Device, clockOffset nvml.ClockOffset) nvml.Return { + if mock.DeviceSetClockOffsetsFunc == nil { + panic("Interface.DeviceSetClockOffsetsFunc: method is nil but Interface.DeviceSetClockOffsets was just called") + } + callInfo := struct { + Device nvml.Device + ClockOffset nvml.ClockOffset + }{ + Device: device, + ClockOffset: clockOffset, + } + mock.lockDeviceSetClockOffsets.Lock() + mock.calls.DeviceSetClockOffsets = append(mock.calls.DeviceSetClockOffsets, callInfo) + mock.lockDeviceSetClockOffsets.Unlock() + return mock.DeviceSetClockOffsetsFunc(device, clockOffset) +} + +// DeviceSetClockOffsetsCalls gets all the calls that were made to DeviceSetClockOffsets. +// Check the length with: +// +// len(mockedInterface.DeviceSetClockOffsetsCalls()) +func (mock *Interface) DeviceSetClockOffsetsCalls() []struct { + Device nvml.Device + ClockOffset nvml.ClockOffset +} { + var calls []struct { + Device nvml.Device + ClockOffset nvml.ClockOffset + } + mock.lockDeviceSetClockOffsets.RLock() + calls = mock.calls.DeviceSetClockOffsets + mock.lockDeviceSetClockOffsets.RUnlock() + return calls +} + // DeviceSetComputeMode calls DeviceSetComputeModeFunc. func (mock *Interface) DeviceSetComputeMode(device nvml.Device, computeMode nvml.ComputeMode) nvml.Return { if mock.DeviceSetComputeModeFunc == nil { @@ -10116,6 +12272,42 @@ func (mock *Interface) DeviceSetComputeModeCalls() []struct { return calls } +// DeviceSetConfComputeUnprotectedMemSize calls DeviceSetConfComputeUnprotectedMemSizeFunc. 
+func (mock *Interface) DeviceSetConfComputeUnprotectedMemSize(device nvml.Device, v uint64) nvml.Return { + if mock.DeviceSetConfComputeUnprotectedMemSizeFunc == nil { + panic("Interface.DeviceSetConfComputeUnprotectedMemSizeFunc: method is nil but Interface.DeviceSetConfComputeUnprotectedMemSize was just called") + } + callInfo := struct { + Device nvml.Device + V uint64 + }{ + Device: device, + V: v, + } + mock.lockDeviceSetConfComputeUnprotectedMemSize.Lock() + mock.calls.DeviceSetConfComputeUnprotectedMemSize = append(mock.calls.DeviceSetConfComputeUnprotectedMemSize, callInfo) + mock.lockDeviceSetConfComputeUnprotectedMemSize.Unlock() + return mock.DeviceSetConfComputeUnprotectedMemSizeFunc(device, v) +} + +// DeviceSetConfComputeUnprotectedMemSizeCalls gets all the calls that were made to DeviceSetConfComputeUnprotectedMemSize. +// Check the length with: +// +// len(mockedInterface.DeviceSetConfComputeUnprotectedMemSizeCalls()) +func (mock *Interface) DeviceSetConfComputeUnprotectedMemSizeCalls() []struct { + Device nvml.Device + V uint64 +} { + var calls []struct { + Device nvml.Device + V uint64 + } + mock.lockDeviceSetConfComputeUnprotectedMemSize.RLock() + calls = mock.calls.DeviceSetConfComputeUnprotectedMemSize + mock.lockDeviceSetConfComputeUnprotectedMemSize.RUnlock() + return calls +} + // DeviceSetCpuAffinity calls DeviceSetCpuAffinityFunc. func (mock *Interface) DeviceSetCpuAffinity(device nvml.Device) nvml.Return { if mock.DeviceSetCpuAffinityFunc == nil { @@ -10224,17 +12416,53 @@ func (mock *Interface) DeviceSetDefaultFanSpeed_v2Calls() []struct { return calls } -// DeviceSetDriverModel calls DeviceSetDriverModelFunc. 
-func (mock *Interface) DeviceSetDriverModel(device nvml.Device, driverModel nvml.DriverModel, v uint32) nvml.Return { - if mock.DeviceSetDriverModelFunc == nil { - panic("Interface.DeviceSetDriverModelFunc: method is nil but Interface.DeviceSetDriverModel was just called") +// DeviceSetDramEncryptionMode calls DeviceSetDramEncryptionModeFunc. +func (mock *Interface) DeviceSetDramEncryptionMode(device nvml.Device, dramEncryptionInfo *nvml.DramEncryptionInfo) nvml.Return { + if mock.DeviceSetDramEncryptionModeFunc == nil { + panic("Interface.DeviceSetDramEncryptionModeFunc: method is nil but Interface.DeviceSetDramEncryptionMode was just called") } callInfo := struct { - Device nvml.Device - DriverModel nvml.DriverModel - V uint32 + Device nvml.Device + DramEncryptionInfo *nvml.DramEncryptionInfo }{ - Device: device, + Device: device, + DramEncryptionInfo: dramEncryptionInfo, + } + mock.lockDeviceSetDramEncryptionMode.Lock() + mock.calls.DeviceSetDramEncryptionMode = append(mock.calls.DeviceSetDramEncryptionMode, callInfo) + mock.lockDeviceSetDramEncryptionMode.Unlock() + return mock.DeviceSetDramEncryptionModeFunc(device, dramEncryptionInfo) +} + +// DeviceSetDramEncryptionModeCalls gets all the calls that were made to DeviceSetDramEncryptionMode. +// Check the length with: +// +// len(mockedInterface.DeviceSetDramEncryptionModeCalls()) +func (mock *Interface) DeviceSetDramEncryptionModeCalls() []struct { + Device nvml.Device + DramEncryptionInfo *nvml.DramEncryptionInfo +} { + var calls []struct { + Device nvml.Device + DramEncryptionInfo *nvml.DramEncryptionInfo + } + mock.lockDeviceSetDramEncryptionMode.RLock() + calls = mock.calls.DeviceSetDramEncryptionMode + mock.lockDeviceSetDramEncryptionMode.RUnlock() + return calls +} + +// DeviceSetDriverModel calls DeviceSetDriverModelFunc. 
+func (mock *Interface) DeviceSetDriverModel(device nvml.Device, driverModel nvml.DriverModel, v uint32) nvml.Return { + if mock.DeviceSetDriverModelFunc == nil { + panic("Interface.DeviceSetDriverModelFunc: method is nil but Interface.DeviceSetDriverModel was just called") + } + callInfo := struct { + Device nvml.Device + DriverModel nvml.DriverModel + V uint32 + }{ + Device: device, DriverModel: driverModel, V: v, } @@ -10688,6 +12916,42 @@ func (mock *Interface) DeviceSetNvLinkUtilizationControlCalls() []struct { return calls } +// DeviceSetNvlinkBwMode calls DeviceSetNvlinkBwModeFunc. +func (mock *Interface) DeviceSetNvlinkBwMode(device nvml.Device, nvlinkSetBwMode *nvml.NvlinkSetBwMode) nvml.Return { + if mock.DeviceSetNvlinkBwModeFunc == nil { + panic("Interface.DeviceSetNvlinkBwModeFunc: method is nil but Interface.DeviceSetNvlinkBwMode was just called") + } + callInfo := struct { + Device nvml.Device + NvlinkSetBwMode *nvml.NvlinkSetBwMode + }{ + Device: device, + NvlinkSetBwMode: nvlinkSetBwMode, + } + mock.lockDeviceSetNvlinkBwMode.Lock() + mock.calls.DeviceSetNvlinkBwMode = append(mock.calls.DeviceSetNvlinkBwMode, callInfo) + mock.lockDeviceSetNvlinkBwMode.Unlock() + return mock.DeviceSetNvlinkBwModeFunc(device, nvlinkSetBwMode) +} + +// DeviceSetNvlinkBwModeCalls gets all the calls that were made to DeviceSetNvlinkBwMode. +// Check the length with: +// +// len(mockedInterface.DeviceSetNvlinkBwModeCalls()) +func (mock *Interface) DeviceSetNvlinkBwModeCalls() []struct { + Device nvml.Device + NvlinkSetBwMode *nvml.NvlinkSetBwMode +} { + var calls []struct { + Device nvml.Device + NvlinkSetBwMode *nvml.NvlinkSetBwMode + } + mock.lockDeviceSetNvlinkBwMode.RLock() + calls = mock.calls.DeviceSetNvlinkBwMode + mock.lockDeviceSetNvlinkBwMode.RUnlock() + return calls +} + // DeviceSetPersistenceMode calls DeviceSetPersistenceModeFunc. 
func (mock *Interface) DeviceSetPersistenceMode(device nvml.Device, enableState nvml.EnableState) nvml.Return { if mock.DeviceSetPersistenceModeFunc == nil { @@ -10760,6 +13024,42 @@ func (mock *Interface) DeviceSetPowerManagementLimitCalls() []struct { return calls } +// DeviceSetPowerManagementLimit_v2 calls DeviceSetPowerManagementLimit_v2Func. +func (mock *Interface) DeviceSetPowerManagementLimit_v2(device nvml.Device, powerValue_v2 *nvml.PowerValue_v2) nvml.Return { + if mock.DeviceSetPowerManagementLimit_v2Func == nil { + panic("Interface.DeviceSetPowerManagementLimit_v2Func: method is nil but Interface.DeviceSetPowerManagementLimit_v2 was just called") + } + callInfo := struct { + Device nvml.Device + PowerValue_v2 *nvml.PowerValue_v2 + }{ + Device: device, + PowerValue_v2: powerValue_v2, + } + mock.lockDeviceSetPowerManagementLimit_v2.Lock() + mock.calls.DeviceSetPowerManagementLimit_v2 = append(mock.calls.DeviceSetPowerManagementLimit_v2, callInfo) + mock.lockDeviceSetPowerManagementLimit_v2.Unlock() + return mock.DeviceSetPowerManagementLimit_v2Func(device, powerValue_v2) +} + +// DeviceSetPowerManagementLimit_v2Calls gets all the calls that were made to DeviceSetPowerManagementLimit_v2. +// Check the length with: +// +// len(mockedInterface.DeviceSetPowerManagementLimit_v2Calls()) +func (mock *Interface) DeviceSetPowerManagementLimit_v2Calls() []struct { + Device nvml.Device + PowerValue_v2 *nvml.PowerValue_v2 +} { + var calls []struct { + Device nvml.Device + PowerValue_v2 *nvml.PowerValue_v2 + } + mock.lockDeviceSetPowerManagementLimit_v2.RLock() + calls = mock.calls.DeviceSetPowerManagementLimit_v2 + mock.lockDeviceSetPowerManagementLimit_v2.RUnlock() + return calls +} + // DeviceSetTemperatureThreshold calls DeviceSetTemperatureThresholdFunc. 
func (mock *Interface) DeviceSetTemperatureThreshold(device nvml.Device, temperatureThresholds nvml.TemperatureThresholds, n int) nvml.Return { if mock.DeviceSetTemperatureThresholdFunc == nil { @@ -10800,6 +13100,82 @@ func (mock *Interface) DeviceSetTemperatureThresholdCalls() []struct { return calls } +// DeviceSetVgpuCapabilities calls DeviceSetVgpuCapabilitiesFunc. +func (mock *Interface) DeviceSetVgpuCapabilities(device nvml.Device, deviceVgpuCapability nvml.DeviceVgpuCapability, enableState nvml.EnableState) nvml.Return { + if mock.DeviceSetVgpuCapabilitiesFunc == nil { + panic("Interface.DeviceSetVgpuCapabilitiesFunc: method is nil but Interface.DeviceSetVgpuCapabilities was just called") + } + callInfo := struct { + Device nvml.Device + DeviceVgpuCapability nvml.DeviceVgpuCapability + EnableState nvml.EnableState + }{ + Device: device, + DeviceVgpuCapability: deviceVgpuCapability, + EnableState: enableState, + } + mock.lockDeviceSetVgpuCapabilities.Lock() + mock.calls.DeviceSetVgpuCapabilities = append(mock.calls.DeviceSetVgpuCapabilities, callInfo) + mock.lockDeviceSetVgpuCapabilities.Unlock() + return mock.DeviceSetVgpuCapabilitiesFunc(device, deviceVgpuCapability, enableState) +} + +// DeviceSetVgpuCapabilitiesCalls gets all the calls that were made to DeviceSetVgpuCapabilities. +// Check the length with: +// +// len(mockedInterface.DeviceSetVgpuCapabilitiesCalls()) +func (mock *Interface) DeviceSetVgpuCapabilitiesCalls() []struct { + Device nvml.Device + DeviceVgpuCapability nvml.DeviceVgpuCapability + EnableState nvml.EnableState +} { + var calls []struct { + Device nvml.Device + DeviceVgpuCapability nvml.DeviceVgpuCapability + EnableState nvml.EnableState + } + mock.lockDeviceSetVgpuCapabilities.RLock() + calls = mock.calls.DeviceSetVgpuCapabilities + mock.lockDeviceSetVgpuCapabilities.RUnlock() + return calls +} + +// DeviceSetVgpuHeterogeneousMode calls DeviceSetVgpuHeterogeneousModeFunc. 
+func (mock *Interface) DeviceSetVgpuHeterogeneousMode(device nvml.Device, vgpuHeterogeneousMode nvml.VgpuHeterogeneousMode) nvml.Return { + if mock.DeviceSetVgpuHeterogeneousModeFunc == nil { + panic("Interface.DeviceSetVgpuHeterogeneousModeFunc: method is nil but Interface.DeviceSetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + Device nvml.Device + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode + }{ + Device: device, + VgpuHeterogeneousMode: vgpuHeterogeneousMode, + } + mock.lockDeviceSetVgpuHeterogeneousMode.Lock() + mock.calls.DeviceSetVgpuHeterogeneousMode = append(mock.calls.DeviceSetVgpuHeterogeneousMode, callInfo) + mock.lockDeviceSetVgpuHeterogeneousMode.Unlock() + return mock.DeviceSetVgpuHeterogeneousModeFunc(device, vgpuHeterogeneousMode) +} + +// DeviceSetVgpuHeterogeneousModeCalls gets all the calls that were made to DeviceSetVgpuHeterogeneousMode. +// Check the length with: +// +// len(mockedInterface.DeviceSetVgpuHeterogeneousModeCalls()) +func (mock *Interface) DeviceSetVgpuHeterogeneousModeCalls() []struct { + Device nvml.Device + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode +} { + var calls []struct { + Device nvml.Device + VgpuHeterogeneousMode nvml.VgpuHeterogeneousMode + } + mock.lockDeviceSetVgpuHeterogeneousMode.RLock() + calls = mock.calls.DeviceSetVgpuHeterogeneousMode + mock.lockDeviceSetVgpuHeterogeneousMode.RUnlock() + return calls +} + // DeviceSetVgpuSchedulerState calls DeviceSetVgpuSchedulerStateFunc. func (mock *Interface) DeviceSetVgpuSchedulerState(device nvml.Device, vgpuSchedulerSetState *nvml.VgpuSchedulerSetState) nvml.Return { if mock.DeviceSetVgpuSchedulerStateFunc == nil { @@ -10904,6 +13280,142 @@ func (mock *Interface) DeviceValidateInforomCalls() []struct { return calls } +// DeviceWorkloadPowerProfileClearRequestedProfiles calls DeviceWorkloadPowerProfileClearRequestedProfilesFunc. 
+func (mock *Interface) DeviceWorkloadPowerProfileClearRequestedProfiles(device nvml.Device, workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { + if mock.DeviceWorkloadPowerProfileClearRequestedProfilesFunc == nil { + panic("Interface.DeviceWorkloadPowerProfileClearRequestedProfilesFunc: method is nil but Interface.DeviceWorkloadPowerProfileClearRequestedProfiles was just called") + } + callInfo := struct { + Device nvml.Device + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + }{ + Device: device, + WorkloadPowerProfileRequestedProfiles: workloadPowerProfileRequestedProfiles, + } + mock.lockDeviceWorkloadPowerProfileClearRequestedProfiles.Lock() + mock.calls.DeviceWorkloadPowerProfileClearRequestedProfiles = append(mock.calls.DeviceWorkloadPowerProfileClearRequestedProfiles, callInfo) + mock.lockDeviceWorkloadPowerProfileClearRequestedProfiles.Unlock() + return mock.DeviceWorkloadPowerProfileClearRequestedProfilesFunc(device, workloadPowerProfileRequestedProfiles) +} + +// DeviceWorkloadPowerProfileClearRequestedProfilesCalls gets all the calls that were made to DeviceWorkloadPowerProfileClearRequestedProfiles. +// Check the length with: +// +// len(mockedInterface.DeviceWorkloadPowerProfileClearRequestedProfilesCalls()) +func (mock *Interface) DeviceWorkloadPowerProfileClearRequestedProfilesCalls() []struct { + Device nvml.Device + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles +} { + var calls []struct { + Device nvml.Device + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } + mock.lockDeviceWorkloadPowerProfileClearRequestedProfiles.RLock() + calls = mock.calls.DeviceWorkloadPowerProfileClearRequestedProfiles + mock.lockDeviceWorkloadPowerProfileClearRequestedProfiles.RUnlock() + return calls +} + +// DeviceWorkloadPowerProfileGetCurrentProfiles calls DeviceWorkloadPowerProfileGetCurrentProfilesFunc. 
+func (mock *Interface) DeviceWorkloadPowerProfileGetCurrentProfiles(device nvml.Device) (nvml.WorkloadPowerProfileCurrentProfiles, nvml.Return) { + if mock.DeviceWorkloadPowerProfileGetCurrentProfilesFunc == nil { + panic("Interface.DeviceWorkloadPowerProfileGetCurrentProfilesFunc: method is nil but Interface.DeviceWorkloadPowerProfileGetCurrentProfiles was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceWorkloadPowerProfileGetCurrentProfiles.Lock() + mock.calls.DeviceWorkloadPowerProfileGetCurrentProfiles = append(mock.calls.DeviceWorkloadPowerProfileGetCurrentProfiles, callInfo) + mock.lockDeviceWorkloadPowerProfileGetCurrentProfiles.Unlock() + return mock.DeviceWorkloadPowerProfileGetCurrentProfilesFunc(device) +} + +// DeviceWorkloadPowerProfileGetCurrentProfilesCalls gets all the calls that were made to DeviceWorkloadPowerProfileGetCurrentProfiles. +// Check the length with: +// +// len(mockedInterface.DeviceWorkloadPowerProfileGetCurrentProfilesCalls()) +func (mock *Interface) DeviceWorkloadPowerProfileGetCurrentProfilesCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceWorkloadPowerProfileGetCurrentProfiles.RLock() + calls = mock.calls.DeviceWorkloadPowerProfileGetCurrentProfiles + mock.lockDeviceWorkloadPowerProfileGetCurrentProfiles.RUnlock() + return calls +} + +// DeviceWorkloadPowerProfileGetProfilesInfo calls DeviceWorkloadPowerProfileGetProfilesInfoFunc. 
+func (mock *Interface) DeviceWorkloadPowerProfileGetProfilesInfo(device nvml.Device) (nvml.WorkloadPowerProfileProfilesInfo, nvml.Return) { + if mock.DeviceWorkloadPowerProfileGetProfilesInfoFunc == nil { + panic("Interface.DeviceWorkloadPowerProfileGetProfilesInfoFunc: method is nil but Interface.DeviceWorkloadPowerProfileGetProfilesInfo was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockDeviceWorkloadPowerProfileGetProfilesInfo.Lock() + mock.calls.DeviceWorkloadPowerProfileGetProfilesInfo = append(mock.calls.DeviceWorkloadPowerProfileGetProfilesInfo, callInfo) + mock.lockDeviceWorkloadPowerProfileGetProfilesInfo.Unlock() + return mock.DeviceWorkloadPowerProfileGetProfilesInfoFunc(device) +} + +// DeviceWorkloadPowerProfileGetProfilesInfoCalls gets all the calls that were made to DeviceWorkloadPowerProfileGetProfilesInfo. +// Check the length with: +// +// len(mockedInterface.DeviceWorkloadPowerProfileGetProfilesInfoCalls()) +func (mock *Interface) DeviceWorkloadPowerProfileGetProfilesInfoCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockDeviceWorkloadPowerProfileGetProfilesInfo.RLock() + calls = mock.calls.DeviceWorkloadPowerProfileGetProfilesInfo + mock.lockDeviceWorkloadPowerProfileGetProfilesInfo.RUnlock() + return calls +} + +// DeviceWorkloadPowerProfileSetRequestedProfiles calls DeviceWorkloadPowerProfileSetRequestedProfilesFunc. 
+func (mock *Interface) DeviceWorkloadPowerProfileSetRequestedProfiles(device nvml.Device, workloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles) nvml.Return { + if mock.DeviceWorkloadPowerProfileSetRequestedProfilesFunc == nil { + panic("Interface.DeviceWorkloadPowerProfileSetRequestedProfilesFunc: method is nil but Interface.DeviceWorkloadPowerProfileSetRequestedProfiles was just called") + } + callInfo := struct { + Device nvml.Device + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + }{ + Device: device, + WorkloadPowerProfileRequestedProfiles: workloadPowerProfileRequestedProfiles, + } + mock.lockDeviceWorkloadPowerProfileSetRequestedProfiles.Lock() + mock.calls.DeviceWorkloadPowerProfileSetRequestedProfiles = append(mock.calls.DeviceWorkloadPowerProfileSetRequestedProfiles, callInfo) + mock.lockDeviceWorkloadPowerProfileSetRequestedProfiles.Unlock() + return mock.DeviceWorkloadPowerProfileSetRequestedProfilesFunc(device, workloadPowerProfileRequestedProfiles) +} + +// DeviceWorkloadPowerProfileSetRequestedProfilesCalls gets all the calls that were made to DeviceWorkloadPowerProfileSetRequestedProfiles. +// Check the length with: +// +// len(mockedInterface.DeviceWorkloadPowerProfileSetRequestedProfilesCalls()) +func (mock *Interface) DeviceWorkloadPowerProfileSetRequestedProfilesCalls() []struct { + Device nvml.Device + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles +} { + var calls []struct { + Device nvml.Device + WorkloadPowerProfileRequestedProfiles *nvml.WorkloadPowerProfileRequestedProfiles + } + mock.lockDeviceWorkloadPowerProfileSetRequestedProfiles.RLock() + calls = mock.calls.DeviceWorkloadPowerProfileSetRequestedProfiles + mock.lockDeviceWorkloadPowerProfileSetRequestedProfiles.RUnlock() + return calls +} + // ErrorString calls ErrorStringFunc. 
func (mock *Interface) ErrorString(returnMoqParam nvml.Return) string { if mock.ErrorStringFunc == nil { @@ -11380,6 +13892,38 @@ func (mock *Interface) GpmQueryDeviceSupportVCalls() []struct { return calls } +// GpmQueryIfStreamingEnabled calls GpmQueryIfStreamingEnabledFunc. +func (mock *Interface) GpmQueryIfStreamingEnabled(device nvml.Device) (uint32, nvml.Return) { + if mock.GpmQueryIfStreamingEnabledFunc == nil { + panic("Interface.GpmQueryIfStreamingEnabledFunc: method is nil but Interface.GpmQueryIfStreamingEnabled was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockGpmQueryIfStreamingEnabled.Lock() + mock.calls.GpmQueryIfStreamingEnabled = append(mock.calls.GpmQueryIfStreamingEnabled, callInfo) + mock.lockGpmQueryIfStreamingEnabled.Unlock() + return mock.GpmQueryIfStreamingEnabledFunc(device) +} + +// GpmQueryIfStreamingEnabledCalls gets all the calls that were made to GpmQueryIfStreamingEnabled. +// Check the length with: +// +// len(mockedInterface.GpmQueryIfStreamingEnabledCalls()) +func (mock *Interface) GpmQueryIfStreamingEnabledCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockGpmQueryIfStreamingEnabled.RLock() + calls = mock.calls.GpmQueryIfStreamingEnabled + mock.lockGpmQueryIfStreamingEnabled.RUnlock() + return calls +} + // GpmSampleAlloc calls GpmSampleAllocFunc. func (mock *Interface) GpmSampleAlloc() (nvml.GpmSample, nvml.Return) { if mock.GpmSampleAllocFunc == nil { @@ -11475,6 +14019,42 @@ func (mock *Interface) GpmSampleGetCalls() []struct { return calls } +// GpmSetStreamingEnabled calls GpmSetStreamingEnabledFunc. 
+func (mock *Interface) GpmSetStreamingEnabled(device nvml.Device, v uint32) nvml.Return { + if mock.GpmSetStreamingEnabledFunc == nil { + panic("Interface.GpmSetStreamingEnabledFunc: method is nil but Interface.GpmSetStreamingEnabled was just called") + } + callInfo := struct { + Device nvml.Device + V uint32 + }{ + Device: device, + V: v, + } + mock.lockGpmSetStreamingEnabled.Lock() + mock.calls.GpmSetStreamingEnabled = append(mock.calls.GpmSetStreamingEnabled, callInfo) + mock.lockGpmSetStreamingEnabled.Unlock() + return mock.GpmSetStreamingEnabledFunc(device, v) +} + +// GpmSetStreamingEnabledCalls gets all the calls that were made to GpmSetStreamingEnabled. +// Check the length with: +// +// len(mockedInterface.GpmSetStreamingEnabledCalls()) +func (mock *Interface) GpmSetStreamingEnabledCalls() []struct { + Device nvml.Device + V uint32 +} { + var calls []struct { + Device nvml.Device + V uint32 + } + mock.lockGpmSetStreamingEnabled.RLock() + calls = mock.calls.GpmSetStreamingEnabled + mock.lockGpmSetStreamingEnabled.RUnlock() + return calls +} + // GpuInstanceCreateComputeInstance calls GpuInstanceCreateComputeInstanceFunc. func (mock *Interface) GpuInstanceCreateComputeInstance(gpuInstance nvml.GpuInstance, computeInstanceProfileInfo *nvml.ComputeInstanceProfileInfo) (nvml.ComputeInstance, nvml.Return) { if mock.GpuInstanceCreateComputeInstanceFunc == nil { @@ -11583,6 +14163,38 @@ func (mock *Interface) GpuInstanceDestroyCalls() []struct { return calls } +// GpuInstanceGetActiveVgpus calls GpuInstanceGetActiveVgpusFunc. 
+func (mock *Interface) GpuInstanceGetActiveVgpus(gpuInstance nvml.GpuInstance) (nvml.ActiveVgpuInstanceInfo, nvml.Return) { + if mock.GpuInstanceGetActiveVgpusFunc == nil { + panic("Interface.GpuInstanceGetActiveVgpusFunc: method is nil but Interface.GpuInstanceGetActiveVgpus was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + }{ + GpuInstance: gpuInstance, + } + mock.lockGpuInstanceGetActiveVgpus.Lock() + mock.calls.GpuInstanceGetActiveVgpus = append(mock.calls.GpuInstanceGetActiveVgpus, callInfo) + mock.lockGpuInstanceGetActiveVgpus.Unlock() + return mock.GpuInstanceGetActiveVgpusFunc(gpuInstance) +} + +// GpuInstanceGetActiveVgpusCalls gets all the calls that were made to GpuInstanceGetActiveVgpus. +// Check the length with: +// +// len(mockedInterface.GpuInstanceGetActiveVgpusCalls()) +func (mock *Interface) GpuInstanceGetActiveVgpusCalls() []struct { + GpuInstance nvml.GpuInstance +} { + var calls []struct { + GpuInstance nvml.GpuInstance + } + mock.lockGpuInstanceGetActiveVgpus.RLock() + calls = mock.calls.GpuInstanceGetActiveVgpus + mock.lockGpuInstanceGetActiveVgpus.RUnlock() + return calls +} + // GpuInstanceGetComputeInstanceById calls GpuInstanceGetComputeInstanceByIdFunc. func (mock *Interface) GpuInstanceGetComputeInstanceById(gpuInstance nvml.GpuInstance, n int) (nvml.ComputeInstance, nvml.Return) { if mock.GpuInstanceGetComputeInstanceByIdFunc == nil { @@ -11696,7 +14308,7 @@ func (mock *Interface) GpuInstanceGetComputeInstanceProfileInfoCalls() []struct } // GpuInstanceGetComputeInstanceProfileInfoV calls GpuInstanceGetComputeInstanceProfileInfoVFunc. 
-func (mock *Interface) GpuInstanceGetComputeInstanceProfileInfoV(gpuInstance nvml.GpuInstance, n1 int, n2 int) nvml.ComputeInstanceProfileInfoV { +func (mock *Interface) GpuInstanceGetComputeInstanceProfileInfoV(gpuInstance nvml.GpuInstance, n1 int, n2 int) nvml.ComputeInstanceProfileInfoHandler { if mock.GpuInstanceGetComputeInstanceProfileInfoVFunc == nil { panic("Interface.GpuInstanceGetComputeInstanceProfileInfoVFunc: method is nil but Interface.GpuInstanceGetComputeInstanceProfileInfoV was just called") } @@ -11807,6 +14419,38 @@ func (mock *Interface) GpuInstanceGetComputeInstancesCalls() []struct { return calls } +// GpuInstanceGetCreatableVgpus calls GpuInstanceGetCreatableVgpusFunc. +func (mock *Interface) GpuInstanceGetCreatableVgpus(gpuInstance nvml.GpuInstance) (nvml.VgpuTypeIdInfo, nvml.Return) { + if mock.GpuInstanceGetCreatableVgpusFunc == nil { + panic("Interface.GpuInstanceGetCreatableVgpusFunc: method is nil but Interface.GpuInstanceGetCreatableVgpus was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + }{ + GpuInstance: gpuInstance, + } + mock.lockGpuInstanceGetCreatableVgpus.Lock() + mock.calls.GpuInstanceGetCreatableVgpus = append(mock.calls.GpuInstanceGetCreatableVgpus, callInfo) + mock.lockGpuInstanceGetCreatableVgpus.Unlock() + return mock.GpuInstanceGetCreatableVgpusFunc(gpuInstance) +} + +// GpuInstanceGetCreatableVgpusCalls gets all the calls that were made to GpuInstanceGetCreatableVgpus. +// Check the length with: +// +// len(mockedInterface.GpuInstanceGetCreatableVgpusCalls()) +func (mock *Interface) GpuInstanceGetCreatableVgpusCalls() []struct { + GpuInstance nvml.GpuInstance +} { + var calls []struct { + GpuInstance nvml.GpuInstance + } + mock.lockGpuInstanceGetCreatableVgpus.RLock() + calls = mock.calls.GpuInstanceGetCreatableVgpus + mock.lockGpuInstanceGetCreatableVgpus.RUnlock() + return calls +} + // GpuInstanceGetInfo calls GpuInstanceGetInfoFunc. 
func (mock *Interface) GpuInstanceGetInfo(gpuInstance nvml.GpuInstance) (nvml.GpuInstanceInfo, nvml.Return) { if mock.GpuInstanceGetInfoFunc == nil { @@ -11823,19 +14467,219 @@ func (mock *Interface) GpuInstanceGetInfo(gpuInstance nvml.GpuInstance) (nvml.Gp return mock.GpuInstanceGetInfoFunc(gpuInstance) } -// GpuInstanceGetInfoCalls gets all the calls that were made to GpuInstanceGetInfo. +// GpuInstanceGetInfoCalls gets all the calls that were made to GpuInstanceGetInfo. +// Check the length with: +// +// len(mockedInterface.GpuInstanceGetInfoCalls()) +func (mock *Interface) GpuInstanceGetInfoCalls() []struct { + GpuInstance nvml.GpuInstance +} { + var calls []struct { + GpuInstance nvml.GpuInstance + } + mock.lockGpuInstanceGetInfo.RLock() + calls = mock.calls.GpuInstanceGetInfo + mock.lockGpuInstanceGetInfo.RUnlock() + return calls +} + +// GpuInstanceGetVgpuHeterogeneousMode calls GpuInstanceGetVgpuHeterogeneousModeFunc. +func (mock *Interface) GpuInstanceGetVgpuHeterogeneousMode(gpuInstance nvml.GpuInstance) (nvml.VgpuHeterogeneousMode, nvml.Return) { + if mock.GpuInstanceGetVgpuHeterogeneousModeFunc == nil { + panic("Interface.GpuInstanceGetVgpuHeterogeneousModeFunc: method is nil but Interface.GpuInstanceGetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + }{ + GpuInstance: gpuInstance, + } + mock.lockGpuInstanceGetVgpuHeterogeneousMode.Lock() + mock.calls.GpuInstanceGetVgpuHeterogeneousMode = append(mock.calls.GpuInstanceGetVgpuHeterogeneousMode, callInfo) + mock.lockGpuInstanceGetVgpuHeterogeneousMode.Unlock() + return mock.GpuInstanceGetVgpuHeterogeneousModeFunc(gpuInstance) +} + +// GpuInstanceGetVgpuHeterogeneousModeCalls gets all the calls that were made to GpuInstanceGetVgpuHeterogeneousMode. 
+// Check the length with: +// +// len(mockedInterface.GpuInstanceGetVgpuHeterogeneousModeCalls()) +func (mock *Interface) GpuInstanceGetVgpuHeterogeneousModeCalls() []struct { + GpuInstance nvml.GpuInstance +} { + var calls []struct { + GpuInstance nvml.GpuInstance + } + mock.lockGpuInstanceGetVgpuHeterogeneousMode.RLock() + calls = mock.calls.GpuInstanceGetVgpuHeterogeneousMode + mock.lockGpuInstanceGetVgpuHeterogeneousMode.RUnlock() + return calls +} + +// GpuInstanceGetVgpuSchedulerLog calls GpuInstanceGetVgpuSchedulerLogFunc. +func (mock *Interface) GpuInstanceGetVgpuSchedulerLog(gpuInstance nvml.GpuInstance) (nvml.VgpuSchedulerLogInfo, nvml.Return) { + if mock.GpuInstanceGetVgpuSchedulerLogFunc == nil { + panic("Interface.GpuInstanceGetVgpuSchedulerLogFunc: method is nil but Interface.GpuInstanceGetVgpuSchedulerLog was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + }{ + GpuInstance: gpuInstance, + } + mock.lockGpuInstanceGetVgpuSchedulerLog.Lock() + mock.calls.GpuInstanceGetVgpuSchedulerLog = append(mock.calls.GpuInstanceGetVgpuSchedulerLog, callInfo) + mock.lockGpuInstanceGetVgpuSchedulerLog.Unlock() + return mock.GpuInstanceGetVgpuSchedulerLogFunc(gpuInstance) +} + +// GpuInstanceGetVgpuSchedulerLogCalls gets all the calls that were made to GpuInstanceGetVgpuSchedulerLog. +// Check the length with: +// +// len(mockedInterface.GpuInstanceGetVgpuSchedulerLogCalls()) +func (mock *Interface) GpuInstanceGetVgpuSchedulerLogCalls() []struct { + GpuInstance nvml.GpuInstance +} { + var calls []struct { + GpuInstance nvml.GpuInstance + } + mock.lockGpuInstanceGetVgpuSchedulerLog.RLock() + calls = mock.calls.GpuInstanceGetVgpuSchedulerLog + mock.lockGpuInstanceGetVgpuSchedulerLog.RUnlock() + return calls +} + +// GpuInstanceGetVgpuSchedulerState calls GpuInstanceGetVgpuSchedulerStateFunc. 
+func (mock *Interface) GpuInstanceGetVgpuSchedulerState(gpuInstance nvml.GpuInstance) (nvml.VgpuSchedulerStateInfo, nvml.Return) { + if mock.GpuInstanceGetVgpuSchedulerStateFunc == nil { + panic("Interface.GpuInstanceGetVgpuSchedulerStateFunc: method is nil but Interface.GpuInstanceGetVgpuSchedulerState was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + }{ + GpuInstance: gpuInstance, + } + mock.lockGpuInstanceGetVgpuSchedulerState.Lock() + mock.calls.GpuInstanceGetVgpuSchedulerState = append(mock.calls.GpuInstanceGetVgpuSchedulerState, callInfo) + mock.lockGpuInstanceGetVgpuSchedulerState.Unlock() + return mock.GpuInstanceGetVgpuSchedulerStateFunc(gpuInstance) +} + +// GpuInstanceGetVgpuSchedulerStateCalls gets all the calls that were made to GpuInstanceGetVgpuSchedulerState. +// Check the length with: +// +// len(mockedInterface.GpuInstanceGetVgpuSchedulerStateCalls()) +func (mock *Interface) GpuInstanceGetVgpuSchedulerStateCalls() []struct { + GpuInstance nvml.GpuInstance +} { + var calls []struct { + GpuInstance nvml.GpuInstance + } + mock.lockGpuInstanceGetVgpuSchedulerState.RLock() + calls = mock.calls.GpuInstanceGetVgpuSchedulerState + mock.lockGpuInstanceGetVgpuSchedulerState.RUnlock() + return calls +} + +// GpuInstanceGetVgpuTypeCreatablePlacements calls GpuInstanceGetVgpuTypeCreatablePlacementsFunc. 
+func (mock *Interface) GpuInstanceGetVgpuTypeCreatablePlacements(gpuInstance nvml.GpuInstance) (nvml.VgpuCreatablePlacementInfo, nvml.Return) { + if mock.GpuInstanceGetVgpuTypeCreatablePlacementsFunc == nil { + panic("Interface.GpuInstanceGetVgpuTypeCreatablePlacementsFunc: method is nil but Interface.GpuInstanceGetVgpuTypeCreatablePlacements was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + }{ + GpuInstance: gpuInstance, + } + mock.lockGpuInstanceGetVgpuTypeCreatablePlacements.Lock() + mock.calls.GpuInstanceGetVgpuTypeCreatablePlacements = append(mock.calls.GpuInstanceGetVgpuTypeCreatablePlacements, callInfo) + mock.lockGpuInstanceGetVgpuTypeCreatablePlacements.Unlock() + return mock.GpuInstanceGetVgpuTypeCreatablePlacementsFunc(gpuInstance) +} + +// GpuInstanceGetVgpuTypeCreatablePlacementsCalls gets all the calls that were made to GpuInstanceGetVgpuTypeCreatablePlacements. +// Check the length with: +// +// len(mockedInterface.GpuInstanceGetVgpuTypeCreatablePlacementsCalls()) +func (mock *Interface) GpuInstanceGetVgpuTypeCreatablePlacementsCalls() []struct { + GpuInstance nvml.GpuInstance +} { + var calls []struct { + GpuInstance nvml.GpuInstance + } + mock.lockGpuInstanceGetVgpuTypeCreatablePlacements.RLock() + calls = mock.calls.GpuInstanceGetVgpuTypeCreatablePlacements + mock.lockGpuInstanceGetVgpuTypeCreatablePlacements.RUnlock() + return calls +} + +// GpuInstanceSetVgpuHeterogeneousMode calls GpuInstanceSetVgpuHeterogeneousModeFunc. 
+func (mock *Interface) GpuInstanceSetVgpuHeterogeneousMode(gpuInstance nvml.GpuInstance, vgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode) nvml.Return { + if mock.GpuInstanceSetVgpuHeterogeneousModeFunc == nil { + panic("Interface.GpuInstanceSetVgpuHeterogeneousModeFunc: method is nil but Interface.GpuInstanceSetVgpuHeterogeneousMode was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode + }{ + GpuInstance: gpuInstance, + VgpuHeterogeneousMode: vgpuHeterogeneousMode, + } + mock.lockGpuInstanceSetVgpuHeterogeneousMode.Lock() + mock.calls.GpuInstanceSetVgpuHeterogeneousMode = append(mock.calls.GpuInstanceSetVgpuHeterogeneousMode, callInfo) + mock.lockGpuInstanceSetVgpuHeterogeneousMode.Unlock() + return mock.GpuInstanceSetVgpuHeterogeneousModeFunc(gpuInstance, vgpuHeterogeneousMode) +} + +// GpuInstanceSetVgpuHeterogeneousModeCalls gets all the calls that were made to GpuInstanceSetVgpuHeterogeneousMode. +// Check the length with: +// +// len(mockedInterface.GpuInstanceSetVgpuHeterogeneousModeCalls()) +func (mock *Interface) GpuInstanceSetVgpuHeterogeneousModeCalls() []struct { + GpuInstance nvml.GpuInstance + VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode +} { + var calls []struct { + GpuInstance nvml.GpuInstance + VgpuHeterogeneousMode *nvml.VgpuHeterogeneousMode + } + mock.lockGpuInstanceSetVgpuHeterogeneousMode.RLock() + calls = mock.calls.GpuInstanceSetVgpuHeterogeneousMode + mock.lockGpuInstanceSetVgpuHeterogeneousMode.RUnlock() + return calls +} + +// GpuInstanceSetVgpuSchedulerState calls GpuInstanceSetVgpuSchedulerStateFunc. 
+func (mock *Interface) GpuInstanceSetVgpuSchedulerState(gpuInstance nvml.GpuInstance, vgpuSchedulerState *nvml.VgpuSchedulerState) nvml.Return { + if mock.GpuInstanceSetVgpuSchedulerStateFunc == nil { + panic("Interface.GpuInstanceSetVgpuSchedulerStateFunc: method is nil but Interface.GpuInstanceSetVgpuSchedulerState was just called") + } + callInfo := struct { + GpuInstance nvml.GpuInstance + VgpuSchedulerState *nvml.VgpuSchedulerState + }{ + GpuInstance: gpuInstance, + VgpuSchedulerState: vgpuSchedulerState, + } + mock.lockGpuInstanceSetVgpuSchedulerState.Lock() + mock.calls.GpuInstanceSetVgpuSchedulerState = append(mock.calls.GpuInstanceSetVgpuSchedulerState, callInfo) + mock.lockGpuInstanceSetVgpuSchedulerState.Unlock() + return mock.GpuInstanceSetVgpuSchedulerStateFunc(gpuInstance, vgpuSchedulerState) +} + +// GpuInstanceSetVgpuSchedulerStateCalls gets all the calls that were made to GpuInstanceSetVgpuSchedulerState. // Check the length with: // -// len(mockedInterface.GpuInstanceGetInfoCalls()) -func (mock *Interface) GpuInstanceGetInfoCalls() []struct { - GpuInstance nvml.GpuInstance +// len(mockedInterface.GpuInstanceSetVgpuSchedulerStateCalls()) +func (mock *Interface) GpuInstanceSetVgpuSchedulerStateCalls() []struct { + GpuInstance nvml.GpuInstance + VgpuSchedulerState *nvml.VgpuSchedulerState } { var calls []struct { - GpuInstance nvml.GpuInstance + GpuInstance nvml.GpuInstance + VgpuSchedulerState *nvml.VgpuSchedulerState } - mock.lockGpuInstanceGetInfo.RLock() - calls = mock.calls.GpuInstanceGetInfo - mock.lockGpuInstanceGetInfo.RUnlock() + mock.lockGpuInstanceSetVgpuSchedulerState.RLock() + calls = mock.calls.GpuInstanceSetVgpuSchedulerState + mock.lockGpuInstanceSetVgpuSchedulerState.RUnlock() return calls } @@ -11957,6 +14801,237 @@ func (mock *Interface) ShutdownCalls() []struct { return calls } +// SystemEventSetCreate calls SystemEventSetCreateFunc. 
+func (mock *Interface) SystemEventSetCreate(systemEventSetCreateRequest *nvml.SystemEventSetCreateRequest) nvml.Return { + if mock.SystemEventSetCreateFunc == nil { + panic("Interface.SystemEventSetCreateFunc: method is nil but Interface.SystemEventSetCreate was just called") + } + callInfo := struct { + SystemEventSetCreateRequest *nvml.SystemEventSetCreateRequest + }{ + SystemEventSetCreateRequest: systemEventSetCreateRequest, + } + mock.lockSystemEventSetCreate.Lock() + mock.calls.SystemEventSetCreate = append(mock.calls.SystemEventSetCreate, callInfo) + mock.lockSystemEventSetCreate.Unlock() + return mock.SystemEventSetCreateFunc(systemEventSetCreateRequest) +} + +// SystemEventSetCreateCalls gets all the calls that were made to SystemEventSetCreate. +// Check the length with: +// +// len(mockedInterface.SystemEventSetCreateCalls()) +func (mock *Interface) SystemEventSetCreateCalls() []struct { + SystemEventSetCreateRequest *nvml.SystemEventSetCreateRequest +} { + var calls []struct { + SystemEventSetCreateRequest *nvml.SystemEventSetCreateRequest + } + mock.lockSystemEventSetCreate.RLock() + calls = mock.calls.SystemEventSetCreate + mock.lockSystemEventSetCreate.RUnlock() + return calls +} + +// SystemEventSetFree calls SystemEventSetFreeFunc. +func (mock *Interface) SystemEventSetFree(systemEventSetFreeRequest *nvml.SystemEventSetFreeRequest) nvml.Return { + if mock.SystemEventSetFreeFunc == nil { + panic("Interface.SystemEventSetFreeFunc: method is nil but Interface.SystemEventSetFree was just called") + } + callInfo := struct { + SystemEventSetFreeRequest *nvml.SystemEventSetFreeRequest + }{ + SystemEventSetFreeRequest: systemEventSetFreeRequest, + } + mock.lockSystemEventSetFree.Lock() + mock.calls.SystemEventSetFree = append(mock.calls.SystemEventSetFree, callInfo) + mock.lockSystemEventSetFree.Unlock() + return mock.SystemEventSetFreeFunc(systemEventSetFreeRequest) +} + +// SystemEventSetFreeCalls gets all the calls that were made to SystemEventSetFree. 
+// Check the length with: +// +// len(mockedInterface.SystemEventSetFreeCalls()) +func (mock *Interface) SystemEventSetFreeCalls() []struct { + SystemEventSetFreeRequest *nvml.SystemEventSetFreeRequest +} { + var calls []struct { + SystemEventSetFreeRequest *nvml.SystemEventSetFreeRequest + } + mock.lockSystemEventSetFree.RLock() + calls = mock.calls.SystemEventSetFree + mock.lockSystemEventSetFree.RUnlock() + return calls +} + +// SystemEventSetWait calls SystemEventSetWaitFunc. +func (mock *Interface) SystemEventSetWait(systemEventSetWaitRequest *nvml.SystemEventSetWaitRequest) nvml.Return { + if mock.SystemEventSetWaitFunc == nil { + panic("Interface.SystemEventSetWaitFunc: method is nil but Interface.SystemEventSetWait was just called") + } + callInfo := struct { + SystemEventSetWaitRequest *nvml.SystemEventSetWaitRequest + }{ + SystemEventSetWaitRequest: systemEventSetWaitRequest, + } + mock.lockSystemEventSetWait.Lock() + mock.calls.SystemEventSetWait = append(mock.calls.SystemEventSetWait, callInfo) + mock.lockSystemEventSetWait.Unlock() + return mock.SystemEventSetWaitFunc(systemEventSetWaitRequest) +} + +// SystemEventSetWaitCalls gets all the calls that were made to SystemEventSetWait. +// Check the length with: +// +// len(mockedInterface.SystemEventSetWaitCalls()) +func (mock *Interface) SystemEventSetWaitCalls() []struct { + SystemEventSetWaitRequest *nvml.SystemEventSetWaitRequest +} { + var calls []struct { + SystemEventSetWaitRequest *nvml.SystemEventSetWaitRequest + } + mock.lockSystemEventSetWait.RLock() + calls = mock.calls.SystemEventSetWait + mock.lockSystemEventSetWait.RUnlock() + return calls +} + +// SystemGetConfComputeCapabilities calls SystemGetConfComputeCapabilitiesFunc. 
+func (mock *Interface) SystemGetConfComputeCapabilities() (nvml.ConfComputeSystemCaps, nvml.Return) { + if mock.SystemGetConfComputeCapabilitiesFunc == nil { + panic("Interface.SystemGetConfComputeCapabilitiesFunc: method is nil but Interface.SystemGetConfComputeCapabilities was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetConfComputeCapabilities.Lock() + mock.calls.SystemGetConfComputeCapabilities = append(mock.calls.SystemGetConfComputeCapabilities, callInfo) + mock.lockSystemGetConfComputeCapabilities.Unlock() + return mock.SystemGetConfComputeCapabilitiesFunc() +} + +// SystemGetConfComputeCapabilitiesCalls gets all the calls that were made to SystemGetConfComputeCapabilities. +// Check the length with: +// +// len(mockedInterface.SystemGetConfComputeCapabilitiesCalls()) +func (mock *Interface) SystemGetConfComputeCapabilitiesCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetConfComputeCapabilities.RLock() + calls = mock.calls.SystemGetConfComputeCapabilities + mock.lockSystemGetConfComputeCapabilities.RUnlock() + return calls +} + +// SystemGetConfComputeGpusReadyState calls SystemGetConfComputeGpusReadyStateFunc. +func (mock *Interface) SystemGetConfComputeGpusReadyState() (uint32, nvml.Return) { + if mock.SystemGetConfComputeGpusReadyStateFunc == nil { + panic("Interface.SystemGetConfComputeGpusReadyStateFunc: method is nil but Interface.SystemGetConfComputeGpusReadyState was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetConfComputeGpusReadyState.Lock() + mock.calls.SystemGetConfComputeGpusReadyState = append(mock.calls.SystemGetConfComputeGpusReadyState, callInfo) + mock.lockSystemGetConfComputeGpusReadyState.Unlock() + return mock.SystemGetConfComputeGpusReadyStateFunc() +} + +// SystemGetConfComputeGpusReadyStateCalls gets all the calls that were made to SystemGetConfComputeGpusReadyState. 
+// Check the length with: +// +// len(mockedInterface.SystemGetConfComputeGpusReadyStateCalls()) +func (mock *Interface) SystemGetConfComputeGpusReadyStateCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetConfComputeGpusReadyState.RLock() + calls = mock.calls.SystemGetConfComputeGpusReadyState + mock.lockSystemGetConfComputeGpusReadyState.RUnlock() + return calls +} + +// SystemGetConfComputeKeyRotationThresholdInfo calls SystemGetConfComputeKeyRotationThresholdInfoFunc. +func (mock *Interface) SystemGetConfComputeKeyRotationThresholdInfo() (nvml.ConfComputeGetKeyRotationThresholdInfo, nvml.Return) { + if mock.SystemGetConfComputeKeyRotationThresholdInfoFunc == nil { + panic("Interface.SystemGetConfComputeKeyRotationThresholdInfoFunc: method is nil but Interface.SystemGetConfComputeKeyRotationThresholdInfo was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetConfComputeKeyRotationThresholdInfo.Lock() + mock.calls.SystemGetConfComputeKeyRotationThresholdInfo = append(mock.calls.SystemGetConfComputeKeyRotationThresholdInfo, callInfo) + mock.lockSystemGetConfComputeKeyRotationThresholdInfo.Unlock() + return mock.SystemGetConfComputeKeyRotationThresholdInfoFunc() +} + +// SystemGetConfComputeKeyRotationThresholdInfoCalls gets all the calls that were made to SystemGetConfComputeKeyRotationThresholdInfo. +// Check the length with: +// +// len(mockedInterface.SystemGetConfComputeKeyRotationThresholdInfoCalls()) +func (mock *Interface) SystemGetConfComputeKeyRotationThresholdInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetConfComputeKeyRotationThresholdInfo.RLock() + calls = mock.calls.SystemGetConfComputeKeyRotationThresholdInfo + mock.lockSystemGetConfComputeKeyRotationThresholdInfo.RUnlock() + return calls +} + +// SystemGetConfComputeSettings calls SystemGetConfComputeSettingsFunc. 
+func (mock *Interface) SystemGetConfComputeSettings() (nvml.SystemConfComputeSettings, nvml.Return) { + if mock.SystemGetConfComputeSettingsFunc == nil { + panic("Interface.SystemGetConfComputeSettingsFunc: method is nil but Interface.SystemGetConfComputeSettings was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetConfComputeSettings.Lock() + mock.calls.SystemGetConfComputeSettings = append(mock.calls.SystemGetConfComputeSettings, callInfo) + mock.lockSystemGetConfComputeSettings.Unlock() + return mock.SystemGetConfComputeSettingsFunc() +} + +// SystemGetConfComputeSettingsCalls gets all the calls that were made to SystemGetConfComputeSettings. +// Check the length with: +// +// len(mockedInterface.SystemGetConfComputeSettingsCalls()) +func (mock *Interface) SystemGetConfComputeSettingsCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetConfComputeSettings.RLock() + calls = mock.calls.SystemGetConfComputeSettings + mock.lockSystemGetConfComputeSettings.RUnlock() + return calls +} + +// SystemGetConfComputeState calls SystemGetConfComputeStateFunc. +func (mock *Interface) SystemGetConfComputeState() (nvml.ConfComputeSystemState, nvml.Return) { + if mock.SystemGetConfComputeStateFunc == nil { + panic("Interface.SystemGetConfComputeStateFunc: method is nil but Interface.SystemGetConfComputeState was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetConfComputeState.Lock() + mock.calls.SystemGetConfComputeState = append(mock.calls.SystemGetConfComputeState, callInfo) + mock.lockSystemGetConfComputeState.Unlock() + return mock.SystemGetConfComputeStateFunc() +} + +// SystemGetConfComputeStateCalls gets all the calls that were made to SystemGetConfComputeState. 
+// Check the length with: +// +// len(mockedInterface.SystemGetConfComputeStateCalls()) +func (mock *Interface) SystemGetConfComputeStateCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetConfComputeState.RLock() + calls = mock.calls.SystemGetConfComputeState + mock.lockSystemGetConfComputeState.RUnlock() + return calls +} + // SystemGetCudaDriverVersion calls SystemGetCudaDriverVersionFunc. func (mock *Interface) SystemGetCudaDriverVersion() (int, nvml.Return) { if mock.SystemGetCudaDriverVersionFunc == nil { @@ -12011,6 +15086,33 @@ func (mock *Interface) SystemGetCudaDriverVersion_v2Calls() []struct { return calls } +// SystemGetDriverBranch calls SystemGetDriverBranchFunc. +func (mock *Interface) SystemGetDriverBranch() (nvml.SystemDriverBranchInfo, nvml.Return) { + if mock.SystemGetDriverBranchFunc == nil { + panic("Interface.SystemGetDriverBranchFunc: method is nil but Interface.SystemGetDriverBranch was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetDriverBranch.Lock() + mock.calls.SystemGetDriverBranch = append(mock.calls.SystemGetDriverBranch, callInfo) + mock.lockSystemGetDriverBranch.Unlock() + return mock.SystemGetDriverBranchFunc() +} + +// SystemGetDriverBranchCalls gets all the calls that were made to SystemGetDriverBranch. +// Check the length with: +// +// len(mockedInterface.SystemGetDriverBranchCalls()) +func (mock *Interface) SystemGetDriverBranchCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetDriverBranch.RLock() + calls = mock.calls.SystemGetDriverBranch + mock.lockSystemGetDriverBranch.RUnlock() + return calls +} + // SystemGetDriverVersion calls SystemGetDriverVersionFunc. func (mock *Interface) SystemGetDriverVersion() (string, nvml.Return) { if mock.SystemGetDriverVersionFunc == nil { @@ -12092,6 +15194,33 @@ func (mock *Interface) SystemGetNVMLVersionCalls() []struct { return calls } +// SystemGetNvlinkBwMode calls SystemGetNvlinkBwModeFunc. 
+func (mock *Interface) SystemGetNvlinkBwMode() (uint32, nvml.Return) { + if mock.SystemGetNvlinkBwModeFunc == nil { + panic("Interface.SystemGetNvlinkBwModeFunc: method is nil but Interface.SystemGetNvlinkBwMode was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetNvlinkBwMode.Lock() + mock.calls.SystemGetNvlinkBwMode = append(mock.calls.SystemGetNvlinkBwMode, callInfo) + mock.lockSystemGetNvlinkBwMode.Unlock() + return mock.SystemGetNvlinkBwModeFunc() +} + +// SystemGetNvlinkBwModeCalls gets all the calls that were made to SystemGetNvlinkBwMode. +// Check the length with: +// +// len(mockedInterface.SystemGetNvlinkBwModeCalls()) +func (mock *Interface) SystemGetNvlinkBwModeCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetNvlinkBwMode.RLock() + calls = mock.calls.SystemGetNvlinkBwMode + mock.lockSystemGetNvlinkBwMode.RUnlock() + return calls +} + // SystemGetProcessName calls SystemGetProcessNameFunc. func (mock *Interface) SystemGetProcessName(n int) (string, nvml.Return) { if mock.SystemGetProcessNameFunc == nil { @@ -12156,6 +15285,134 @@ func (mock *Interface) SystemGetTopologyGpuSetCalls() []struct { return calls } +// SystemRegisterEvents calls SystemRegisterEventsFunc. +func (mock *Interface) SystemRegisterEvents(systemRegisterEventRequest *nvml.SystemRegisterEventRequest) nvml.Return { + if mock.SystemRegisterEventsFunc == nil { + panic("Interface.SystemRegisterEventsFunc: method is nil but Interface.SystemRegisterEvents was just called") + } + callInfo := struct { + SystemRegisterEventRequest *nvml.SystemRegisterEventRequest + }{ + SystemRegisterEventRequest: systemRegisterEventRequest, + } + mock.lockSystemRegisterEvents.Lock() + mock.calls.SystemRegisterEvents = append(mock.calls.SystemRegisterEvents, callInfo) + mock.lockSystemRegisterEvents.Unlock() + return mock.SystemRegisterEventsFunc(systemRegisterEventRequest) +} + +// SystemRegisterEventsCalls gets all the calls that were made to SystemRegisterEvents. 
+// Check the length with: +// +// len(mockedInterface.SystemRegisterEventsCalls()) +func (mock *Interface) SystemRegisterEventsCalls() []struct { + SystemRegisterEventRequest *nvml.SystemRegisterEventRequest +} { + var calls []struct { + SystemRegisterEventRequest *nvml.SystemRegisterEventRequest + } + mock.lockSystemRegisterEvents.RLock() + calls = mock.calls.SystemRegisterEvents + mock.lockSystemRegisterEvents.RUnlock() + return calls +} + +// SystemSetConfComputeGpusReadyState calls SystemSetConfComputeGpusReadyStateFunc. +func (mock *Interface) SystemSetConfComputeGpusReadyState(v uint32) nvml.Return { + if mock.SystemSetConfComputeGpusReadyStateFunc == nil { + panic("Interface.SystemSetConfComputeGpusReadyStateFunc: method is nil but Interface.SystemSetConfComputeGpusReadyState was just called") + } + callInfo := struct { + V uint32 + }{ + V: v, + } + mock.lockSystemSetConfComputeGpusReadyState.Lock() + mock.calls.SystemSetConfComputeGpusReadyState = append(mock.calls.SystemSetConfComputeGpusReadyState, callInfo) + mock.lockSystemSetConfComputeGpusReadyState.Unlock() + return mock.SystemSetConfComputeGpusReadyStateFunc(v) +} + +// SystemSetConfComputeGpusReadyStateCalls gets all the calls that were made to SystemSetConfComputeGpusReadyState. +// Check the length with: +// +// len(mockedInterface.SystemSetConfComputeGpusReadyStateCalls()) +func (mock *Interface) SystemSetConfComputeGpusReadyStateCalls() []struct { + V uint32 +} { + var calls []struct { + V uint32 + } + mock.lockSystemSetConfComputeGpusReadyState.RLock() + calls = mock.calls.SystemSetConfComputeGpusReadyState + mock.lockSystemSetConfComputeGpusReadyState.RUnlock() + return calls +} + +// SystemSetConfComputeKeyRotationThresholdInfo calls SystemSetConfComputeKeyRotationThresholdInfoFunc. 
+func (mock *Interface) SystemSetConfComputeKeyRotationThresholdInfo(confComputeSetKeyRotationThresholdInfo nvml.ConfComputeSetKeyRotationThresholdInfo) nvml.Return { + if mock.SystemSetConfComputeKeyRotationThresholdInfoFunc == nil { + panic("Interface.SystemSetConfComputeKeyRotationThresholdInfoFunc: method is nil but Interface.SystemSetConfComputeKeyRotationThresholdInfo was just called") + } + callInfo := struct { + ConfComputeSetKeyRotationThresholdInfo nvml.ConfComputeSetKeyRotationThresholdInfo + }{ + ConfComputeSetKeyRotationThresholdInfo: confComputeSetKeyRotationThresholdInfo, + } + mock.lockSystemSetConfComputeKeyRotationThresholdInfo.Lock() + mock.calls.SystemSetConfComputeKeyRotationThresholdInfo = append(mock.calls.SystemSetConfComputeKeyRotationThresholdInfo, callInfo) + mock.lockSystemSetConfComputeKeyRotationThresholdInfo.Unlock() + return mock.SystemSetConfComputeKeyRotationThresholdInfoFunc(confComputeSetKeyRotationThresholdInfo) +} + +// SystemSetConfComputeKeyRotationThresholdInfoCalls gets all the calls that were made to SystemSetConfComputeKeyRotationThresholdInfo. +// Check the length with: +// +// len(mockedInterface.SystemSetConfComputeKeyRotationThresholdInfoCalls()) +func (mock *Interface) SystemSetConfComputeKeyRotationThresholdInfoCalls() []struct { + ConfComputeSetKeyRotationThresholdInfo nvml.ConfComputeSetKeyRotationThresholdInfo +} { + var calls []struct { + ConfComputeSetKeyRotationThresholdInfo nvml.ConfComputeSetKeyRotationThresholdInfo + } + mock.lockSystemSetConfComputeKeyRotationThresholdInfo.RLock() + calls = mock.calls.SystemSetConfComputeKeyRotationThresholdInfo + mock.lockSystemSetConfComputeKeyRotationThresholdInfo.RUnlock() + return calls +} + +// SystemSetNvlinkBwMode calls SystemSetNvlinkBwModeFunc. 
+func (mock *Interface) SystemSetNvlinkBwMode(v uint32) nvml.Return { + if mock.SystemSetNvlinkBwModeFunc == nil { + panic("Interface.SystemSetNvlinkBwModeFunc: method is nil but Interface.SystemSetNvlinkBwMode was just called") + } + callInfo := struct { + V uint32 + }{ + V: v, + } + mock.lockSystemSetNvlinkBwMode.Lock() + mock.calls.SystemSetNvlinkBwMode = append(mock.calls.SystemSetNvlinkBwMode, callInfo) + mock.lockSystemSetNvlinkBwMode.Unlock() + return mock.SystemSetNvlinkBwModeFunc(v) +} + +// SystemSetNvlinkBwModeCalls gets all the calls that were made to SystemSetNvlinkBwMode. +// Check the length with: +// +// len(mockedInterface.SystemSetNvlinkBwModeCalls()) +func (mock *Interface) SystemSetNvlinkBwModeCalls() []struct { + V uint32 +} { + var calls []struct { + V uint32 + } + mock.lockSystemSetNvlinkBwMode.RLock() + calls = mock.calls.SystemSetNvlinkBwMode + mock.lockSystemSetNvlinkBwMode.RUnlock() + return calls +} + // UnitGetCount calls UnitGetCountFunc. func (mock *Interface) UnitGetCount() (int, nvml.Return) { if mock.UnitGetCountFunc == nil { @@ -13027,6 +16284,38 @@ func (mock *Interface) VgpuInstanceGetMetadataCalls() []struct { return calls } +// VgpuInstanceGetRuntimeStateSize calls VgpuInstanceGetRuntimeStateSizeFunc. 
+func (mock *Interface) VgpuInstanceGetRuntimeStateSize(vgpuInstance nvml.VgpuInstance) (nvml.VgpuRuntimeState, nvml.Return) { + if mock.VgpuInstanceGetRuntimeStateSizeFunc == nil { + panic("Interface.VgpuInstanceGetRuntimeStateSizeFunc: method is nil but Interface.VgpuInstanceGetRuntimeStateSize was just called") + } + callInfo := struct { + VgpuInstance nvml.VgpuInstance + }{ + VgpuInstance: vgpuInstance, + } + mock.lockVgpuInstanceGetRuntimeStateSize.Lock() + mock.calls.VgpuInstanceGetRuntimeStateSize = append(mock.calls.VgpuInstanceGetRuntimeStateSize, callInfo) + mock.lockVgpuInstanceGetRuntimeStateSize.Unlock() + return mock.VgpuInstanceGetRuntimeStateSizeFunc(vgpuInstance) +} + +// VgpuInstanceGetRuntimeStateSizeCalls gets all the calls that were made to VgpuInstanceGetRuntimeStateSize. +// Check the length with: +// +// len(mockedInterface.VgpuInstanceGetRuntimeStateSizeCalls()) +func (mock *Interface) VgpuInstanceGetRuntimeStateSizeCalls() []struct { + VgpuInstance nvml.VgpuInstance +} { + var calls []struct { + VgpuInstance nvml.VgpuInstance + } + mock.lockVgpuInstanceGetRuntimeStateSize.RLock() + calls = mock.calls.VgpuInstanceGetRuntimeStateSize + mock.lockVgpuInstanceGetRuntimeStateSize.RUnlock() + return calls +} + // VgpuInstanceGetType calls VgpuInstanceGetTypeFunc. func (mock *Interface) VgpuInstanceGetType(vgpuInstance nvml.VgpuInstance) (nvml.VgpuTypeId, nvml.Return) { if mock.VgpuInstanceGetTypeFunc == nil { @@ -13191,6 +16480,38 @@ func (mock *Interface) VgpuInstanceSetEncoderCapacityCalls() []struct { return calls } +// VgpuTypeGetBAR1Info calls VgpuTypeGetBAR1InfoFunc. 
+func (mock *Interface) VgpuTypeGetBAR1Info(vgpuTypeId nvml.VgpuTypeId) (nvml.VgpuTypeBar1Info, nvml.Return) { + if mock.VgpuTypeGetBAR1InfoFunc == nil { + panic("Interface.VgpuTypeGetBAR1InfoFunc: method is nil but Interface.VgpuTypeGetBAR1Info was just called") + } + callInfo := struct { + VgpuTypeId nvml.VgpuTypeId + }{ + VgpuTypeId: vgpuTypeId, + } + mock.lockVgpuTypeGetBAR1Info.Lock() + mock.calls.VgpuTypeGetBAR1Info = append(mock.calls.VgpuTypeGetBAR1Info, callInfo) + mock.lockVgpuTypeGetBAR1Info.Unlock() + return mock.VgpuTypeGetBAR1InfoFunc(vgpuTypeId) +} + +// VgpuTypeGetBAR1InfoCalls gets all the calls that were made to VgpuTypeGetBAR1Info. +// Check the length with: +// +// len(mockedInterface.VgpuTypeGetBAR1InfoCalls()) +func (mock *Interface) VgpuTypeGetBAR1InfoCalls() []struct { + VgpuTypeId nvml.VgpuTypeId +} { + var calls []struct { + VgpuTypeId nvml.VgpuTypeId + } + mock.lockVgpuTypeGetBAR1Info.RLock() + calls = mock.calls.VgpuTypeGetBAR1Info + mock.lockVgpuTypeGetBAR1Info.RUnlock() + return calls +} + // VgpuTypeGetCapabilities calls VgpuTypeGetCapabilitiesFunc. func (mock *Interface) VgpuTypeGetCapabilities(vgpuTypeId nvml.VgpuTypeId, vgpuCapability nvml.VgpuCapability) (bool, nvml.Return) { if mock.VgpuTypeGetCapabilitiesFunc == nil { @@ -13455,6 +16776,38 @@ func (mock *Interface) VgpuTypeGetMaxInstancesCalls() []struct { return calls } +// VgpuTypeGetMaxInstancesPerGpuInstance calls VgpuTypeGetMaxInstancesPerGpuInstanceFunc. 
+func (mock *Interface) VgpuTypeGetMaxInstancesPerGpuInstance(vgpuTypeMaxInstance *nvml.VgpuTypeMaxInstance) nvml.Return { + if mock.VgpuTypeGetMaxInstancesPerGpuInstanceFunc == nil { + panic("Interface.VgpuTypeGetMaxInstancesPerGpuInstanceFunc: method is nil but Interface.VgpuTypeGetMaxInstancesPerGpuInstance was just called") + } + callInfo := struct { + VgpuTypeMaxInstance *nvml.VgpuTypeMaxInstance + }{ + VgpuTypeMaxInstance: vgpuTypeMaxInstance, + } + mock.lockVgpuTypeGetMaxInstancesPerGpuInstance.Lock() + mock.calls.VgpuTypeGetMaxInstancesPerGpuInstance = append(mock.calls.VgpuTypeGetMaxInstancesPerGpuInstance, callInfo) + mock.lockVgpuTypeGetMaxInstancesPerGpuInstance.Unlock() + return mock.VgpuTypeGetMaxInstancesPerGpuInstanceFunc(vgpuTypeMaxInstance) +} + +// VgpuTypeGetMaxInstancesPerGpuInstanceCalls gets all the calls that were made to VgpuTypeGetMaxInstancesPerGpuInstance. +// Check the length with: +// +// len(mockedInterface.VgpuTypeGetMaxInstancesPerGpuInstanceCalls()) +func (mock *Interface) VgpuTypeGetMaxInstancesPerGpuInstanceCalls() []struct { + VgpuTypeMaxInstance *nvml.VgpuTypeMaxInstance +} { + var calls []struct { + VgpuTypeMaxInstance *nvml.VgpuTypeMaxInstance + } + mock.lockVgpuTypeGetMaxInstancesPerGpuInstance.RLock() + calls = mock.calls.VgpuTypeGetMaxInstancesPerGpuInstance + mock.lockVgpuTypeGetMaxInstancesPerGpuInstance.RUnlock() + return calls +} + // VgpuTypeGetMaxInstancesPerVm calls VgpuTypeGetMaxInstancesPerVmFunc. 
func (mock *Interface) VgpuTypeGetMaxInstancesPerVm(vgpuTypeId nvml.VgpuTypeId) (int, nvml.Return) { if mock.VgpuTypeGetMaxInstancesPerVmFunc == nil { diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgpuinstance.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgpuinstance.go index e0af013..828f423 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgpuinstance.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgpuinstance.go @@ -72,6 +72,9 @@ var _ nvml.VgpuInstance = &VgpuInstance{} // GetMetadataFunc: func() (nvml.VgpuMetadata, nvml.Return) { // panic("mock out the GetMetadata method") // }, +// GetRuntimeStateSizeFunc: func() (nvml.VgpuRuntimeState, nvml.Return) { +// panic("mock out the GetRuntimeStateSize method") +// }, // GetTypeFunc: func() (nvml.VgpuTypeId, nvml.Return) { // panic("mock out the GetType method") // }, @@ -148,6 +151,9 @@ type VgpuInstance struct { // GetMetadataFunc mocks the GetMetadata method. GetMetadataFunc func() (nvml.VgpuMetadata, nvml.Return) + // GetRuntimeStateSizeFunc mocks the GetRuntimeStateSize method. + GetRuntimeStateSizeFunc func() (nvml.VgpuRuntimeState, nvml.Return) + // GetTypeFunc mocks the GetType method. GetTypeFunc func() (nvml.VgpuTypeId, nvml.Return) @@ -221,6 +227,9 @@ type VgpuInstance struct { // GetMetadata holds details about calls to the GetMetadata method. GetMetadata []struct { } + // GetRuntimeStateSize holds details about calls to the GetRuntimeStateSize method. + GetRuntimeStateSize []struct { + } // GetType holds details about calls to the GetType method. 
GetType []struct { } @@ -257,6 +266,7 @@ type VgpuInstance struct { lockGetLicenseStatus sync.RWMutex lockGetMdevUUID sync.RWMutex lockGetMetadata sync.RWMutex + lockGetRuntimeStateSize sync.RWMutex lockGetType sync.RWMutex lockGetUUID sync.RWMutex lockGetVmDriverVersion sync.RWMutex @@ -755,6 +765,33 @@ func (mock *VgpuInstance) GetMetadataCalls() []struct { return calls } +// GetRuntimeStateSize calls GetRuntimeStateSizeFunc. +func (mock *VgpuInstance) GetRuntimeStateSize() (nvml.VgpuRuntimeState, nvml.Return) { + if mock.GetRuntimeStateSizeFunc == nil { + panic("VgpuInstance.GetRuntimeStateSizeFunc: method is nil but VgpuInstance.GetRuntimeStateSize was just called") + } + callInfo := struct { + }{} + mock.lockGetRuntimeStateSize.Lock() + mock.calls.GetRuntimeStateSize = append(mock.calls.GetRuntimeStateSize, callInfo) + mock.lockGetRuntimeStateSize.Unlock() + return mock.GetRuntimeStateSizeFunc() +} + +// GetRuntimeStateSizeCalls gets all the calls that were made to GetRuntimeStateSize. +// Check the length with: +// +// len(mockedVgpuInstance.GetRuntimeStateSizeCalls()) +func (mock *VgpuInstance) GetRuntimeStateSizeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetRuntimeStateSize.RLock() + calls = mock.calls.GetRuntimeStateSize + mock.lockGetRuntimeStateSize.RUnlock() + return calls +} + // GetType calls GetTypeFunc. 
func (mock *VgpuInstance) GetType() (nvml.VgpuTypeId, nvml.Return) { if mock.GetTypeFunc == nil { diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgputypeid.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgputypeid.go index c838c3b..467d746 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgputypeid.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/mock/vgputypeid.go @@ -18,12 +18,18 @@ var _ nvml.VgpuTypeId = &VgpuTypeId{} // // // make and configure a mocked nvml.VgpuTypeId // mockedVgpuTypeId := &VgpuTypeId{ +// GetBAR1InfoFunc: func() (nvml.VgpuTypeBar1Info, nvml.Return) { +// panic("mock out the GetBAR1Info method") +// }, // GetCapabilitiesFunc: func(vgpuCapability nvml.VgpuCapability) (bool, nvml.Return) { // panic("mock out the GetCapabilities method") // }, // GetClassFunc: func() (string, nvml.Return) { // panic("mock out the GetClass method") // }, +// GetCreatablePlacementsFunc: func(device nvml.Device) (nvml.VgpuPlacementList, nvml.Return) { +// panic("mock out the GetCreatablePlacements method") +// }, // GetDeviceIDFunc: func() (uint64, uint64, nvml.Return) { // panic("mock out the GetDeviceID method") // }, @@ -54,6 +60,9 @@ var _ nvml.VgpuTypeId = &VgpuTypeId{} // GetResolutionFunc: func(n int) (uint32, uint32, nvml.Return) { // panic("mock out the GetResolution method") // }, +// GetSupportedPlacementsFunc: func(device nvml.Device) (nvml.VgpuPlacementList, nvml.Return) { +// panic("mock out the GetSupportedPlacements method") +// }, // } // // // use mockedVgpuTypeId in code that requires nvml.VgpuTypeId @@ -61,12 +70,18 @@ var _ nvml.VgpuTypeId = &VgpuTypeId{} // // } type VgpuTypeId struct { + // GetBAR1InfoFunc mocks the GetBAR1Info method. + GetBAR1InfoFunc func() (nvml.VgpuTypeBar1Info, nvml.Return) + // GetCapabilitiesFunc mocks the GetCapabilities method. GetCapabilitiesFunc func(vgpuCapability nvml.VgpuCapability) (bool, nvml.Return) // GetClassFunc mocks the GetClass method. 
GetClassFunc func() (string, nvml.Return) + // GetCreatablePlacementsFunc mocks the GetCreatablePlacements method. + GetCreatablePlacementsFunc func(device nvml.Device) (nvml.VgpuPlacementList, nvml.Return) + // GetDeviceIDFunc mocks the GetDeviceID method. GetDeviceIDFunc func() (uint64, uint64, nvml.Return) @@ -97,8 +112,14 @@ type VgpuTypeId struct { // GetResolutionFunc mocks the GetResolution method. GetResolutionFunc func(n int) (uint32, uint32, nvml.Return) + // GetSupportedPlacementsFunc mocks the GetSupportedPlacements method. + GetSupportedPlacementsFunc func(device nvml.Device) (nvml.VgpuPlacementList, nvml.Return) + // calls tracks calls to the methods. calls struct { + // GetBAR1Info holds details about calls to the GetBAR1Info method. + GetBAR1Info []struct { + } // GetCapabilities holds details about calls to the GetCapabilities method. GetCapabilities []struct { // VgpuCapability is the vgpuCapability argument value. @@ -107,6 +128,11 @@ type VgpuTypeId struct { // GetClass holds details about calls to the GetClass method. GetClass []struct { } + // GetCreatablePlacements holds details about calls to the GetCreatablePlacements method. + GetCreatablePlacements []struct { + // Device is the device argument value. + Device nvml.Device + } // GetDeviceID holds details about calls to the GetDeviceID method. GetDeviceID []struct { } @@ -141,9 +167,16 @@ type VgpuTypeId struct { // N is the n argument value. N int } + // GetSupportedPlacements holds details about calls to the GetSupportedPlacements method. + GetSupportedPlacements []struct { + // Device is the device argument value. 
+ Device nvml.Device + } } + lockGetBAR1Info sync.RWMutex lockGetCapabilities sync.RWMutex lockGetClass sync.RWMutex + lockGetCreatablePlacements sync.RWMutex lockGetDeviceID sync.RWMutex lockGetFrameRateLimit sync.RWMutex lockGetFramebufferSize sync.RWMutex @@ -154,6 +187,34 @@ type VgpuTypeId struct { lockGetName sync.RWMutex lockGetNumDisplayHeads sync.RWMutex lockGetResolution sync.RWMutex + lockGetSupportedPlacements sync.RWMutex +} + +// GetBAR1Info calls GetBAR1InfoFunc. +func (mock *VgpuTypeId) GetBAR1Info() (nvml.VgpuTypeBar1Info, nvml.Return) { + if mock.GetBAR1InfoFunc == nil { + panic("VgpuTypeId.GetBAR1InfoFunc: method is nil but VgpuTypeId.GetBAR1Info was just called") + } + callInfo := struct { + }{} + mock.lockGetBAR1Info.Lock() + mock.calls.GetBAR1Info = append(mock.calls.GetBAR1Info, callInfo) + mock.lockGetBAR1Info.Unlock() + return mock.GetBAR1InfoFunc() +} + +// GetBAR1InfoCalls gets all the calls that were made to GetBAR1Info. +// Check the length with: +// +// len(mockedVgpuTypeId.GetBAR1InfoCalls()) +func (mock *VgpuTypeId) GetBAR1InfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetBAR1Info.RLock() + calls = mock.calls.GetBAR1Info + mock.lockGetBAR1Info.RUnlock() + return calls } // GetCapabilities calls GetCapabilitiesFunc. @@ -215,6 +276,38 @@ func (mock *VgpuTypeId) GetClassCalls() []struct { return calls } +// GetCreatablePlacements calls GetCreatablePlacementsFunc. 
+func (mock *VgpuTypeId) GetCreatablePlacements(device nvml.Device) (nvml.VgpuPlacementList, nvml.Return) { + if mock.GetCreatablePlacementsFunc == nil { + panic("VgpuTypeId.GetCreatablePlacementsFunc: method is nil but VgpuTypeId.GetCreatablePlacements was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockGetCreatablePlacements.Lock() + mock.calls.GetCreatablePlacements = append(mock.calls.GetCreatablePlacements, callInfo) + mock.lockGetCreatablePlacements.Unlock() + return mock.GetCreatablePlacementsFunc(device) +} + +// GetCreatablePlacementsCalls gets all the calls that were made to GetCreatablePlacements. +// Check the length with: +// +// len(mockedVgpuTypeId.GetCreatablePlacementsCalls()) +func (mock *VgpuTypeId) GetCreatablePlacementsCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockGetCreatablePlacements.RLock() + calls = mock.calls.GetCreatablePlacements + mock.lockGetCreatablePlacements.RUnlock() + return calls +} + // GetDeviceID calls GetDeviceIDFunc. func (mock *VgpuTypeId) GetDeviceID() (uint64, uint64, nvml.Return) { if mock.GetDeviceIDFunc == nil { @@ -494,3 +587,35 @@ func (mock *VgpuTypeId) GetResolutionCalls() []struct { mock.lockGetResolution.RUnlock() return calls } + +// GetSupportedPlacements calls GetSupportedPlacementsFunc. 
+func (mock *VgpuTypeId) GetSupportedPlacements(device nvml.Device) (nvml.VgpuPlacementList, nvml.Return) { + if mock.GetSupportedPlacementsFunc == nil { + panic("VgpuTypeId.GetSupportedPlacementsFunc: method is nil but VgpuTypeId.GetSupportedPlacements was just called") + } + callInfo := struct { + Device nvml.Device + }{ + Device: device, + } + mock.lockGetSupportedPlacements.Lock() + mock.calls.GetSupportedPlacements = append(mock.calls.GetSupportedPlacements, callInfo) + mock.lockGetSupportedPlacements.Unlock() + return mock.GetSupportedPlacementsFunc(device) +} + +// GetSupportedPlacementsCalls gets all the calls that were made to GetSupportedPlacements. +// Check the length with: +// +// len(mockedVgpuTypeId.GetSupportedPlacementsCalls()) +func (mock *VgpuTypeId) GetSupportedPlacementsCalls() []struct { + Device nvml.Device +} { + var calls []struct { + Device nvml.Device + } + mock.lockGetSupportedPlacements.RLock() + calls = mock.calls.GetSupportedPlacements + mock.lockGetSupportedPlacements.RUnlock() + return calls +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go index 65fcffa..95d67d6 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go @@ -102,6 +102,34 @@ func nvmlSystemGetProcessName(Pid uint32, Name *byte, Length uint32) Return { return __v } +// nvmlSystemGetHicVersion function as declared in nvml/nvml.h +func nvmlSystemGetHicVersion(HwbcCount *uint32, HwbcEntries *HwbcEntry) Return { + cHwbcCount, _ := (*C.uint)(unsafe.Pointer(HwbcCount)), cgoAllocsUnknown + cHwbcEntries, _ := (*C.nvmlHwbcEntry_t)(unsafe.Pointer(HwbcEntries)), cgoAllocsUnknown + __ret := C.nvmlSystemGetHicVersion(cHwbcCount, cHwbcEntries) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetTopologyGpuSet function as declared in nvml/nvml.h +func nvmlSystemGetTopologyGpuSet(CpuNumber uint32, Count *uint32, DeviceArray *nvmlDevice) Return { + 
cCpuNumber, _ := (C.uint)(CpuNumber), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown + __ret := C.nvmlSystemGetTopologyGpuSet(cCpuNumber, cCount, cDeviceArray) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetDriverBranch function as declared in nvml/nvml.h +func nvmlSystemGetDriverBranch(BranchInfo *SystemDriverBranchInfo, Length uint32) Return { + cBranchInfo, _ := (*C.nvmlSystemDriverBranchInfo_t)(unsafe.Pointer(BranchInfo)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetDriverBranch(cBranchInfo, cLength) + __v := (Return)(__ret) + return __v +} + // nvmlUnitGetCount function as declared in nvml/nvml.h func nvmlUnitGetCount(UnitCount *uint32) Return { cUnitCount, _ := (*C.uint)(unsafe.Pointer(UnitCount)), cgoAllocsUnknown @@ -175,15 +203,6 @@ func nvmlUnitGetDevices(nvmlUnit nvmlUnit, DeviceCount *uint32, Devices *nvmlDev return __v } -// nvmlSystemGetHicVersion function as declared in nvml/nvml.h -func nvmlSystemGetHicVersion(HwbcCount *uint32, HwbcEntries *HwbcEntry) Return { - cHwbcCount, _ := (*C.uint)(unsafe.Pointer(HwbcCount)), cgoAllocsUnknown - cHwbcEntries, _ := (*C.nvmlHwbcEntry_t)(unsafe.Pointer(HwbcEntries)), cgoAllocsUnknown - __ret := C.nvmlSystemGetHicVersion(cHwbcCount, cHwbcEntries) - __v := (Return)(__ret) - return __v -} - // nvmlDeviceGetCount_v2 function as declared in nvml/nvml.h func nvmlDeviceGetCount_v2(DeviceCount *uint32) Return { cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown @@ -228,6 +247,15 @@ func nvmlDeviceGetHandleByUUID(Uuid string, nvmlDevice *nvmlDevice) Return { return __v } +// nvmlDeviceGetHandleByUUIDV function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByUUIDV(Uuid *UUID, nvmlDevice *nvmlDevice) Return { + cUuid, _ := (*C.nvmlUUID_t)(unsafe.Pointer(Uuid)), cgoAllocsUnknown + cnvmlDevice, _ := 
(*C.nvmlDevice_t)(unsafe.Pointer(nvmlDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByUUIDV(cUuid, cnvmlDevice) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetHandleByPciBusId_v2 function as declared in nvml/nvml.h func nvmlDeviceGetHandleByPciBusId_v2(PciBusId string, nvmlDevice *nvmlDevice) Return { cPciBusId, _ := unpackPCharString(PciBusId) @@ -275,6 +303,24 @@ func nvmlDeviceGetSerial(nvmlDevice nvmlDevice, Serial *byte, Length uint32) Ret return __v } +// nvmlDeviceGetModuleId function as declared in nvml/nvml.h +func nvmlDeviceGetModuleId(nvmlDevice nvmlDevice, ModuleId *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cModuleId, _ := (*C.uint)(unsafe.Pointer(ModuleId)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetModuleId(cnvmlDevice, cModuleId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetC2cModeInfoV function as declared in nvml/nvml.h +func nvmlDeviceGetC2cModeInfoV(nvmlDevice nvmlDevice, C2cModeInfo *C2cModeInfo_v1) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cC2cModeInfo, _ := (*C.nvmlC2cModeInfo_v1_t)(unsafe.Pointer(C2cModeInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetC2cModeInfoV(cnvmlDevice, cC2cModeInfo) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetMemoryAffinity function as declared in nvml/nvml.h func nvmlDeviceGetMemoryAffinity(nvmlDevice nvmlDevice, NodeSetSize uint32, NodeSet *uint, Scope AffinityScope) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -323,6 +369,15 @@ func nvmlDeviceClearCpuAffinity(nvmlDevice nvmlDevice) Return { return __v } +// nvmlDeviceGetNumaNodeId function as declared in nvml/nvml.h +func nvmlDeviceGetNumaNodeId(nvmlDevice nvmlDevice, Node *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cNode, _ := (*C.uint)(unsafe.Pointer(Node)), cgoAllocsUnknown + 
__ret := C.nvmlDeviceGetNumaNodeId(cnvmlDevice, cNode) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetTopologyCommonAncestor function as declared in nvml/nvml.h func nvmlDeviceGetTopologyCommonAncestor(Device1 nvmlDevice, Device2 nvmlDevice, PathInfo *GpuTopologyLevel) Return { cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown @@ -344,16 +399,6 @@ func nvmlDeviceGetTopologyNearestGpus(nvmlDevice nvmlDevice, Level GpuTopologyLe return __v } -// nvmlSystemGetTopologyGpuSet function as declared in nvml/nvml.h -func nvmlSystemGetTopologyGpuSet(CpuNumber uint32, Count *uint32, DeviceArray *nvmlDevice) Return { - cCpuNumber, _ := (C.uint)(CpuNumber), cgoAllocsUnknown - cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown - cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown - __ret := C.nvmlSystemGetTopologyGpuSet(cCpuNumber, cCount, cDeviceArray) - __v := (Return)(__ret) - return __v -} - // nvmlDeviceGetP2PStatus function as declared in nvml/nvml.h func nvmlDeviceGetP2PStatus(Device1 nvmlDevice, Device2 nvmlDevice, P2pIndex GpuP2PCapsIndex, P2pStatus *GpuP2PStatus) Return { cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown @@ -375,16 +420,6 @@ func nvmlDeviceGetUUID(nvmlDevice nvmlDevice, Uuid *byte, Length uint32) Return return __v } -// nvmlVgpuInstanceGetMdevUUID function as declared in nvml/nvml.h -func nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance nvmlVgpuInstance, MdevUuid *byte, Size uint32) Return { - cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown - cMdevUuid, _ := (*C.char)(unsafe.Pointer(MdevUuid)), cgoAllocsUnknown - cSize, _ := (C.uint)(Size), cgoAllocsUnknown - __ret := C.nvmlVgpuInstanceGetMdevUUID(cnvmlVgpuInstance, cMdevUuid, cSize) - __v := (Return)(__ret) - return __v -} - // nvmlDeviceGetMinorNumber function as declared in nvml/nvml.h func nvmlDeviceGetMinorNumber(nvmlDevice nvmlDevice, MinorNumber *uint32) 
Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -442,6 +477,16 @@ func nvmlDeviceValidateInforom(nvmlDevice nvmlDevice) Return { return __v } +// nvmlDeviceGetLastBBXFlushTime function as declared in nvml/nvml.h +func nvmlDeviceGetLastBBXFlushTime(nvmlDevice nvmlDevice, Timestamp *uint64, DurationUs *uint) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cTimestamp, _ := (*C.ulonglong)(unsafe.Pointer(Timestamp)), cgoAllocsUnknown + cDurationUs, _ := (*C.ulong)(unsafe.Pointer(DurationUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetLastBBXFlushTime(cnvmlDevice, cTimestamp, cDurationUs) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetDisplayMode function as declared in nvml/nvml.h func nvmlDeviceGetDisplayMode(nvmlDevice nvmlDevice, Display *EnableState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -469,6 +514,15 @@ func nvmlDeviceGetPersistenceMode(nvmlDevice nvmlDevice, Mode *EnableState) Retu return __v } +// nvmlDeviceGetPciInfoExt function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfoExt(nvmlDevice nvmlDevice, Pci *PciInfoExt) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfoExt_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfoExt(cnvmlDevice, cPci) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetPciInfo_v3 function as declared in nvml/nvml.h func nvmlDeviceGetPciInfo_v3(nvmlDevice nvmlDevice, Pci *PciInfo) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -562,6 +616,15 @@ func nvmlDeviceGetMaxClockInfo(nvmlDevice nvmlDevice, _type ClockType, Clock *ui return __v } +// nvmlDeviceGetGpcClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetGpcClkVfOffset(nvmlDevice nvmlDevice, Offset *int32) Return { + cnvmlDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpcClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetApplicationsClock function as declared in nvml/nvml.h func nvmlDeviceGetApplicationsClock(nvmlDevice nvmlDevice, ClockType ClockType, ClockMHz *uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -582,14 +645,6 @@ func nvmlDeviceGetDefaultApplicationsClock(nvmlDevice nvmlDevice, ClockType Cloc return __v } -// nvmlDeviceResetApplicationsClocks function as declared in nvml/nvml.h -func nvmlDeviceResetApplicationsClocks(nvmlDevice nvmlDevice) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - __ret := C.nvmlDeviceResetApplicationsClocks(cnvmlDevice) - __v := (Return)(__ret) - return __v -} - // nvmlDeviceGetClock function as declared in nvml/nvml.h func nvmlDeviceGetClock(nvmlDevice nvmlDevice, ClockType ClockType, ClockId ClockId, ClockMHz *uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -642,25 +697,6 @@ func nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, IsEnabled *Ena return __v } -// nvmlDeviceSetAutoBoostedClocksEnabled function as declared in nvml/nvml.h -func nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, Enabled EnableState) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown - __ret := C.nvmlDeviceSetAutoBoostedClocksEnabled(cnvmlDevice, cEnabled) - __v := (Return)(__ret) - return __v -} - -// nvmlDeviceSetDefaultAutoBoostedClocksEnabled function as declared in nvml/nvml.h -func nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, Enabled EnableState, Flags uint32) Return { - cnvmlDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown - cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown - __ret := C.nvmlDeviceSetDefaultAutoBoostedClocksEnabled(cnvmlDevice, cEnabled, cFlags) - __v := (Return)(__ret) - return __v -} - // nvmlDeviceGetFanSpeed function as declared in nvml/nvml.h func nvmlDeviceGetFanSpeed(nvmlDevice nvmlDevice, Speed *uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -680,21 +716,21 @@ func nvmlDeviceGetFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32, Speed *uint32) return __v } -// nvmlDeviceGetTargetFanSpeed function as declared in nvml/nvml.h -func nvmlDeviceGetTargetFanSpeed(nvmlDevice nvmlDevice, Fan uint32, TargetSpeed *uint32) Return { +// nvmlDeviceGetFanSpeedRPM function as declared in nvml/nvml.h +func nvmlDeviceGetFanSpeedRPM(nvmlDevice nvmlDevice, FanSpeed *FanSpeedInfo) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cFan, _ := (C.uint)(Fan), cgoAllocsUnknown - cTargetSpeed, _ := (*C.uint)(unsafe.Pointer(TargetSpeed)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetTargetFanSpeed(cnvmlDevice, cFan, cTargetSpeed) + cFanSpeed, _ := (*C.nvmlFanSpeedInfo_t)(unsafe.Pointer(FanSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanSpeedRPM(cnvmlDevice, cFanSpeed) __v := (Return)(__ret) return __v } -// nvmlDeviceSetDefaultFanSpeed_v2 function as declared in nvml/nvml.h -func nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32) Return { +// nvmlDeviceGetTargetFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetTargetFanSpeed(nvmlDevice nvmlDevice, Fan uint32, TargetSpeed *uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown cFan, _ := (C.uint)(Fan), cgoAllocsUnknown - __ret := C.nvmlDeviceSetDefaultFanSpeed_v2(cnvmlDevice, cFan) + cTargetSpeed, _ := 
(*C.uint)(unsafe.Pointer(TargetSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTargetFanSpeed(cnvmlDevice, cFan, cTargetSpeed) __v := (Return)(__ret) return __v } @@ -719,16 +755,6 @@ func nvmlDeviceGetFanControlPolicy_v2(nvmlDevice nvmlDevice, Fan uint32, Policy return __v } -// nvmlDeviceSetFanControlPolicy function as declared in nvml/nvml.h -func nvmlDeviceSetFanControlPolicy(nvmlDevice nvmlDevice, Fan uint32, Policy FanControlPolicy) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cFan, _ := (C.uint)(Fan), cgoAllocsUnknown - cPolicy, _ := (C.nvmlFanControlPolicy_t)(Policy), cgoAllocsUnknown - __ret := C.nvmlDeviceSetFanControlPolicy(cnvmlDevice, cFan, cPolicy) - __v := (Return)(__ret) - return __v -} - // nvmlDeviceGetNumFans function as declared in nvml/nvml.h func nvmlDeviceGetNumFans(nvmlDevice nvmlDevice, NumFans *uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -748,6 +774,24 @@ func nvmlDeviceGetTemperature(nvmlDevice nvmlDevice, SensorType TemperatureSenso return __v } +// nvmlDeviceGetCoolerInfo function as declared in nvml/nvml.h +func nvmlDeviceGetCoolerInfo(nvmlDevice nvmlDevice, CoolerInfo *CoolerInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCoolerInfo, _ := (*C.nvmlCoolerInfo_t)(unsafe.Pointer(CoolerInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCoolerInfo(cnvmlDevice, cCoolerInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTemperatureV function as declared in nvml/nvml.h +func nvmlDeviceGetTemperatureV(nvmlDevice nvmlDevice, Temperature *Temperature) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cTemperature, _ := (*C.nvmlTemperature_t)(unsafe.Pointer(Temperature)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTemperatureV(cnvmlDevice, cTemperature) + __v := (Return)(__ret) + return __v +} + // 
nvmlDeviceGetTemperatureThreshold function as declared in nvml/nvml.h func nvmlDeviceGetTemperatureThreshold(nvmlDevice nvmlDevice, ThresholdType TemperatureThresholds, Temp *uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -758,12 +802,11 @@ func nvmlDeviceGetTemperatureThreshold(nvmlDevice nvmlDevice, ThresholdType Temp return __v } -// nvmlDeviceSetTemperatureThreshold function as declared in nvml/nvml.h -func nvmlDeviceSetTemperatureThreshold(nvmlDevice nvmlDevice, ThresholdType TemperatureThresholds, Temp *int32) Return { +// nvmlDeviceGetMarginTemperature function as declared in nvml/nvml.h +func nvmlDeviceGetMarginTemperature(nvmlDevice nvmlDevice, MarginTempInfo *MarginTemperature) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown - cTemp, _ := (*C.int)(unsafe.Pointer(Temp)), cgoAllocsUnknown - __ret := C.nvmlDeviceSetTemperatureThreshold(cnvmlDevice, cThresholdType, cTemp) + cMarginTempInfo, _ := (*C.nvmlMarginTemperature_t)(unsafe.Pointer(MarginTempInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMarginTemperature(cnvmlDevice, cMarginTempInfo) __v := (Return)(__ret) return __v } @@ -787,6 +830,15 @@ func nvmlDeviceGetPerformanceState(nvmlDevice nvmlDevice, PState *Pstates) Retur return __v } +// nvmlDeviceGetCurrentClocksEventReasons function as declared in nvml/nvml.h +func nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice nvmlDevice, ClocksEventReasons *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cClocksEventReasons, _ := (*C.ulonglong)(unsafe.Pointer(ClocksEventReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrentClocksEventReasons(cnvmlDevice, cClocksEventReasons) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetCurrentClocksThrottleReasons function as declared in nvml/nvml.h func 
nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice nvmlDevice, ClocksThrottleReasons *uint64) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -796,6 +848,15 @@ func nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice nvmlDevice, ClocksThro return __v } +// nvmlDeviceGetSupportedClocksEventReasons function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice nvmlDevice, SupportedClocksEventReasons *uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSupportedClocksEventReasons, _ := (*C.ulonglong)(unsafe.Pointer(SupportedClocksEventReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedClocksEventReasons(cnvmlDevice, cSupportedClocksEventReasons) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetSupportedClocksThrottleReasons function as declared in nvml/nvml.h func nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice nvmlDevice, SupportedClocksThrottleReasons *uint64) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -814,6 +875,102 @@ func nvmlDeviceGetPowerState(nvmlDevice nvmlDevice, PState *Pstates) Return { return __v } +// nvmlDeviceGetDynamicPstatesInfo function as declared in nvml/nvml.h +func nvmlDeviceGetDynamicPstatesInfo(nvmlDevice nvmlDevice, PDynamicPstatesInfo *GpuDynamicPstatesInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPDynamicPstatesInfo, _ := (*C.nvmlGpuDynamicPstatesInfo_t)(unsafe.Pointer(PDynamicPstatesInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDynamicPstatesInfo(cnvmlDevice, cPDynamicPstatesInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetMemClkVfOffset(nvmlDevice nvmlDevice, Offset *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + 
cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinMaxClockOfPState function as declared in nvml/nvml.h +func nvmlDeviceGetMinMaxClockOfPState(nvmlDevice nvmlDevice, _type ClockType, Pstate Pstates, MinClockMHz *uint32, MaxClockMHz *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown + cPstate, _ := (C.nvmlPstates_t)(Pstate), cgoAllocsUnknown + cMinClockMHz, _ := (*C.uint)(unsafe.Pointer(MinClockMHz)), cgoAllocsUnknown + cMaxClockMHz, _ := (*C.uint)(unsafe.Pointer(MaxClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinMaxClockOfPState(cnvmlDevice, c_type, cPstate, cMinClockMHz, cMaxClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedPerformanceStates function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedPerformanceStates(nvmlDevice nvmlDevice, Pstates *Pstates, Size uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPstates, _ := (*C.nvmlPstates_t)(unsafe.Pointer(Pstates)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedPerformanceStates(cnvmlDevice, cPstates, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpcClkMinMaxVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice nvmlDevice, MinOffset *int32, MaxOffset *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown + cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpcClkMinMaxVfOffset(cnvmlDevice, cMinOffset, cMaxOffset) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceGetMemClkMinMaxVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice nvmlDevice, MinOffset *int32, MaxOffset *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown + cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemClkMinMaxVfOffset(cnvmlDevice, cMinOffset, cMaxOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClockOffsets function as declared in nvml/nvml.h +func nvmlDeviceGetClockOffsets(nvmlDevice nvmlDevice, Info *ClockOffset) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlClockOffset_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClockOffsets(cnvmlDevice, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetClockOffsets function as declared in nvml/nvml.h +func nvmlDeviceSetClockOffsets(nvmlDevice nvmlDevice, Info *ClockOffset) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlClockOffset_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetClockOffsets(cnvmlDevice, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPerformanceModes function as declared in nvml/nvml.h +func nvmlDeviceGetPerformanceModes(nvmlDevice nvmlDevice, PerfModes *DevicePerfModes) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPerfModes, _ := (*C.nvmlDevicePerfModes_t)(unsafe.Pointer(PerfModes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPerformanceModes(cnvmlDevice, cPerfModes) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrentClockFreqs function as declared in nvml/nvml.h +func nvmlDeviceGetCurrentClockFreqs(nvmlDevice nvmlDevice, CurrentClockFreqs *DeviceCurrentClockFreqs) 
Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrentClockFreqs, _ := (*C.nvmlDeviceCurrentClockFreqs_t)(unsafe.Pointer(CurrentClockFreqs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrentClockFreqs(cnvmlDevice, cCurrentClockFreqs) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetPowerManagementMode function as declared in nvml/nvml.h func nvmlDeviceGetPowerManagementMode(nvmlDevice nvmlDevice, Mode *EnableState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -925,6 +1082,25 @@ func nvmlDeviceGetCudaComputeCapability(nvmlDevice nvmlDevice, Major *int32, Min return __v } +// nvmlDeviceGetDramEncryptionMode function as declared in nvml/nvml.h +func nvmlDeviceGetDramEncryptionMode(nvmlDevice nvmlDevice, Current *DramEncryptionInfo, Pending *DramEncryptionInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlDramEncryptionInfo_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlDramEncryptionInfo_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDramEncryptionMode(cnvmlDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDramEncryptionMode function as declared in nvml/nvml.h +func nvmlDeviceSetDramEncryptionMode(nvmlDevice nvmlDevice, DramEncryption *DramEncryptionInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cDramEncryption, _ := (*C.nvmlDramEncryptionInfo_t)(unsafe.Pointer(DramEncryption)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDramEncryptionMode(cnvmlDevice, cDramEncryption) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetEccMode function as declared in nvml/nvml.h func nvmlDeviceGetEccMode(nvmlDevice nvmlDevice, Current *EnableState, Pending *EnableState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), 
cgoAllocsUnknown @@ -1056,6 +1232,26 @@ func nvmlDeviceGetDecoderUtilization(nvmlDevice nvmlDevice, Utilization *uint32, return __v } +// nvmlDeviceGetJpgUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetJpgUtilization(nvmlDevice nvmlDevice, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetJpgUtilization(cnvmlDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetOfaUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetOfaUtilization(nvmlDevice nvmlDevice, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetOfaUtilization(cnvmlDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetFBCStats function as declared in nvml/nvml.h func nvmlDeviceGetFBCStats(nvmlDevice nvmlDevice, FbcStats *FBCStats) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -1075,12 +1271,12 @@ func nvmlDeviceGetFBCSessions(nvmlDevice nvmlDevice, SessionCount *uint32, Sessi return __v } -// nvmlDeviceGetDriverModel function as declared in nvml/nvml.h -func nvmlDeviceGetDriverModel(nvmlDevice nvmlDevice, Current *DriverModel, Pending *DriverModel) Return { +// nvmlDeviceGetDriverModel_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetDriverModel_v2(nvmlDevice nvmlDevice, Current *DriverModel, Pending *DriverModel) Return { cnvmlDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown cCurrent, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Current)), cgoAllocsUnknown cPending, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetDriverModel(cnvmlDevice, cCurrent, cPending) + __ret := C.nvmlDeviceGetDriverModel_v2(cnvmlDevice, cCurrent, cPending) __v := (Return)(__ret) return __v } @@ -1134,6 +1330,15 @@ func nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice nvmlDevice, InfoCount return __v } +// nvmlDeviceGetRunningProcessDetailList function as declared in nvml/nvml.h +func nvmlDeviceGetRunningProcessDetailList(nvmlDevice nvmlDevice, Plist *ProcessDetailList) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPlist, _ := (*C.nvmlProcessDetailList_t)(unsafe.Pointer(Plist)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRunningProcessDetailList(cnvmlDevice, cPlist) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceOnSameBoard function as declared in nvml/nvml.h func nvmlDeviceOnSameBoard(Device1 nvmlDevice, Device2 nvmlDevice, OnSameBoard *int32) Return { cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown @@ -1249,6 +1454,162 @@ func nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice nvmlDevice, AdaptiveClockSt return __v } +// nvmlDeviceGetBusType function as declared in nvml/nvml.h +func nvmlDeviceGetBusType(nvmlDevice nvmlDevice, _type *BusType) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + c_type, _ := (*C.nvmlBusType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBusType(cnvmlDevice, c_type) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuFabricInfo function as declared in nvml/nvml.h +func nvmlDeviceGetGpuFabricInfo(nvmlDevice nvmlDevice, GpuFabricInfo *GpuFabricInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + 
cGpuFabricInfo, _ := (*C.nvmlGpuFabricInfo_t)(unsafe.Pointer(GpuFabricInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuFabricInfo(cnvmlDevice, cGpuFabricInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuFabricInfoV function as declared in nvml/nvml.h +func nvmlDeviceGetGpuFabricInfoV(nvmlDevice nvmlDevice, GpuFabricInfo *GpuFabricInfoV) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuFabricInfo, _ := (*C.nvmlGpuFabricInfoV_t)(unsafe.Pointer(GpuFabricInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuFabricInfoV(cnvmlDevice, cGpuFabricInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeCapabilities function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeCapabilities(Capabilities *ConfComputeSystemCaps) Return { + cCapabilities, _ := (*C.nvmlConfComputeSystemCaps_t)(unsafe.Pointer(Capabilities)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeCapabilities(cCapabilities) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeState function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeState(State *ConfComputeSystemState) Return { + cState, _ := (*C.nvmlConfComputeSystemState_t)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeState(cState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeMemSizeInfo function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice nvmlDevice, MemInfo *ConfComputeMemSizeInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemInfo, _ := (*C.nvmlConfComputeMemSizeInfo_t)(unsafe.Pointer(MemInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeMemSizeInfo(cnvmlDevice, cMemInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeGpusReadyState function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeGpusReadyState(IsAcceptingWork 
*uint32) Return { + cIsAcceptingWork, _ := (*C.uint)(unsafe.Pointer(IsAcceptingWork)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeGpusReadyState(cIsAcceptingWork) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeProtectedMemoryUsage function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice nvmlDevice, Memory *Memory) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeProtectedMemoryUsage(cnvmlDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeGpuCertificate function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice nvmlDevice, GpuCert *ConfComputeGpuCertificate) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuCert, _ := (*C.nvmlConfComputeGpuCertificate_t)(unsafe.Pointer(GpuCert)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeGpuCertificate(cnvmlDevice, cGpuCert) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetConfComputeGpuAttestationReport function as declared in nvml/nvml.h +func nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice nvmlDevice, GpuAtstReport *ConfComputeGpuAttestationReport) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGpuAtstReport, _ := (*C.nvmlConfComputeGpuAttestationReport_t)(unsafe.Pointer(GpuAtstReport)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetConfComputeGpuAttestationReport(cnvmlDevice, cGpuAtstReport) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeKeyRotationThresholdInfo function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeKeyRotationThresholdInfo(PKeyRotationThrInfo *ConfComputeGetKeyRotationThresholdInfo) Return { + cPKeyRotationThrInfo, _ := 
(*C.nvmlConfComputeGetKeyRotationThresholdInfo_t)(unsafe.Pointer(PKeyRotationThrInfo)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeKeyRotationThresholdInfo(cPKeyRotationThrInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetConfComputeUnprotectedMemSize function as declared in nvml/nvml.h +func nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice nvmlDevice, SizeKiB uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSizeKiB, _ := (C.ulonglong)(SizeKiB), cgoAllocsUnknown + __ret := C.nvmlDeviceSetConfComputeUnprotectedMemSize(cnvmlDevice, cSizeKiB) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemSetConfComputeGpusReadyState function as declared in nvml/nvml.h +func nvmlSystemSetConfComputeGpusReadyState(IsAcceptingWork uint32) Return { + cIsAcceptingWork, _ := (C.uint)(IsAcceptingWork), cgoAllocsUnknown + __ret := C.nvmlSystemSetConfComputeGpusReadyState(cIsAcceptingWork) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemSetConfComputeKeyRotationThresholdInfo function as declared in nvml/nvml.h +func nvmlSystemSetConfComputeKeyRotationThresholdInfo(PKeyRotationThrInfo *ConfComputeSetKeyRotationThresholdInfo) Return { + cPKeyRotationThrInfo, _ := (*C.nvmlConfComputeSetKeyRotationThresholdInfo_t)(unsafe.Pointer(PKeyRotationThrInfo)), cgoAllocsUnknown + __ret := C.nvmlSystemSetConfComputeKeyRotationThresholdInfo(cPKeyRotationThrInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetConfComputeSettings function as declared in nvml/nvml.h +func nvmlSystemGetConfComputeSettings(Settings *SystemConfComputeSettings) Return { + cSettings, _ := (*C.nvmlSystemConfComputeSettings_t)(unsafe.Pointer(Settings)), cgoAllocsUnknown + __ret := C.nvmlSystemGetConfComputeSettings(cSettings) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGspFirmwareVersion function as declared in nvml/nvml.h +func nvmlDeviceGetGspFirmwareVersion(nvmlDevice nvmlDevice, Version *byte) 
Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGspFirmwareVersion(cnvmlDevice, cVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGspFirmwareMode function as declared in nvml/nvml.h +func nvmlDeviceGetGspFirmwareMode(nvmlDevice nvmlDevice, IsEnabled *uint32, DefaultMode *uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cIsEnabled, _ := (*C.uint)(unsafe.Pointer(IsEnabled)), cgoAllocsUnknown + cDefaultMode, _ := (*C.uint)(unsafe.Pointer(DefaultMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGspFirmwareMode(cnvmlDevice, cIsEnabled, cDefaultMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSramEccErrorStatus function as declared in nvml/nvml.h +func nvmlDeviceGetSramEccErrorStatus(nvmlDevice nvmlDevice, Status *EccSramErrorStatus) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cStatus, _ := (*C.nvmlEccSramErrorStatus_t)(unsafe.Pointer(Status)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSramEccErrorStatus(cnvmlDevice, cStatus) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetAccountingMode function as declared in nvml/nvml.h func nvmlDeviceGetAccountingMode(nvmlDevice nvmlDevice, Mode *EnableState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -1349,6 +1710,44 @@ func nvmlDeviceGetArchitecture(nvmlDevice nvmlDevice, Arch *DeviceArchitecture) return __v } +// nvmlDeviceGetClkMonStatus function as declared in nvml/nvml.h +func nvmlDeviceGetClkMonStatus(nvmlDevice nvmlDevice, Status *ClkMonStatus) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cStatus, _ := (*C.nvmlClkMonStatus_t)(unsafe.Pointer(Status)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClkMonStatus(cnvmlDevice, cStatus) 
+ __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetProcessUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetProcessUtilization(nvmlDevice nvmlDevice, Utilization *ProcessUtilizationSample, ProcessSamplesCount *uint32, LastSeenTimeStamp uint64) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cUtilization, _ := (*C.nvmlProcessUtilizationSample_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(ProcessSamplesCount)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + __ret := C.nvmlDeviceGetProcessUtilization(cnvmlDevice, cUtilization, cProcessSamplesCount, cLastSeenTimeStamp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetProcessesUtilizationInfo function as declared in nvml/nvml.h +func nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice nvmlDevice, ProcesesUtilInfo *ProcessesUtilizationInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cProcesesUtilInfo, _ := (*C.nvmlProcessesUtilizationInfo_t)(unsafe.Pointer(ProcesesUtilInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetProcessesUtilizationInfo(cnvmlDevice, cProcesesUtilInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPlatformInfo function as declared in nvml/nvml.h +func nvmlDeviceGetPlatformInfo(nvmlDevice nvmlDevice, PlatformInfo *PlatformInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPlatformInfo, _ := (*C.nvmlPlatformInfo_t)(unsafe.Pointer(PlatformInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPlatformInfo(cnvmlDevice, cPlatformInfo) + __v := (Return)(__ret) + return __v +} + // nvmlUnitSetLedState function as declared in nvml/nvml.h func nvmlUnitSetLedState(nvmlUnit nvmlUnit, Color LedColor) Return { cnvmlUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&nvmlUnit)), cgoAllocsUnknown @@ -1450,11 +1849,58 @@ 
func nvmlDeviceSetApplicationsClocks(nvmlDevice nvmlDevice, MemClockMHz uint32, return __v } -// nvmlDeviceGetClkMonStatus function as declared in nvml/nvml.h -func nvmlDeviceGetClkMonStatus(nvmlDevice nvmlDevice, Status *ClkMonStatus) Return { +// nvmlDeviceResetApplicationsClocks function as declared in nvml/nvml.h +func nvmlDeviceResetApplicationsClocks(nvmlDevice nvmlDevice) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cStatus, _ := (*C.nvmlClkMonStatus_t)(unsafe.Pointer(Status)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetClkMonStatus(cnvmlDevice, cStatus) + __ret := C.nvmlDeviceResetApplicationsClocks(cnvmlDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, Enabled EnableState) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAutoBoostedClocksEnabled(cnvmlDevice, cEnabled) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDefaultAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice nvmlDevice, Enabled EnableState, Flags uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDefaultAutoBoostedClocksEnabled(cnvmlDevice, cEnabled, cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDefaultFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + __ret := 
C.nvmlDeviceSetDefaultFanSpeed_v2(cnvmlDevice, cFan) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetFanControlPolicy function as declared in nvml/nvml.h +func nvmlDeviceSetFanControlPolicy(nvmlDevice nvmlDevice, Fan uint32, Policy FanControlPolicy) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cPolicy, _ := (C.nvmlFanControlPolicy_t)(Policy), cgoAllocsUnknown + __ret := C.nvmlDeviceSetFanControlPolicy(cnvmlDevice, cFan, cPolicy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetTemperatureThreshold function as declared in nvml/nvml.h +func nvmlDeviceSetTemperatureThreshold(nvmlDevice nvmlDevice, ThresholdType TemperatureThresholds, Temp *int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown + cTemp, _ := (*C.int)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetTemperatureThreshold(cnvmlDevice, cThresholdType, cTemp) __v := (Return)(__ret) return __v } @@ -1487,6 +1933,34 @@ func nvmlDeviceSetAPIRestriction(nvmlDevice nvmlDevice, ApiType RestrictedAPI, I return __v } +// nvmlDeviceSetFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32, Speed uint32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cSpeed, _ := (C.uint)(Speed), cgoAllocsUnknown + __ret := C.nvmlDeviceSetFanSpeed_v2(cnvmlDevice, cFan, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpcClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceSetGpcClkVfOffset(nvmlDevice nvmlDevice, Offset int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cOffset, _ := (C.int)(Offset), cgoAllocsUnknown + __ret := 
C.nvmlDeviceSetGpcClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMemClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceSetMemClkVfOffset(nvmlDevice nvmlDevice, Offset int32) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cOffset, _ := (C.int)(Offset), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMemClkVfOffset(cnvmlDevice, cOffset) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceSetAccountingMode function as declared in nvml/nvml.h func nvmlDeviceSetAccountingMode(nvmlDevice nvmlDevice, Mode EnableState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -1504,6 +1978,15 @@ func nvmlDeviceClearAccountingPids(nvmlDevice nvmlDevice) Return { return __v } +// nvmlDeviceSetPowerManagementLimit_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice nvmlDevice, PowerValue *PowerValue_v2) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPowerValue, _ := (*C.nvmlPowerValue_v2_t)(unsafe.Pointer(PowerValue)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPowerManagementLimit_v2(cnvmlDevice, cPowerValue) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetNvLinkState function as declared in nvml/nvml.h func nvmlDeviceGetNvLinkState(nvmlDevice nvmlDevice, Link uint32, IsActive *EnableState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -1631,6 +2114,58 @@ func nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice nvmlDevice, Link uint32, PNv return __v } +// nvmlDeviceSetNvLinkDeviceLowPowerThreshold function as declared in nvml/nvml.h +func nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice nvmlDevice, Info *NvLinkPowerThres) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlNvLinkPowerThres_t)(unsafe.Pointer(Info)), 
cgoAllocsUnknown + __ret := C.nvmlDeviceSetNvLinkDeviceLowPowerThreshold(cnvmlDevice, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemSetNvlinkBwMode function as declared in nvml/nvml.h +func nvmlSystemSetNvlinkBwMode(NvlinkBwMode uint32) Return { + cNvlinkBwMode, _ := (C.uint)(NvlinkBwMode), cgoAllocsUnknown + __ret := C.nvmlSystemSetNvlinkBwMode(cNvlinkBwMode) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetNvlinkBwMode function as declared in nvml/nvml.h +func nvmlSystemGetNvlinkBwMode(NvlinkBwMode *uint32) Return { + cNvlinkBwMode, _ := (*C.uint)(unsafe.Pointer(NvlinkBwMode)), cgoAllocsUnknown + __ret := C.nvmlSystemGetNvlinkBwMode(cNvlinkBwMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvlinkSupportedBwModes function as declared in nvml/nvml.h +func nvmlDeviceGetNvlinkSupportedBwModes(nvmlDevice nvmlDevice, SupportedBwMode *NvlinkSupportedBwModes) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSupportedBwMode, _ := (*C.nvmlNvlinkSupportedBwModes_t)(unsafe.Pointer(SupportedBwMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvlinkSupportedBwModes(cnvmlDevice, cSupportedBwMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvlinkBwMode function as declared in nvml/nvml.h +func nvmlDeviceGetNvlinkBwMode(nvmlDevice nvmlDevice, GetBwMode *NvlinkGetBwMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cGetBwMode, _ := (*C.nvmlNvlinkGetBwMode_t)(unsafe.Pointer(GetBwMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvlinkBwMode(cnvmlDevice, cGetBwMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetNvlinkBwMode function as declared in nvml/nvml.h +func nvmlDeviceSetNvlinkBwMode(nvmlDevice nvmlDevice, SetBwMode *NvlinkSetBwMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cSetBwMode, _ := (*C.nvmlNvlinkSetBwMode_t)(unsafe.Pointer(SetBwMode)), 
cgoAllocsUnknown + __ret := C.nvmlDeviceSetNvlinkBwMode(cnvmlDevice, cSetBwMode) + __v := (Return)(__ret) + return __v +} + // nvmlEventSetCreate function as declared in nvml/nvml.h func nvmlEventSetCreate(Set *nvmlEventSet) Return { cSet, _ := (*C.nvmlEventSet_t)(unsafe.Pointer(Set)), cgoAllocsUnknown @@ -1676,6 +2211,38 @@ func nvmlEventSetFree(Set nvmlEventSet) Return { return __v } +// nvmlSystemEventSetCreate function as declared in nvml/nvml.h +func nvmlSystemEventSetCreate(Request *SystemEventSetCreateRequest) Return { + cRequest, _ := (*C.nvmlSystemEventSetCreateRequest_t)(unsafe.Pointer(Request)), cgoAllocsUnknown + __ret := C.nvmlSystemEventSetCreate(cRequest) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemEventSetFree function as declared in nvml/nvml.h +func nvmlSystemEventSetFree(Request *SystemEventSetFreeRequest) Return { + cRequest, _ := (*C.nvmlSystemEventSetFreeRequest_t)(unsafe.Pointer(Request)), cgoAllocsUnknown + __ret := C.nvmlSystemEventSetFree(cRequest) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemRegisterEvents function as declared in nvml/nvml.h +func nvmlSystemRegisterEvents(Request *SystemRegisterEventRequest) Return { + cRequest, _ := (*C.nvmlSystemRegisterEventRequest_t)(unsafe.Pointer(Request)), cgoAllocsUnknown + __ret := C.nvmlSystemRegisterEvents(cRequest) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemEventSetWait function as declared in nvml/nvml.h +func nvmlSystemEventSetWait(Request *SystemEventSetWaitRequest) Return { + cRequest, _ := (*C.nvmlSystemEventSetWaitRequest_t)(unsafe.Pointer(Request)), cgoAllocsUnknown + __ret := C.nvmlSystemEventSetWait(cRequest) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceModifyDrainState function as declared in nvml/nvml.h func nvmlDeviceModifyDrainState(PciInfo *PciInfo, NewState EnableState) Return { cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown @@ -1759,41 +2326,95 @@ func nvmlDeviceSetVirtualizationMode(nvmlDevice 
nvmlDevice, VirtualMode GpuVirtu return __v } -// nvmlDeviceGetGridLicensableFeatures_v4 function as declared in nvml/nvml.h -func nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice nvmlDevice, PGridLicensableFeatures *GridLicensableFeatures) Return { +// nvmlDeviceGetVgpuHeterogeneousMode function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice nvmlDevice, PHeterogeneousMode *VgpuHeterogeneousMode) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetGridLicensableFeatures_v4(cnvmlDevice, cPGridLicensableFeatures) + cPHeterogeneousMode, _ := (*C.nvmlVgpuHeterogeneousMode_t)(unsafe.Pointer(PHeterogeneousMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuHeterogeneousMode(cnvmlDevice, cPHeterogeneousMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVgpuHeterogeneousMode function as declared in nvml/nvml.h +func nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice nvmlDevice, PHeterogeneousMode *VgpuHeterogeneousMode) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cPHeterogeneousMode, _ := (*C.nvmlVgpuHeterogeneousMode_t)(unsafe.Pointer(PHeterogeneousMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVgpuHeterogeneousMode(cnvmlDevice, cPHeterogeneousMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetPlacementId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance nvmlVgpuInstance, PPlacement *VgpuPlacementId) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cPPlacement, _ := (*C.nvmlVgpuPlacementId_t)(unsafe.Pointer(PPlacement)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetPlacementId(cnvmlVgpuInstance, cPPlacement) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceGetVgpuTypeSupportedPlacements function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice nvmlDevice, nvmlVgpuTypeId nvmlVgpuTypeId, PPlacementList *VgpuPlacementList) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cPPlacementList, _ := (*C.nvmlVgpuPlacementList_t)(unsafe.Pointer(PPlacementList)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuTypeSupportedPlacements(cnvmlDevice, cnvmlVgpuTypeId, cPPlacementList) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuTypeCreatablePlacements function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice nvmlDevice, nvmlVgpuTypeId nvmlVgpuTypeId, PPlacementList *VgpuPlacementList) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cPPlacementList, _ := (*C.nvmlVgpuPlacementList_t)(unsafe.Pointer(PPlacementList)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuTypeCreatablePlacements(cnvmlDevice, cnvmlVgpuTypeId, cPPlacementList) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetGspHeapSize function as declared in nvml/nvml.h +func nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId nvmlVgpuTypeId, GspHeapSize *uint64) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cGspHeapSize, _ := (*C.ulonglong)(unsafe.Pointer(GspHeapSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetGspHeapSize(cnvmlVgpuTypeId, cGspHeapSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFbReservation function as declared in nvml/nvml.h +func nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId nvmlVgpuTypeId, FbReservation *uint64) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cFbReservation, _ := 
(*C.ulonglong)(unsafe.Pointer(FbReservation)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFbReservation(cnvmlVgpuTypeId, cFbReservation) __v := (Return)(__ret) return __v } -// nvmlDeviceGetProcessUtilization function as declared in nvml/nvml.h -func nvmlDeviceGetProcessUtilization(nvmlDevice nvmlDevice, Utilization *ProcessUtilizationSample, ProcessSamplesCount *uint32, LastSeenTimeStamp uint64) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cUtilization, _ := (*C.nvmlProcessUtilizationSample_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown - cProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(ProcessSamplesCount)), cgoAllocsUnknown - cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown - __ret := C.nvmlDeviceGetProcessUtilization(cnvmlDevice, cUtilization, cProcessSamplesCount, cLastSeenTimeStamp) +// nvmlVgpuInstanceGetRuntimeStateSize function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetRuntimeStateSize(nvmlVgpuInstance nvmlVgpuInstance, PState *VgpuRuntimeState) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cPState, _ := (*C.nvmlVgpuRuntimeState_t)(unsafe.Pointer(PState)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetRuntimeStateSize(cnvmlVgpuInstance, cPState) __v := (Return)(__ret) return __v } -// nvmlDeviceGetGspFirmwareVersion function as declared in nvml/nvml.h -func nvmlDeviceGetGspFirmwareVersion(nvmlDevice nvmlDevice, Version *byte) Return { +// nvmlDeviceSetVgpuCapabilities function as declared in nvml/nvml.h +func nvmlDeviceSetVgpuCapabilities(nvmlDevice nvmlDevice, Capability DeviceVgpuCapability, State EnableState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetGspFirmwareVersion(cnvmlDevice, cVersion) + cCapability, _ := (C.nvmlDeviceVgpuCapability_t)(Capability), 
cgoAllocsUnknown + cState, _ := (C.nvmlEnableState_t)(State), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVgpuCapabilities(cnvmlDevice, cCapability, cState) __v := (Return)(__ret) return __v } -// nvmlDeviceGetGspFirmwareMode function as declared in nvml/nvml.h -func nvmlDeviceGetGspFirmwareMode(nvmlDevice nvmlDevice, IsEnabled *uint32, DefaultMode *uint32) Return { +// nvmlDeviceGetGridLicensableFeatures_v4 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice nvmlDevice, PGridLicensableFeatures *GridLicensableFeatures) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cIsEnabled, _ := (*C.uint)(unsafe.Pointer(IsEnabled)), cgoAllocsUnknown - cDefaultMode, _ := (*C.uint)(unsafe.Pointer(DefaultMode)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetGspFirmwareMode(cnvmlDevice, cIsEnabled, cDefaultMode) + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v4(cnvmlDevice, cPGridLicensableFeatures) __v := (Return)(__ret) return __v } @@ -1943,6 +2564,15 @@ func nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId nvmlVgpuTypeId, VgpuInstanc return __v } +// nvmlVgpuTypeGetBAR1Info function as declared in nvml/nvml.h +func nvmlVgpuTypeGetBAR1Info(nvmlVgpuTypeId nvmlVgpuTypeId, Bar1Info *VgpuTypeBar1Info) Return { + cnvmlVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(nvmlVgpuTypeId), cgoAllocsUnknown + cBar1Info, _ := (*C.nvmlVgpuTypeBar1Info_t)(unsafe.Pointer(Bar1Info)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetBAR1Info(cnvmlVgpuTypeId, cBar1Info) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetActiveVgpus function as declared in nvml/nvml.h func nvmlDeviceGetActiveVgpus(nvmlDevice nvmlDevice, VgpuCount *uint32, VgpuInstances *nvmlVgpuInstance) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -2116,6 +2746,96 @@ func 
nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId nvmlVgpuTypeId, Capability VgpuC return __v } +// nvmlVgpuInstanceGetMdevUUID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance nvmlVgpuInstance, MdevUuid *byte, Size uint32) Return { + cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown + cMdevUuid, _ := (*C.char)(unsafe.Pointer(MdevUuid)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetMdevUUID(cnvmlVgpuInstance, cMdevUuid, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetCreatableVgpus function as declared in nvml/nvml.h +func nvmlGpuInstanceGetCreatableVgpus(nvmlGpuInstance nvmlGpuInstance, PVgpus *VgpuTypeIdInfo) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPVgpus, _ := (*C.nvmlVgpuTypeIdInfo_t)(unsafe.Pointer(PVgpus)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetCreatableVgpus(cnvmlGpuInstance, cPVgpus) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetMaxInstancesPerGpuInstance function as declared in nvml/nvml.h +func nvmlVgpuTypeGetMaxInstancesPerGpuInstance(PMaxInstance *VgpuTypeMaxInstance) Return { + cPMaxInstance, _ := (*C.nvmlVgpuTypeMaxInstance_t)(unsafe.Pointer(PMaxInstance)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetMaxInstancesPerGpuInstance(cPMaxInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetActiveVgpus function as declared in nvml/nvml.h +func nvmlGpuInstanceGetActiveVgpus(nvmlGpuInstance nvmlGpuInstance, PVgpuInstanceInfo *ActiveVgpuInstanceInfo) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPVgpuInstanceInfo, _ := (*C.nvmlActiveVgpuInstanceInfo_t)(unsafe.Pointer(PVgpuInstanceInfo)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetActiveVgpus(cnvmlGpuInstance, cPVgpuInstanceInfo) + __v := (Return)(__ret) + return __v +} + +// 
nvmlGpuInstanceSetVgpuSchedulerState function as declared in nvml/nvml.h +func nvmlGpuInstanceSetVgpuSchedulerState(nvmlGpuInstance nvmlGpuInstance, PScheduler *VgpuSchedulerState) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPScheduler, _ := (*C.nvmlVgpuSchedulerState_t)(unsafe.Pointer(PScheduler)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceSetVgpuSchedulerState(cnvmlGpuInstance, cPScheduler) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetVgpuSchedulerState function as declared in nvml/nvml.h +func nvmlGpuInstanceGetVgpuSchedulerState(nvmlGpuInstance nvmlGpuInstance, PSchedulerStateInfo *VgpuSchedulerStateInfo) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPSchedulerStateInfo, _ := (*C.nvmlVgpuSchedulerStateInfo_t)(unsafe.Pointer(PSchedulerStateInfo)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetVgpuSchedulerState(cnvmlGpuInstance, cPSchedulerStateInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetVgpuSchedulerLog function as declared in nvml/nvml.h +func nvmlGpuInstanceGetVgpuSchedulerLog(nvmlGpuInstance nvmlGpuInstance, PSchedulerLogInfo *VgpuSchedulerLogInfo) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPSchedulerLogInfo, _ := (*C.nvmlVgpuSchedulerLogInfo_t)(unsafe.Pointer(PSchedulerLogInfo)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetVgpuSchedulerLog(cnvmlGpuInstance, cPSchedulerLogInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetVgpuTypeCreatablePlacements function as declared in nvml/nvml.h +func nvmlGpuInstanceGetVgpuTypeCreatablePlacements(nvmlGpuInstance nvmlGpuInstance, PCreatablePlacementInfo *VgpuCreatablePlacementInfo) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPCreatablePlacementInfo, _ := 
(*C.nvmlVgpuCreatablePlacementInfo_t)(unsafe.Pointer(PCreatablePlacementInfo)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetVgpuTypeCreatablePlacements(cnvmlGpuInstance, cPCreatablePlacementInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetVgpuHeterogeneousMode function as declared in nvml/nvml.h +func nvmlGpuInstanceGetVgpuHeterogeneousMode(nvmlGpuInstance nvmlGpuInstance, PHeterogeneousMode *VgpuHeterogeneousMode) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPHeterogeneousMode, _ := (*C.nvmlVgpuHeterogeneousMode_t)(unsafe.Pointer(PHeterogeneousMode)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetVgpuHeterogeneousMode(cnvmlGpuInstance, cPHeterogeneousMode) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceSetVgpuHeterogeneousMode function as declared in nvml/nvml.h +func nvmlGpuInstanceSetVgpuHeterogeneousMode(nvmlGpuInstance nvmlGpuInstance, PHeterogeneousMode *VgpuHeterogeneousMode) Return { + cnvmlGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&nvmlGpuInstance)), cgoAllocsUnknown + cPHeterogeneousMode, _ := (*C.nvmlVgpuHeterogeneousMode_t)(unsafe.Pointer(PHeterogeneousMode)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceSetVgpuHeterogeneousMode(cnvmlGpuInstance, cPHeterogeneousMode) + __v := (Return)(__ret) + return __v +} + // nvmlVgpuInstanceGetMetadata function as declared in nvml/nvml.h func nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance nvmlVgpuInstance, nvmlVgpuMetadata *nvmlVgpuMetadata, BufferSize *uint32) Return { cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown @@ -2174,20 +2894,20 @@ func nvmlDeviceGetVgpuSchedulerState(nvmlDevice nvmlDevice, PSchedulerState *Vgp return __v } -// nvmlDeviceSetVgpuSchedulerState function as declared in nvml/nvml.h -func nvmlDeviceSetVgpuSchedulerState(nvmlDevice nvmlDevice, PSchedulerState *VgpuSchedulerSetState) Return { +// nvmlDeviceGetVgpuSchedulerCapabilities 
function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice nvmlDevice, PCapabilities *VgpuSchedulerCapabilities) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cPSchedulerState, _ := (*C.nvmlVgpuSchedulerSetState_t)(unsafe.Pointer(PSchedulerState)), cgoAllocsUnknown - __ret := C.nvmlDeviceSetVgpuSchedulerState(cnvmlDevice, cPSchedulerState) + cPCapabilities, _ := (*C.nvmlVgpuSchedulerCapabilities_t)(unsafe.Pointer(PCapabilities)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuSchedulerCapabilities(cnvmlDevice, cPCapabilities) __v := (Return)(__ret) return __v } -// nvmlDeviceGetVgpuSchedulerCapabilities function as declared in nvml/nvml.h -func nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice nvmlDevice, PCapabilities *VgpuSchedulerCapabilities) Return { +// nvmlDeviceSetVgpuSchedulerState function as declared in nvml/nvml.h +func nvmlDeviceSetVgpuSchedulerState(nvmlDevice nvmlDevice, PSchedulerState *VgpuSchedulerSetState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cPCapabilities, _ := (*C.nvmlVgpuSchedulerCapabilities_t)(unsafe.Pointer(PCapabilities)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetVgpuSchedulerCapabilities(cnvmlDevice, cPCapabilities) + cPSchedulerState, _ := (*C.nvmlVgpuSchedulerSetState_t)(unsafe.Pointer(PSchedulerState)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVgpuSchedulerState(cnvmlDevice, cPSchedulerState) __v := (Return)(__ret) return __v } @@ -2221,6 +2941,15 @@ func nvmlDeviceGetVgpuUtilization(nvmlDevice nvmlDevice, LastSeenTimeStamp uint6 return __v } +// nvmlDeviceGetVgpuInstancesUtilizationInfo function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuInstancesUtilizationInfo(nvmlDevice nvmlDevice, VgpuUtilInfo *VgpuInstancesUtilizationInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVgpuUtilInfo, _ := 
(*C.nvmlVgpuInstancesUtilizationInfo_t)(unsafe.Pointer(VgpuUtilInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuInstancesUtilizationInfo(cnvmlDevice, cVgpuUtilInfo) + __v := (Return)(__ret) + return __v +} + // nvmlDeviceGetVgpuProcessUtilization function as declared in nvml/nvml.h func nvmlDeviceGetVgpuProcessUtilization(nvmlDevice nvmlDevice, LastSeenTimeStamp uint64, VgpuProcessSamplesCount *uint32, UtilizationSamples *VgpuProcessUtilizationSample) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown @@ -2232,6 +2961,15 @@ func nvmlDeviceGetVgpuProcessUtilization(nvmlDevice nvmlDevice, LastSeenTimeStam return __v } +// nvmlDeviceGetVgpuProcessesUtilizationInfo function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuProcessesUtilizationInfo(nvmlDevice nvmlDevice, VgpuProcUtilInfo *VgpuProcessesUtilizationInfo) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cVgpuProcUtilInfo, _ := (*C.nvmlVgpuProcessesUtilizationInfo_t)(unsafe.Pointer(VgpuProcUtilInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuProcessesUtilizationInfo(cnvmlDevice, cVgpuProcUtilInfo) + __v := (Return)(__ret) + return __v +} + // nvmlVgpuInstanceGetAccountingMode function as declared in nvml/nvml.h func nvmlVgpuInstanceGetAccountingMode(nvmlVgpuInstance nvmlVgpuInstance, Mode *EnableState) Return { cnvmlVgpuInstance, _ := (C.nvmlVgpuInstance_t)(nvmlVgpuInstance), cgoAllocsUnknown @@ -2572,196 +3310,144 @@ func nvmlDeviceGetDeviceHandleFromMigDeviceHandle(MigDevice nvmlDevice, nvmlDevi return __v } -// nvmlDeviceGetBusType function as declared in nvml/nvml.h -func nvmlDeviceGetBusType(nvmlDevice nvmlDevice, _type *BusType) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - c_type, _ := (*C.nvmlBusType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetBusType(cnvmlDevice, c_type) - __v := (Return)(__ret) - return __v -} - -// 
nvmlDeviceGetDynamicPstatesInfo function as declared in nvml/nvml.h -func nvmlDeviceGetDynamicPstatesInfo(nvmlDevice nvmlDevice, PDynamicPstatesInfo *GpuDynamicPstatesInfo) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cPDynamicPstatesInfo, _ := (*C.nvmlGpuDynamicPstatesInfo_t)(unsafe.Pointer(PDynamicPstatesInfo)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetDynamicPstatesInfo(cnvmlDevice, cPDynamicPstatesInfo) - __v := (Return)(__ret) - return __v -} - -// nvmlDeviceSetFanSpeed_v2 function as declared in nvml/nvml.h -func nvmlDeviceSetFanSpeed_v2(nvmlDevice nvmlDevice, Fan uint32, Speed uint32) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cFan, _ := (C.uint)(Fan), cgoAllocsUnknown - cSpeed, _ := (C.uint)(Speed), cgoAllocsUnknown - __ret := C.nvmlDeviceSetFanSpeed_v2(cnvmlDevice, cFan, cSpeed) +// nvmlGpmMetricsGet function as declared in nvml/nvml.h +func nvmlGpmMetricsGet(MetricsGet *nvmlGpmMetricsGetType) Return { + cMetricsGet, _ := (*C.nvmlGpmMetricsGet_t)(unsafe.Pointer(MetricsGet)), cgoAllocsUnknown + __ret := C.nvmlGpmMetricsGet(cMetricsGet) __v := (Return)(__ret) return __v } -// nvmlDeviceGetGpcClkVfOffset function as declared in nvml/nvml.h -func nvmlDeviceGetGpcClkVfOffset(nvmlDevice nvmlDevice, Offset *int32) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetGpcClkVfOffset(cnvmlDevice, cOffset) +// nvmlGpmSampleFree function as declared in nvml/nvml.h +func nvmlGpmSampleFree(nvmlGpmSample nvmlGpmSample) Return { + cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleFree(cnvmlGpmSample) __v := (Return)(__ret) return __v } -// nvmlDeviceSetGpcClkVfOffset function as declared in nvml/nvml.h -func nvmlDeviceSetGpcClkVfOffset(nvmlDevice nvmlDevice, Offset 
int32) Return { - cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cOffset, _ := (C.int)(Offset), cgoAllocsUnknown - __ret := C.nvmlDeviceSetGpcClkVfOffset(cnvmlDevice, cOffset) +// nvmlGpmSampleAlloc function as declared in nvml/nvml.h +func nvmlGpmSampleAlloc(nvmlGpmSample *nvmlGpmSample) Return { + cnvmlGpmSample, _ := (*C.nvmlGpmSample_t)(unsafe.Pointer(nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleAlloc(cnvmlGpmSample) __v := (Return)(__ret) return __v } -// nvmlDeviceGetMemClkVfOffset function as declared in nvml/nvml.h -func nvmlDeviceGetMemClkVfOffset(nvmlDevice nvmlDevice, Offset *int32) Return { +// nvmlGpmSampleGet function as declared in nvml/nvml.h +func nvmlGpmSampleGet(nvmlDevice nvmlDevice, nvmlGpmSample nvmlGpmSample) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetMemClkVfOffset(cnvmlDevice, cOffset) + cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleGet(cnvmlDevice, cnvmlGpmSample) __v := (Return)(__ret) return __v } -// nvmlDeviceSetMemClkVfOffset function as declared in nvml/nvml.h -func nvmlDeviceSetMemClkVfOffset(nvmlDevice nvmlDevice, Offset int32) Return { +// nvmlGpmMigSampleGet function as declared in nvml/nvml.h +func nvmlGpmMigSampleGet(nvmlDevice nvmlDevice, GpuInstanceId uint32, nvmlGpmSample nvmlGpmSample) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cOffset, _ := (C.int)(Offset), cgoAllocsUnknown - __ret := C.nvmlDeviceSetMemClkVfOffset(cnvmlDevice, cOffset) + cGpuInstanceId, _ := (C.uint)(GpuInstanceId), cgoAllocsUnknown + cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmMigSampleGet(cnvmlDevice, cGpuInstanceId, cnvmlGpmSample) __v := (Return)(__ret) return __v } 
-// nvmlDeviceGetMinMaxClockOfPState function as declared in nvml/nvml.h -func nvmlDeviceGetMinMaxClockOfPState(nvmlDevice nvmlDevice, _type ClockType, Pstate Pstates, MinClockMHz *uint32, MaxClockMHz *uint32) Return { +// nvmlGpmQueryDeviceSupport function as declared in nvml/nvml.h +func nvmlGpmQueryDeviceSupport(nvmlDevice nvmlDevice, GpmSupport *GpmSupport) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown - cPstate, _ := (C.nvmlPstates_t)(Pstate), cgoAllocsUnknown - cMinClockMHz, _ := (*C.uint)(unsafe.Pointer(MinClockMHz)), cgoAllocsUnknown - cMaxClockMHz, _ := (*C.uint)(unsafe.Pointer(MaxClockMHz)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetMinMaxClockOfPState(cnvmlDevice, c_type, cPstate, cMinClockMHz, cMaxClockMHz) + cGpmSupport, _ := (*C.nvmlGpmSupport_t)(unsafe.Pointer(GpmSupport)), cgoAllocsUnknown + __ret := C.nvmlGpmQueryDeviceSupport(cnvmlDevice, cGpmSupport) __v := (Return)(__ret) return __v } -// nvmlDeviceGetSupportedPerformanceStates function as declared in nvml/nvml.h -func nvmlDeviceGetSupportedPerformanceStates(nvmlDevice nvmlDevice, Pstates *Pstates, Size uint32) Return { +// nvmlGpmQueryIfStreamingEnabled function as declared in nvml/nvml.h +func nvmlGpmQueryIfStreamingEnabled(nvmlDevice nvmlDevice, State *uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cPstates, _ := (*C.nvmlPstates_t)(unsafe.Pointer(Pstates)), cgoAllocsUnknown - cSize, _ := (C.uint)(Size), cgoAllocsUnknown - __ret := C.nvmlDeviceGetSupportedPerformanceStates(cnvmlDevice, cPstates, cSize) + cState, _ := (*C.uint)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlGpmQueryIfStreamingEnabled(cnvmlDevice, cState) __v := (Return)(__ret) return __v } -// nvmlDeviceGetGpcClkMinMaxVfOffset function as declared in nvml/nvml.h -func nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice nvmlDevice, MinOffset *int32, MaxOffset 
*int32) Return { +// nvmlGpmSetStreamingEnabled function as declared in nvml/nvml.h +func nvmlGpmSetStreamingEnabled(nvmlDevice nvmlDevice, State uint32) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown - cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetGpcClkMinMaxVfOffset(cnvmlDevice, cMinOffset, cMaxOffset) + cState, _ := (C.uint)(State), cgoAllocsUnknown + __ret := C.nvmlGpmSetStreamingEnabled(cnvmlDevice, cState) __v := (Return)(__ret) return __v } -// nvmlDeviceGetMemClkMinMaxVfOffset function as declared in nvml/nvml.h -func nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice nvmlDevice, MinOffset *int32, MaxOffset *int32) Return { +// nvmlDeviceGetCapabilities function as declared in nvml/nvml.h +func nvmlDeviceGetCapabilities(nvmlDevice nvmlDevice, Caps *DeviceCapabilities) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown - cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown - __ret := C.nvmlDeviceGetMemClkMinMaxVfOffset(cnvmlDevice, cMinOffset, cMaxOffset) + cCaps, _ := (*C.nvmlDeviceCapabilities_t)(unsafe.Pointer(Caps)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCapabilities(cnvmlDevice, cCaps) __v := (Return)(__ret) return __v } -// nvmlDeviceGetGpuFabricInfo function as declared in nvml/nvml.h -func nvmlDeviceGetGpuFabricInfo(nvmlDevice nvmlDevice, GpuFabricInfo *GpuFabricInfo) Return { +// nvmlDeviceWorkloadPowerProfileGetProfilesInfo function as declared in nvml/nvml.h +func nvmlDeviceWorkloadPowerProfileGetProfilesInfo(nvmlDevice nvmlDevice, ProfilesInfo *WorkloadPowerProfileProfilesInfo) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cGpuFabricInfo, _ := (*C.nvmlGpuFabricInfo_t)(unsafe.Pointer(GpuFabricInfo)), 
cgoAllocsUnknown - __ret := C.nvmlDeviceGetGpuFabricInfo(cnvmlDevice, cGpuFabricInfo) - __v := (Return)(__ret) - return __v -} - -// nvmlGpmMetricsGet function as declared in nvml/nvml.h -func nvmlGpmMetricsGet(MetricsGet *nvmlGpmMetricsGetType) Return { - cMetricsGet, _ := (*C.nvmlGpmMetricsGet_t)(unsafe.Pointer(MetricsGet)), cgoAllocsUnknown - __ret := C.nvmlGpmMetricsGet(cMetricsGet) - __v := (Return)(__ret) - return __v -} - -// nvmlGpmSampleFree function as declared in nvml/nvml.h -func nvmlGpmSampleFree(nvmlGpmSample nvmlGpmSample) Return { - cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown - __ret := C.nvmlGpmSampleFree(cnvmlGpmSample) - __v := (Return)(__ret) - return __v -} - -// nvmlGpmSampleAlloc function as declared in nvml/nvml.h -func nvmlGpmSampleAlloc(nvmlGpmSample *nvmlGpmSample) Return { - cnvmlGpmSample, _ := (*C.nvmlGpmSample_t)(unsafe.Pointer(nvmlGpmSample)), cgoAllocsUnknown - __ret := C.nvmlGpmSampleAlloc(cnvmlGpmSample) + cProfilesInfo, _ := (*C.nvmlWorkloadPowerProfileProfilesInfo_t)(unsafe.Pointer(ProfilesInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceWorkloadPowerProfileGetProfilesInfo(cnvmlDevice, cProfilesInfo) __v := (Return)(__ret) return __v } -// nvmlGpmSampleGet function as declared in nvml/nvml.h -func nvmlGpmSampleGet(nvmlDevice nvmlDevice, nvmlGpmSample nvmlGpmSample) Return { +// nvmlDeviceWorkloadPowerProfileGetCurrentProfiles function as declared in nvml/nvml.h +func nvmlDeviceWorkloadPowerProfileGetCurrentProfiles(nvmlDevice nvmlDevice, CurrentProfiles *WorkloadPowerProfileCurrentProfiles) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown - __ret := C.nvmlGpmSampleGet(cnvmlDevice, cnvmlGpmSample) + cCurrentProfiles, _ := (*C.nvmlWorkloadPowerProfileCurrentProfiles_t)(unsafe.Pointer(CurrentProfiles)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceWorkloadPowerProfileGetCurrentProfiles(cnvmlDevice, cCurrentProfiles) __v := (Return)(__ret) return __v } -// nvmlGpmMigSampleGet function as declared in nvml/nvml.h -func nvmlGpmMigSampleGet(nvmlDevice nvmlDevice, GpuInstanceId uint32, nvmlGpmSample nvmlGpmSample) Return { +// nvmlDeviceWorkloadPowerProfileSetRequestedProfiles function as declared in nvml/nvml.h +func nvmlDeviceWorkloadPowerProfileSetRequestedProfiles(nvmlDevice nvmlDevice, RequestedProfiles *WorkloadPowerProfileRequestedProfiles) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cGpuInstanceId, _ := (C.uint)(GpuInstanceId), cgoAllocsUnknown - cnvmlGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&nvmlGpmSample)), cgoAllocsUnknown - __ret := C.nvmlGpmMigSampleGet(cnvmlDevice, cGpuInstanceId, cnvmlGpmSample) + cRequestedProfiles, _ := (*C.nvmlWorkloadPowerProfileRequestedProfiles_t)(unsafe.Pointer(RequestedProfiles)), cgoAllocsUnknown + __ret := C.nvmlDeviceWorkloadPowerProfileSetRequestedProfiles(cnvmlDevice, cRequestedProfiles) __v := (Return)(__ret) return __v } -// nvmlGpmQueryDeviceSupport function as declared in nvml/nvml.h -func nvmlGpmQueryDeviceSupport(nvmlDevice nvmlDevice, GpmSupport *GpmSupport) Return { +// nvmlDeviceWorkloadPowerProfileClearRequestedProfiles function as declared in nvml/nvml.h +func nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice nvmlDevice, RequestedProfiles *WorkloadPowerProfileRequestedProfiles) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cGpmSupport, _ := (*C.nvmlGpmSupport_t)(unsafe.Pointer(GpmSupport)), cgoAllocsUnknown - __ret := C.nvmlGpmQueryDeviceSupport(cnvmlDevice, cGpmSupport) + cRequestedProfiles, _ := (*C.nvmlWorkloadPowerProfileRequestedProfiles_t)(unsafe.Pointer(RequestedProfiles)), cgoAllocsUnknown + __ret := C.nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(cnvmlDevice, cRequestedProfiles) __v := (Return)(__ret) return 
__v } -// nvmlDeviceCcuGetStreamState function as declared in nvml/nvml.h -func nvmlDeviceCcuGetStreamState(nvmlDevice nvmlDevice, State *uint32) Return { +// nvmlDevicePowerSmoothingActivatePresetProfile function as declared in nvml/nvml.h +func nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice nvmlDevice, Profile *PowerSmoothingProfile) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cState, _ := (*C.uint)(unsafe.Pointer(State)), cgoAllocsUnknown - __ret := C.nvmlDeviceCcuGetStreamState(cnvmlDevice, cState) + cProfile, _ := (*C.nvmlPowerSmoothingProfile_t)(unsafe.Pointer(Profile)), cgoAllocsUnknown + __ret := C.nvmlDevicePowerSmoothingActivatePresetProfile(cnvmlDevice, cProfile) __v := (Return)(__ret) return __v } -// nvmlDeviceCcuSetStreamState function as declared in nvml/nvml.h -func nvmlDeviceCcuSetStreamState(nvmlDevice nvmlDevice, State uint32) Return { +// nvmlDevicePowerSmoothingUpdatePresetProfileParam function as declared in nvml/nvml.h +func nvmlDevicePowerSmoothingUpdatePresetProfileParam(nvmlDevice nvmlDevice, Profile *PowerSmoothingProfile) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cState, _ := (C.uint)(State), cgoAllocsUnknown - __ret := C.nvmlDeviceCcuSetStreamState(cnvmlDevice, cState) + cProfile, _ := (*C.nvmlPowerSmoothingProfile_t)(unsafe.Pointer(Profile)), cgoAllocsUnknown + __ret := C.nvmlDevicePowerSmoothingUpdatePresetProfileParam(cnvmlDevice, cProfile) __v := (Return)(__ret) return __v } -// nvmlDeviceSetNvLinkDeviceLowPowerThreshold function as declared in nvml/nvml.h -func nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice nvmlDevice, Info *NvLinkPowerThres) Return { +// nvmlDevicePowerSmoothingSetState function as declared in nvml/nvml.h +func nvmlDevicePowerSmoothingSetState(nvmlDevice nvmlDevice, State *PowerSmoothingState) Return { cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown - cInfo, _ 
:= (*C.nvmlNvLinkPowerThres_t)(unsafe.Pointer(Info)), cgoAllocsUnknown - __ret := C.nvmlDeviceSetNvLinkDeviceLowPowerThreshold(cnvmlDevice, cInfo) + cState, _ := (*C.nvmlPowerSmoothingState_t)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlDevicePowerSmoothingSetState(cnvmlDevice, cState) __v := (Return)(__ret) return __v } @@ -2969,3 +3655,13 @@ func nvmlVgpuInstanceGetLicenseInfo_v1(nvmlVgpuInstance nvmlVgpuInstance, Licens __v := (Return)(__ret) return __v } + +// nvmlDeviceGetDriverModel_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetDriverModel_v1(nvmlDevice nvmlDevice, Current *DriverModel, Pending *DriverModel) Return { + cnvmlDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&nvmlDevice)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDriverModel(cnvmlDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h index 8c71ff8..28a6547 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h @@ -1,7 +1,7 @@ -/*** NVML VERSION: 12.0.76 ***/ -/*** From https://api.anaconda.org/download/nvidia/cuda-nvml-dev/12.0.76/linux-64/cuda-nvml-dev-12.0.76-0.tar.bz2 ***/ +/*** NVML VERSION: 12.9.40 ***/ +/*** From https://gitlab.com/nvidia/headers/cuda-individual/nvml_dev/-/raw/v12.9.40/nvml.h ***/ /* - * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * Copyright 1993-2025 NVIDIA Corporation. All rights reserved. 
* * NOTICE TO USER: * @@ -94,11 +94,13 @@ extern "C" { #define DECLDIR #endif + #define NVML_MCDM_SUPPORT + /** * NVML API versioning support */ -#define NVML_API_VERSION 11 -#define NVML_API_VERSION_STR "11" +#define NVML_API_VERSION 12 +#define NVML_API_VERSION_STR "12" /** * Defining NVML_NO_UNVERSIONED_FUNC_DEFS will disable "auto upgrading" of APIs. * e.g. the user will have to call nvmlInit_v2 instead of nvmlInit. Enable this @@ -124,6 +126,7 @@ extern "C" { #define nvmlGetBlacklistDeviceInfoByIndex nvmlGetExcludedDeviceInfoByIndex #define nvmlDeviceGetGpuInstancePossiblePlacements nvmlDeviceGetGpuInstancePossiblePlacements_v2 #define nvmlVgpuInstanceGetLicenseInfo nvmlVgpuInstanceGetLicenseInfo_v2 + #define nvmlDeviceGetDriverModel nvmlDeviceGetDriverModel_v2 #endif // #ifndef NVML_NO_UNVERSIONED_FUNC_DEFS #define NVML_STRUCT_VERSION(data, ver) (unsigned int)(sizeof(nvml ## data ## _v ## ver ## _t) | \ @@ -148,6 +151,11 @@ typedef struct struct nvmlDevice_st* handle; } nvmlDevice_t; +typedef struct +{ + struct nvmlGpuInstance_st* handle; +} nvmlGpuInstance_t; + /** * Buffer size guaranteed to be large enough for pci bus id */ @@ -158,6 +166,27 @@ typedef struct */ #define NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE 16 +/** + * PCI information about a GPU device. 
+ */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffffffff + unsigned int bus; //!< The bus on which the device resides, 0 to 0xff + unsigned int device; //!< The device's id on the bus, 0 to 31 + + unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id + unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID + + unsigned int baseClass; //!< The 8-bit PCI base class code + unsigned int subClass; //!< The 8-bit PCI sub class code + + char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) +} nvmlPciInfoExt_v1_t; +typedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t; +#define nvmlPciInfoExt_v1 NVML_STRUCT_VERSION(PciInfoExt, 1) + /** * PCI information about a GPU device. */ @@ -232,7 +261,6 @@ typedef struct nvmlMemory_st * Memory allocation information for a device (v2). * * Version 2 adds versioning for the struct and the amount of system-reserved memory as an output. - * @note The \ref nvmlMemory_v2_t.used amount also includes the \ref nvmlMemory_v2_t.reserved amount. */ typedef struct nvmlMemory_v2_st { @@ -240,7 +268,7 @@ typedef struct nvmlMemory_v2_st unsigned long long total; //!< Total physical device memory (in bytes) unsigned long long reserved; //!< Device memory (in bytes) reserved for system use (driver or firmware) unsigned long long free; //!< Unallocated device memory (in bytes) - unsigned long long used; //!< Allocated device memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping + unsigned long long used; //!< Allocated device memory (in bytes). } nvmlMemory_v2_t; #define nvmlMemory_v2 NVML_STRUCT_VERSION(Memory, 2) @@ -280,23 +308,41 @@ typedef struct nvmlProcessInfo_v2_st // 0xFFFFFFFF otherwise. 
unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to // 0xFFFFFFFF otherwise. -} nvmlProcessInfo_v2_t; +} nvmlProcessInfo_v2_t, nvmlProcessInfo_t; /** - * Information about running compute processes on the GPU - * Version 2 adds versioning for the struct + * Information about running process on the GPU with protected memory */ -typedef struct nvmlProcessInfo_st +typedef struct { - unsigned int pid; //!< Process ID - unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. - //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported - //! because Windows KMD manages all the memory and not the NVIDIA driver - unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is set to - // 0xFFFFFFFF otherwise. - unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to - // 0xFFFFFFFF otherwise. -} nvmlProcessInfo_t; + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is + // set to 0xFFFFFFFF otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId + // is set to 0xFFFFFFFF otherwise. + unsigned long long usedGpuCcProtectedMemory; //!< Amount of used GPU conf compute protected memory in bytes. 
+} nvmlProcessDetail_v1_t; + +/** + * Information about all running processes on the GPU for the given mode + */ +typedef struct +{ + unsigned int version; //!< Struct version, MUST be nvmlProcessDetailList_v1 + unsigned int mode; //!< Process mode(Compute/Graphics/MPSCompute) + unsigned int numProcArrayEntries; //!< Number of process entries in procArray + nvmlProcessDetail_v1_t *procArray; //!< Process array +} nvmlProcessDetailList_v1_t; + +typedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t; + +/** + * nvmlProcessDetailList version + */ +#define nvmlProcessDetailList_v1 NVML_STRUCT_VERSION(ProcessDetailList, 1) typedef struct nvmlDeviceAttributes_st { @@ -311,6 +357,16 @@ typedef struct nvmlDeviceAttributes_st unsigned long long memorySizeMB; //!< Device memory size (in MiB) } nvmlDeviceAttributes_t; +/** + * C2C Mode information for a device + */ +typedef struct +{ + unsigned int isC2cEnabled; +} nvmlC2cModeInfo_v1_t; + +#define nvmlC2cModeInfo_v1 NVML_STRUCT_VERSION(C2cModeInfo, 1) + /** * Possible values that classify the remap availability for each bank. The max * field will contain the number of banks that have maximum remap availability @@ -447,6 +503,7 @@ typedef enum nvmlGpuP2PStatus_enum { NVML_P2P_STATUS_OK = 0, NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, + NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, NVML_P2P_STATUS_GPU_NOT_SUPPORTED, NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED, NVML_P2P_STATUS_DISABLED_BY_REGKEY, @@ -459,11 +516,16 @@ typedef enum nvmlGpuP2PStatus_enum typedef enum nvmlGpuP2PCapsIndex_enum { NVML_P2P_CAPS_INDEX_READ = 0, - NVML_P2P_CAPS_INDEX_WRITE, - NVML_P2P_CAPS_INDEX_NVLINK, - NVML_P2P_CAPS_INDEX_ATOMICS, - NVML_P2P_CAPS_INDEX_PROP, - NVML_P2P_CAPS_INDEX_UNKNOWN + NVML_P2P_CAPS_INDEX_WRITE = 1, + NVML_P2P_CAPS_INDEX_NVLINK = 2, + NVML_P2P_CAPS_INDEX_ATOMICS = 3, + NVML_P2P_CAPS_INDEX_PCI = 4, + /* + * DO NOT USE! NVML_P2P_CAPS_INDEX_PROP is deprecated. + * Use NVML_P2P_CAPS_INDEX_PCI instead. 
+ */ + NVML_P2P_CAPS_INDEX_PROP = NVML_P2P_CAPS_INDEX_PCI, + NVML_P2P_CAPS_INDEX_UNKNOWN = 5, }nvmlGpuP2PCapsIndex_t; /** @@ -502,6 +564,9 @@ typedef enum nvmlSamplingType_enum NVML_DEC_UTILIZATION_SAMPLES = 4, //!< To represent percent of time during which NVDEC remains busy NVML_PROCESSOR_CLK_SAMPLES = 5, //!< To represent processor clock samples NVML_MEMORY_CLK_SAMPLES = 6, //!< To represent memory clock samples + NVML_MODULE_POWER_SAMPLES = 7, //!< To represent module power samples for total module starting Grace Hopper + NVML_JPG_UTILIZATION_SAMPLES = 8, //!< To represent percent of time during which NVJPG remains busy + NVML_OFA_UTILIZATION_SAMPLES = 9, //!< To represent percent of time during which NVOFA remains busy // Keep this last NVML_SAMPLINGTYPE_COUNT @@ -529,22 +594,25 @@ typedef enum nvmlValueType_enum NVML_VALUE_TYPE_UNSIGNED_LONG = 2, NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3, NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4, + NVML_VALUE_TYPE_SIGNED_INT = 5, + NVML_VALUE_TYPE_UNSIGNED_SHORT = 6, // Keep this last NVML_VALUE_TYPE_COUNT }nvmlValueType_t; - /** * Union to represent different types of Value */ typedef union nvmlValue_st { double dVal; //!< If the value is double + int siVal; //!< If the value is signed int unsigned int uiVal; //!< If the value is unsigned int unsigned long ulVal; //!< If the value is unsigned long unsigned long long ullVal; //!< If the value is unsigned long long signed long long sllVal; //!< If the value is signed long long + unsigned short usVal; //!< If the value is unsigned short }nvmlValue_t; /** @@ -586,6 +654,9 @@ typedef struct nvmlViolationTime_st #define NVML_MAX_THERMAL_SENSORS_PER_GPU 3 +/** + * Represents the thermal sensor targets + */ typedef enum { NVML_THERMAL_TARGET_NONE = 0, @@ -601,6 +672,9 @@ typedef enum NVML_THERMAL_TARGET_UNKNOWN = -1, } nvmlThermalTarget_t; +/** + * Represents the thermal sensor controllers + */ typedef enum { NVML_THERMAL_CONTROLLER_NONE = 0, @@ -632,6 +706,9 @@ typedef struct { 
nvmlThermalTarget_t target; } nvmlGpuThermalSettingsSensor_t; +/** + * Struct to hold the thermal sensor settings + */ typedef struct { unsigned int count; @@ -639,6 +716,84 @@ typedef struct } nvmlGpuThermalSettings_t; +/** + * Cooler control type + */ +typedef enum nvmlCoolerControl_enum +{ + NVML_THERMAL_COOLER_SIGNAL_NONE = 0, //!< This cooler has no control signal. + NVML_THERMAL_COOLER_SIGNAL_TOGGLE = 1, //!< This cooler can only be toggled either ON or OFF (eg a switch). + NVML_THERMAL_COOLER_SIGNAL_VARIABLE = 2, //!< This cooler's level can be adjusted from some minimum to some maximum (eg a knob). + + // Keep this last + NVML_THERMAL_COOLER_SIGNAL_COUNT +} nvmlCoolerControl_t; + +/** + * Cooler's target + */ +typedef enum nvmlCoolerTarget_enum +{ + NVML_THERMAL_COOLER_TARGET_NONE = 1 << 0, //!< This cooler cools nothing. + NVML_THERMAL_COOLER_TARGET_GPU = 1 << 1, //!< This cooler can cool the GPU. + NVML_THERMAL_COOLER_TARGET_MEMORY = 1 << 2, //!< This cooler can cool the memory. + NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY = 1 << 3, //!< This cooler can cool the power supply. + NVML_THERMAL_COOLER_TARGET_GPU_RELATED = (NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY) //!< This cooler cools all of the components related to its target gpu. 
GPU_RELATED = GPU | MEMORY | POWER_SUPPLY +} nvmlCoolerTarget_t; + +typedef struct +{ + unsigned int version; //!< the API version number + unsigned int index; //!< the cooler index + nvmlCoolerControl_t signalType; //!< OUT: the cooler's control signal characteristics + nvmlCoolerTarget_t target; //!< OUT: the target that cooler cools +} nvmlCoolerInfo_v1_t; +typedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t; + +#define nvmlCoolerInfo_v1 NVML_STRUCT_VERSION(CoolerInfo, 1) + +/** + * UUID length in ASCII format + */ +#define NVML_DEVICE_UUID_ASCII_LEN 41 + +/** + * UUID length in binary format + */ +#define NVML_DEVICE_UUID_BINARY_LEN 16 + +/** + * Enum to represent different UUID types + */ +typedef enum +{ + NVML_UUID_TYPE_NONE = 0, //!< Undefined type + NVML_UUID_TYPE_ASCII = 1, //!< ASCII format type + NVML_UUID_TYPE_BINARY = 2, //!< Binary format type +} nvmlUUIDType_t; + +/** + * Union to represent different UUID values + */ +typedef union +{ + char str[NVML_DEVICE_UUID_ASCII_LEN]; //!< ASCII format value + unsigned char bytes[NVML_DEVICE_UUID_BINARY_LEN]; //!< Binary format value +} nvmlUUIDValue_t; + +/** + * Struct to represent NVML UUID information + */ +typedef struct +{ + unsigned int version; //!< API version number + unsigned int type; //!< One of \p nvmlUUIDType_t + nvmlUUIDValue_t value; //!< One of \p nvmlUUIDValue_t, to be set based on the UUID format +} nvmlUUID_v1_t; +typedef nvmlUUID_v1_t nvmlUUID_t; + +#define nvmlUUID_v1 NVML_STRUCT_VERSION(UUID, 1) + /** @} */ /***************************************************************************************************/ @@ -661,6 +816,18 @@ typedef enum nvmlEnableState_enum //! Generic flag used to force some behavior. See description of particular functions for details. 
#define nvmlFlagForce 0x01 +/** + * DRAM Encryption Info + */ +typedef struct +{ + unsigned int version; //!< IN - the API version number + nvmlEnableState_t encryptionState; //!< IN/OUT - DRAM Encryption state +} nvmlDramEncryptionInfo_v1_t; +typedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t; + +#define nvmlDramEncryptionInfo_v1 NVML_STRUCT_VERSION(DramEncryptionInfo, 1) + /** * * The Brand of the GPU * */ @@ -708,6 +875,8 @@ typedef enum nvmlTemperatureThresholds_enum // acoustic threshold. NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = 6, // Maximum GPU temperature that can be // set as acoustic threshold. + NVML_TEMPERATURE_THRESHOLD_GPS_CURR = 7, // Current temperature that is set as + // gps threshold. // Keep this last NVML_TEMPERATURE_THRESHOLD_COUNT } nvmlTemperatureThresholds_t; @@ -723,6 +892,19 @@ typedef enum nvmlTemperatureSensors_enum NVML_TEMPERATURE_COUNT } nvmlTemperatureSensors_t; +/** + * Margin temperature values + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + int marginTemperature; //!< The margin temperature value +} nvmlMarginTemperature_v1_t; + +typedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t; + +#define nvmlMarginTemperature_v1 NVML_STRUCT_VERSION(MarginTemperature, 1) + /** * Compute mode. 
* @@ -744,7 +926,7 @@ typedef enum nvmlComputeMode_enum /** * Max Clock Monitors available */ -#define MAX_CLK_DOMAINS 32 +#define MAX_CLK_DOMAINS 32 /** * Clock Monitor error types @@ -822,12 +1004,26 @@ typedef enum nvmlMemoryErrorType_enum */ NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1, - // Keep this last NVML_MEMORY_ERROR_TYPE_COUNT //!< Count of memory error types } nvmlMemoryErrorType_t; +/** + * Represents Nvlink Version + */ +typedef enum nvmlNvlinkVersion_enum +{ + NVML_NVLINK_VERSION_INVALID = 0, + NVML_NVLINK_VERSION_1_0 = 1, + NVML_NVLINK_VERSION_2_0 = 2, + NVML_NVLINK_VERSION_2_2 = 3, + NVML_NVLINK_VERSION_3_0 = 4, + NVML_NVLINK_VERSION_3_1 = 5, + NVML_NVLINK_VERSION_4_0 = 6, + NVML_NVLINK_VERSION_5_0 = 7, +}nvmlNvlinkVersion_t; + /** * ECC counter types. * @@ -884,8 +1080,9 @@ typedef enum nvmlClockId_enum typedef enum nvmlDriverModel_enum { - NVML_DRIVER_WDDM = 0, //!< WDDM driver model -- GPU treated as a display device - NVML_DRIVER_WDM = 1 //!< WDM (TCC) model (recommended) -- GPU treated as a generic device + NVML_DRIVER_WDDM = 0, //!< WDDM driver model -- GPU treated as a display device + NVML_DRIVER_WDM = 1, //!< WDM (TCC) model (deprecated) -- GPU treated as a generic compute device + NVML_DRIVER_MCDM = 2 //!< MCDM driver model -- GPU treated as a Microsoft compute device } nvmlDriverModel_t; #define NVML_MAX_GPU_PERF_PSTATES 16 @@ -914,6 +1111,62 @@ typedef enum nvmlPStates_enum NVML_PSTATE_UNKNOWN = 32 //!< Unknown performance state } nvmlPstates_t; +/** + * Clock offset info. + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + nvmlClockType_t type; + nvmlPstates_t pstate; + int clockOffsetMHz; + int minClockOffsetMHz; + int maxClockOffsetMHz; +} nvmlClockOffset_v1_t; + +typedef nvmlClockOffset_v1_t nvmlClockOffset_t; + +#define nvmlClockOffset_v1 NVML_STRUCT_VERSION(ClockOffset, 1) + +/** + * Fan speed info. 
+ */ +typedef struct +{ + unsigned int version; //!< the API version number + unsigned int fan; //!< the fan index + unsigned int speed; //!< OUT: the fan speed in RPM +} nvmlFanSpeedInfo_v1_t; +typedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t; + +#define nvmlFanSpeedInfo_v1 NVML_STRUCT_VERSION(FanSpeedInfo, 1) + +#define NVML_PERF_MODES_BUFFER_SIZE 2048 + +/** + * Device performance modes string + */ +typedef struct +{ + unsigned int version; //!< the API version number + char str[NVML_PERF_MODES_BUFFER_SIZE]; //!< OUT: the performance modes string. +} nvmlDevicePerfModes_v1_t; +typedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t; + +#define nvmlDevicePerfModes_v1 NVML_STRUCT_VERSION(DevicePerfModes, 1) + +/** + * Device current clocks string + */ +typedef struct +{ + unsigned int version; //!< the API version number + char str[NVML_PERF_MODES_BUFFER_SIZE]; //!< OUT: the current clock frequency string. +} nvmlDeviceCurrentClockFreqs_v1_t; +typedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t; + +#define nvmlDeviceCurrentClockFreqs_v1 NVML_STRUCT_VERSION(DeviceCurrentClockFreqs, 1) + /** * GPU Operation Mode * @@ -940,7 +1193,7 @@ typedef enum nvmlInforomObject_enum NVML_INFOROM_OEM = 0, //!< An object defined by OEM NVML_INFOROM_ECC = 1, //!< The ECC object determining the level of ECC support NVML_INFOROM_POWER = 2, //!< The power management object - + NVML_INFOROM_DEN = 3, //!< DRAM Encryption object // Keep this last NVML_INFOROM_COUNT //!< This counts the number of infoROM objects the driver knows about } nvmlInforomObject_t; @@ -977,7 +1230,10 @@ typedef enum nvmlReturn_enum NVML_ERROR_INSUFFICIENT_RESOURCES = 23, //!< Ran out of critical resources, other than memory NVML_ERROR_FREQ_NOT_SUPPORTED = 24, //!< Ran out of critical resources, other than memory NVML_ERROR_ARGUMENT_VERSION_MISMATCH = 25, //!< The provided version is invalid/unsupported - NVML_ERROR_DEPRECATED = 26, //!< The requested functionality has been deprecated + 
NVML_ERROR_DEPRECATED = 26, //!< The requested functionality has been deprecated + NVML_ERROR_NOT_READY = 27, //!< The system is not ready for the request + NVML_ERROR_GPU_NOT_FOUND = 28, //!< No GPUs were found + NVML_ERROR_INVALID_STATE = 29, //!< Resource not in correct state to perform requested operation NVML_ERROR_UNKNOWN = 999 //!< An internal driver error occurred } nvmlReturn_t; @@ -1024,136 +1280,380 @@ typedef enum nvmlRestrictedAPI_enum NVML_RESTRICTED_API_COUNT } nvmlRestrictedAPI_t; -/** @} */ - -/***************************************************************************************************/ -/** @addtogroup virtualGPU - * @{ - */ -/***************************************************************************************************/ -/** @defgroup nvmlVirtualGpuEnums vGPU Enums - * @{ - */ -/***************************************************************************************************/ - -/*! - * GPU virtualization mode types. - */ -typedef enum nvmlGpuVirtualizationMode { - NVML_GPU_VIRTUALIZATION_MODE_NONE = 0, //!< Represents Bare Metal GPU - NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1, //!< Device is associated with GPU-Passthorugh - NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2, //!< Device is associated with vGPU inside virtual machine. - NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3, //!< Device is associated with VGX hypervisor in vGPU mode - NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4 //!< Device is associated with VGX hypervisor in vSGA mode -} nvmlGpuVirtualizationMode_t; - /** - * Host vGPU modes + * Structure to store utilization value and process Id */ -typedef enum nvmlHostVgpuMode_enum +typedef struct nvmlProcessUtilizationSample_st { - NVML_HOST_VGPU_MODE_NON_SRIOV = 0, //!< Non SR-IOV mode - NVML_HOST_VGPU_MODE_SRIOV = 1 //!< SR-IOV mode -} nvmlHostVgpuMode_t; - -/*! 
- * Types of VM identifiers - */ -typedef enum nvmlVgpuVmIdType { - NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID - NVML_VGPU_VM_ID_UUID = 1 //!< VM ID represents UUID -} nvmlVgpuVmIdType_t; + unsigned int pid; //!< PID of process + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value +} nvmlProcessUtilizationSample_t; /** - * vGPU GUEST info state + * Structure to store utilization value and process Id -- version 1 */ -typedef enum nvmlVgpuGuestInfoState_enum +typedef struct { - NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //!< Guest-dependent fields uninitialized - NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1 //!< Guest-dependent fields initialized -} nvmlVgpuGuestInfoState_t; + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int pid; //!< PID of process + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value + unsigned int jpgUtil; //!< Jpeg Util Value + unsigned int ofaUtil; //!< Ofa Util Value +} nvmlProcessUtilizationInfo_v1_t; /** - * vGPU software licensable features + * Structure to store utilization and process ID for each running process -- version 1 */ -typedef enum { - NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0, //!< Unknown - NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1, //!< Virtual GPU - NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2, //!< Nvidia RTX - NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX, //!< Deprecated, do not use. 
- NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3, //!< Gaming - NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4 //!< Compute -} nvmlGridLicenseFeatureCode_t; +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int processSamplesCount; //!< Caller-supplied array size, and returns number of processes running + unsigned long long lastSeenTimeStamp; //!< Return only samples with timestamp greater than lastSeenTimeStamp + nvmlProcessUtilizationInfo_v1_t *procUtilArray; //!< The array (allocated by caller) of the utilization of GPU SM, framebuffer, video encoder, video decoder, JPEG, and OFA +} nvmlProcessesUtilizationInfo_v1_t; +typedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t; +#define nvmlProcessesUtilizationInfo_v1 NVML_STRUCT_VERSION(ProcessesUtilizationInfo, 1) /** - * Status codes for license expiry + * Structure to store SRAM uncorrectable error counters */ -#define NVML_GRID_LICENSE_EXPIRY_NOT_AVAILABLE 0 //!< Expiry information not available -#define NVML_GRID_LICENSE_EXPIRY_INVALID 1 //!< Invalid expiry or error fetching expiry -#define NVML_GRID_LICENSE_EXPIRY_VALID 2 //!< Valid expiry -#define NVML_GRID_LICENSE_EXPIRY_NOT_APPLICABLE 3 //!< Expiry not applicable -#define NVML_GRID_LICENSE_EXPIRY_PERMANENT 4 //!< Permanent expiry - -/** - * vGPU queryable capabilities +typedef struct +{ + unsigned int version; //!< the API version number + unsigned long long aggregateUncParity; //!< aggregate uncorrectable parity error count + unsigned long long aggregateUncSecDed; //!< aggregate uncorrectable SEC-DED error count + unsigned long long aggregateCor; //!< aggregate correctable error count + unsigned long long volatileUncParity; //!< volatile uncorrectable parity error count + unsigned long long volatileUncSecDed; //!< volatile uncorrectable SEC-DED error count + unsigned long long volatileCor; //!< volatile correctable error count + unsigned long long aggregateUncBucketL2; //!< aggregate uncorrectable error 
count for L2 cache bucket + unsigned long long aggregateUncBucketSm; //!< aggregate uncorrectable error count for SM bucket + unsigned long long aggregateUncBucketPcie; //!< aggregate uncorrectable error count for PCIE bucket + unsigned long long aggregateUncBucketMcu; //!< aggregate uncorrectable error count for Microcontroller bucket + unsigned long long aggregateUncBucketOther; //!< aggregate uncorrectable error count for Other bucket + unsigned int bThresholdExceeded; //!< if the error threshold of field diag is exceeded +} nvmlEccSramErrorStatus_v1_t; + +typedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t; +#define nvmlEccSramErrorStatus_v1 NVML_STRUCT_VERSION(EccSramErrorStatus, 1) + +/** + * Structure to store platform information + * + * @deprecated The nvmlPlatformInfo_v1_t will be deprecated in the subsequent releases. + * Use nvmlPlatformInfo_v2_t */ -typedef enum nvmlVgpuCapability_enum +typedef struct { - NVML_VGPU_CAP_NVLINK_P2P = 0, //!< P2P over NVLink is supported - NVML_VGPU_CAP_GPUDIRECT = 1, //!< GPUDirect capability is supported - NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE = 2, //!< vGPU profile cannot be mixed with other vGPU profiles in same VM - NVML_VGPU_CAP_EXCLUSIVE_TYPE = 3, //!< vGPU profile cannot run on a GPU alongside other profiles of different type - NVML_VGPU_CAP_EXCLUSIVE_SIZE = 4, //!< vGPU profile cannot run on a GPU alongside other profiles of different size - // Keep this last - NVML_VGPU_CAP_COUNT -} nvmlVgpuCapability_t; - + unsigned int version; //!< the API version number + unsigned char ibGuid[16]; //!< Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero) + unsigned char rackGuid[16]; //!< GUID of the rack containing this GPU (for Blackwell rackGuid is 13 bytes so indices 13-15 are zero) + unsigned char chassisPhysicalSlotNumber; //!< The slot number in the rack containing this GPU (includes switches) + unsigned char computeSlotIndex; //!< The index within the compute slots in 
the rack containing this GPU (does not include switches) + unsigned char nodeIndex; //!< Index of the node within the slot containing this GPU + unsigned char peerType; //!< Platform indicated NVLink-peer type (e.g. switch present or not) + unsigned char moduleId; //!< ID of this GPU within the node +} nvmlPlatformInfo_v1_t; +#define nvmlPlatformInfo_v1 NVML_STRUCT_VERSION(PlatformInfo, 1) /** -* vGPU driver queryable capabilities -*/ -typedef enum nvmlVgpuDriverCapability_enum + * Structure to store platform information (v2) + */ +typedef struct { - NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = 0, //!< Supports mixing of different vGPU profiles within one guest VM - // Keep this last - NVML_VGPU_DRIVER_CAP_COUNT -} nvmlVgpuDriverCapability_t; + unsigned int version; //!< the API version number + unsigned char ibGuid[16]; //!< Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero) + unsigned char chassisSerialNumber[16]; //!< Serial number of the chassis containing this GPU (for Blackwell it is 13 bytes so indices 13-15 are zero) + unsigned char slotNumber; //!< The slot number in the chassis containing this GPU (includes switches) + unsigned char trayIndex; //!< The tray index within the compute slots in the chassis containing this GPU (does not include switches) + unsigned char hostId; //!< Index of the node within the slot containing this GPU + unsigned char peerType; //!< Platform indicated NVLink-peer type (e.g. 
switch present or not) + unsigned char moduleId; //!< ID of this GPU within the node +} nvmlPlatformInfo_v2_t; +typedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t; +#define nvmlPlatformInfo_v2 NVML_STRUCT_VERSION(PlatformInfo, 2) /** -* Device vGPU queryable capabilities -*/ -typedef enum nvmlDeviceVgpuCapability_enum -{ - NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = 0, //!< Fractional vGPU profiles on this GPU can be used in multi-vGPU configurations - NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = 1, //!< Supports concurrent execution of timesliced vGPU profiles of differing types - NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = 2, //!< Supports concurrent execution of timesliced vGPU profiles of differing framebuffer sizes - // Keep this last - NVML_DEVICE_VGPU_CAP_COUNT -} nvmlDeviceVgpuCapability_t; - -/** @} */ - -/***************************************************************************************************/ - -/** @defgroup nvmlVgpuConstants vGPU Constants - * @{ + * GSP firmware */ -/***************************************************************************************************/ +#define NVML_GSP_FIRMWARE_VERSION_BUF_SIZE 0x40 /** - * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense + * Simplified chip architecture */ -#define NVML_GRID_LICENSE_BUFFER_SIZE 128 - -#define NVML_VGPU_NAME_BUFFER_SIZE 64 - +#define NVML_DEVICE_ARCH_KEPLER 2 // Devices based on the NVIDIA Kepler architecture +#define NVML_DEVICE_ARCH_MAXWELL 3 // Devices based on the NVIDIA Maxwell architecture +#define NVML_DEVICE_ARCH_PASCAL 4 // Devices based on the NVIDIA Pascal architecture +#define NVML_DEVICE_ARCH_VOLTA 5 // Devices based on the NVIDIA Volta architecture +#define NVML_DEVICE_ARCH_TURING 6 // Devices based on the NVIDIA Turing architecture +#define NVML_DEVICE_ARCH_AMPERE 7 // Devices based on the NVIDIA Ampere architecture +#define NVML_DEVICE_ARCH_ADA 8 // Devices based on the NVIDIA Ada architecture +#define 
NVML_DEVICE_ARCH_HOPPER 9 // Devices based on the NVIDIA Hopper architecture + +#define NVML_DEVICE_ARCH_BLACKWELL 10 // Devices based on the NVIDIA Blackwell architecture + +#define NVML_DEVICE_ARCH_T23X 11 // Devices based on NVIDIA Orin architecture + +#define NVML_DEVICE_ARCH_UNKNOWN 0xffffffff // Anything else, presumably something newer + +typedef unsigned int nvmlDeviceArchitecture_t; + +/** + * PCI bus types + */ +#define NVML_BUS_TYPE_UNKNOWN 0 +#define NVML_BUS_TYPE_PCI 1 +#define NVML_BUS_TYPE_PCIE 2 +#define NVML_BUS_TYPE_FPCI 3 +#define NVML_BUS_TYPE_AGP 4 + +typedef unsigned int nvmlBusType_t; + +/** + * Device Power Modes + */ + +/** + * Device Fan control policy + */ +#define NVML_FAN_POLICY_TEMPERATURE_CONTINOUS_SW 0 +#define NVML_FAN_POLICY_MANUAL 1 + +typedef unsigned int nvmlFanControlPolicy_t; + +/** + * Device Power Source + */ +#define NVML_POWER_SOURCE_AC 0x00000000 +#define NVML_POWER_SOURCE_BATTERY 0x00000001 +#define NVML_POWER_SOURCE_UNDERSIZED 0x00000002 + +typedef unsigned int nvmlPowerSource_t; + +/** + * Device PCIE link Max Speed + */ +#define NVML_PCIE_LINK_MAX_SPEED_INVALID 0x00000000 +#define NVML_PCIE_LINK_MAX_SPEED_2500MBPS 0x00000001 +#define NVML_PCIE_LINK_MAX_SPEED_5000MBPS 0x00000002 +#define NVML_PCIE_LINK_MAX_SPEED_8000MBPS 0x00000003 +#define NVML_PCIE_LINK_MAX_SPEED_16000MBPS 0x00000004 +#define NVML_PCIE_LINK_MAX_SPEED_32000MBPS 0x00000005 +#define NVML_PCIE_LINK_MAX_SPEED_64000MBPS 0x00000006 + +/** + * Adaptive clocking status + */ +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED 0x00000000 +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED 0x00000001 + +#define NVML_MAX_GPU_UTILIZATIONS 8 + +/** + * Represents the GPU utilization domains + */ +typedef enum nvmlGpuUtilizationDomainId_t +{ + NVML_GPU_UTILIZATION_DOMAIN_GPU = 0, //!< Graphics engine domain + NVML_GPU_UTILIZATION_DOMAIN_FB = 1, //!< Frame buffer domain + NVML_GPU_UTILIZATION_DOMAIN_VID = 2, //!< Video engine domain + NVML_GPU_UTILIZATION_DOMAIN_BUS 
= 3, //!< Bus interface domain +} nvmlGpuUtilizationDomainId_t; + +typedef struct { + unsigned int bIsPresent; + unsigned int percentage; + unsigned int incThreshold; + unsigned int decThreshold; +} nvmlGpuDynamicPstatesInfoUtilization_t; + +typedef struct nvmlGpuDynamicPstatesInfo_st +{ + unsigned int flags; //!< Reserved for future use + nvmlGpuDynamicPstatesInfoUtilization_t utilization[NVML_MAX_GPU_UTILIZATIONS]; +} nvmlGpuDynamicPstatesInfo_t; + +/* + * PCIe outbound/inbound atomic operations capability + */ +#define NVML_PCIE_ATOMICS_CAP_FETCHADD32 0x01 +#define NVML_PCIE_ATOMICS_CAP_FETCHADD64 0x02 +#define NVML_PCIE_ATOMICS_CAP_SWAP32 0x04 +#define NVML_PCIE_ATOMICS_CAP_SWAP64 0x08 +#define NVML_PCIE_ATOMICS_CAP_CAS32 0x10 +#define NVML_PCIE_ATOMICS_CAP_CAS64 0x20 +#define NVML_PCIE_ATOMICS_CAP_CAS128 0x40 +#define NVML_PCIE_ATOMICS_OPS_MAX 7 + +/** + * Device Scope - This is useful to retrieve the telemetry at GPU and module (e.g. GPU + CPU) level + */ +#define NVML_POWER_SCOPE_GPU 0U //!< Targets only GPU +#define NVML_POWER_SCOPE_MODULE 1U //!< Targets the whole module +#define NVML_POWER_SCOPE_MEMORY 2U //!< Targets the GPU Memory + +typedef unsigned char nvmlPowerScopeType_t; + +/** + * Contains the power management limit + */ +typedef struct +{ + unsigned int version; //!< Structure format version (must be 1) + nvmlPowerScopeType_t powerScope; //!< [in] Device type: GPU or Total Module + unsigned int powerValueMw; //!< [out] Power value to retrieve or set in milliwatts +} nvmlPowerValue_v2_t; + +#define nvmlPowerValue_v2 NVML_STRUCT_VERSION(PowerValue, 2) + +/** @} */ + +/***************************************************************************************************/ +/** @addtogroup virtualGPU vGPU Enums, Constants, Structs + * @{ + */ +/***************************************************************************************************/ +/** @defgroup nvmlVirtualGpuEnums vGPU Enums + * @{ + */ 
+/***************************************************************************************************/ + +/*! + * GPU virtualization mode types. + */ +typedef enum nvmlGpuVirtualizationMode { + NVML_GPU_VIRTUALIZATION_MODE_NONE = 0, //!< Represents Bare Metal GPU + NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1, //!< Device is associated with GPU-Passthorugh + NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2, //!< Device is associated with vGPU inside virtual machine. + NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3, //!< Device is associated with VGX hypervisor in vGPU mode + NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4 //!< Device is associated with VGX hypervisor in vSGA mode +} nvmlGpuVirtualizationMode_t; + +/** + * Host vGPU modes + */ +typedef enum nvmlHostVgpuMode_enum +{ + NVML_HOST_VGPU_MODE_NON_SRIOV = 0, //!< Non SR-IOV mode + NVML_HOST_VGPU_MODE_SRIOV = 1 //!< SR-IOV mode +} nvmlHostVgpuMode_t; + +/*! + * Types of VM identifiers + */ +typedef enum nvmlVgpuVmIdType { + NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID + NVML_VGPU_VM_ID_UUID = 1 //!< VM ID represents UUID +} nvmlVgpuVmIdType_t; + +/** + * vGPU GUEST info state + */ +typedef enum nvmlVgpuGuestInfoState_enum +{ + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //!< Guest-dependent fields uninitialized + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1 //!< Guest-dependent fields initialized +} nvmlVgpuGuestInfoState_t; + +/** + * vGPU software licensable features + */ +typedef enum { + NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0, //!< Unknown + NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1, //!< Virtual GPU + NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2, //!< Nvidia RTX + NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX, //!< Deprecated, do not use. 
+ NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3, //!< Gaming + NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4 //!< Compute +} nvmlGridLicenseFeatureCode_t; + +/** + * Status codes for license expiry + */ +#define NVML_GRID_LICENSE_EXPIRY_NOT_AVAILABLE 0 //!< Expiry information not available +#define NVML_GRID_LICENSE_EXPIRY_INVALID 1 //!< Invalid expiry or error fetching expiry +#define NVML_GRID_LICENSE_EXPIRY_VALID 2 //!< Valid expiry +#define NVML_GRID_LICENSE_EXPIRY_NOT_APPLICABLE 3 //!< Expiry not applicable +#define NVML_GRID_LICENSE_EXPIRY_PERMANENT 4 //!< Permanent expiry + +/** + * vGPU queryable capabilities + */ +typedef enum nvmlVgpuCapability_enum +{ + NVML_VGPU_CAP_NVLINK_P2P = 0, //!< P2P over NVLink is supported + NVML_VGPU_CAP_GPUDIRECT = 1, //!< GPUDirect capability is supported + NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE = 2, //!< vGPU profile cannot be mixed with other vGPU profiles in same VM + NVML_VGPU_CAP_EXCLUSIVE_TYPE = 3, //!< vGPU profile cannot run on a GPU alongside other profiles of different type + NVML_VGPU_CAP_EXCLUSIVE_SIZE = 4, //!< vGPU profile cannot run on a GPU alongside other profiles of different size + // Keep this last + NVML_VGPU_CAP_COUNT +} nvmlVgpuCapability_t; + +/** +* vGPU driver queryable capabilities +*/ +typedef enum nvmlVgpuDriverCapability_enum +{ + NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = 0, //!< Supports mixing of different vGPU profiles within one guest VM + NVML_VGPU_DRIVER_CAP_WARM_UPDATE = 1, //!< Supports FSR and warm update of vGPU host driver without terminating the running guest VM + // Keep this last + NVML_VGPU_DRIVER_CAP_COUNT +} nvmlVgpuDriverCapability_t; + +/** +* Device vGPU queryable capabilities +*/ +typedef enum nvmlDeviceVgpuCapability_enum +{ + NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = 0, //!< Query whether the fractional vGPU profiles on this GPU can be used in multi-vGPU configurations + NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = 1, //!< Query whether the GPU support concurrent 
execution of timesliced vGPU profiles of differing types + NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = 2, //!< Query whether the GPU support concurrent execution of timesliced vGPU profiles of differing framebuffer sizes + NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = 3, //!< Query the GPU's read_device_buffer expected bandwidth capacity in megabytes per second + NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = 4, //!< Query the GPU's write_device_buffer expected bandwidth capacity in megabytes per second + NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING = 5, //!< Query whether the vGPU profiles on the GPU supports migration data streaming + NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU = 6, //!< Set/Get support for mini-quarter vGPU profiles + NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = 7, //!< Set/Get support for compute media engine vGPU profiles + NVML_DEVICE_VGPU_CAP_WARM_UPDATE = 8, //!< Query whether the GPU supports FSR and warm update + NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = 9, //!< Query whether the GPU supports reporting of placements of timesliced vGPU profiles with identical framebuffer sizes + NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = 10, //!< Query whether the GPU supports timesliced vGPU on MIG + NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED = 11, //!< Set/Get MIG timesliced mode reporting, without impacting the underlying functionality + // Keep this last + NVML_DEVICE_VGPU_CAP_COUNT +} nvmlDeviceVgpuCapability_t; + +/** @} */ + +/***************************************************************************************************/ + +/** @defgroup nvmlVgpuConstants vGPU Constants + * @{ + */ +/***************************************************************************************************/ + +/** + * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense + */ +#define NVML_GRID_LICENSE_BUFFER_SIZE 128 + +#define NVML_VGPU_NAME_BUFFER_SIZE 64 + #define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 #define 
INVALID_GPU_INSTANCE_PROFILE_ID 0xFFFFFFFF #define INVALID_GPU_INSTANCE_ID 0xFFFFFFFF +#define NVML_INVALID_VGPU_PLACEMENT_ID 0xFFFF + /*! * Macros for vGPU instance's virtualization capabilities bitfield. */ @@ -1168,6 +1668,12 @@ typedef enum nvmlDeviceVgpuCapability_enum #define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_NO 0x0 #define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_YES 0x1 +/** + * Macros to indicate the vGPU mode of the GPU. + */ +#define NVML_VGPU_PGPU_HETEROGENEOUS_MODE 0 +#define NVML_VGPU_PGPU_HOMOGENEOUS_MODE 1 + /** @} */ /***************************************************************************************************/ @@ -1180,6 +1686,65 @@ typedef unsigned int nvmlVgpuTypeId_t; typedef unsigned int nvmlVgpuInstance_t; +/** + * Structure to store the vGPU heterogeneous mode of device -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int mode; //!< The vGPU heterogeneous mode +} nvmlVgpuHeterogeneousMode_v1_t; +typedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t; +#define nvmlVgpuHeterogeneousMode_v1 NVML_STRUCT_VERSION(VgpuHeterogeneousMode, 1) + +/** + * Structure to store the placement ID of vGPU instance -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int placementId; //!< Placement ID of the active vGPU instance +} nvmlVgpuPlacementId_v1_t; +typedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t; +#define nvmlVgpuPlacementId_v1 NVML_STRUCT_VERSION(VgpuPlacementId, 1) + +/** + * Structure to store the list of vGPU placements -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int placementSize; //!< The number of slots occupied by the vGPU type + unsigned int count; //!< Count of placement IDs fetched + unsigned int *placementIds; //!< Placement IDs for the vGPU type +} nvmlVgpuPlacementList_v1_t; +#define nvmlVgpuPlacementList_v1 
NVML_STRUCT_VERSION(VgpuPlacementList, 1) + +/** + * Structure to store the list of vGPU placements -- version 2 + */ +typedef struct +{ + unsigned int version; //!< IN: The version number of this struct + unsigned int placementSize; //!< OUT: The number of slots occupied by the vGPU type + unsigned int count; //!< IN/OUT: Count of the placement IDs + unsigned int *placementIds; //!< IN/OUT: Placement IDs for the vGPU type + unsigned int mode; //!< IN: The vGPU mode. Either NVML_VGPU_PGPU_HETEROGENEOUS_MODE or NVML_VGPU_PGPU_HOMOGENEOUS_MODE +} nvmlVgpuPlacementList_v2_t; +typedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t; +#define nvmlVgpuPlacementList_v2 NVML_STRUCT_VERSION(VgpuPlacementList, 2) + +/** + * Structure to store BAR1 size information of vGPU type -- Version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned long long bar1Size; //!< BAR1 size in megabytes +} nvmlVgpuTypeBar1Info_v1_t; +typedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t; +#define nvmlVgpuTypeBar1Info_v1 NVML_STRUCT_VERSION(VgpuTypeBar1Info, 1) + /** * Structure to store Utilization Value and vgpuInstance */ @@ -1193,6 +1758,35 @@ typedef struct nvmlVgpuInstanceUtilizationSample_st nvmlValue_t decUtil; //!< Decoder Util Value } nvmlVgpuInstanceUtilizationSample_t; +/** + * Structure to store Utilization Value and vgpuInstance Info -- Version 1 + */ +typedef struct +{ + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + nvmlValue_t smUtil; //!< SM (3D/Compute) Util Value + nvmlValue_t memUtil; //!< Frame Buffer Memory Util Value + nvmlValue_t encUtil; //!< Encoder Util Value + nvmlValue_t decUtil; //!< Decoder Util Value + nvmlValue_t jpgUtil; //!< Jpeg Util Value + nvmlValue_t ofaUtil; //!< Ofa Util Value +} nvmlVgpuInstanceUtilizationInfo_v1_t; + +/** + * Structure to store recent utilization for vGPU instances running on a device -- version 1 + */ 
+typedef struct +{ + unsigned int version; //!< The version number of this struct + nvmlValueType_t sampleValType; //!< Hold the type of returned sample values + unsigned int vgpuInstanceCount; //!< Hold the number of vGPU instances + unsigned long long lastSeenTimeStamp; //!< Return only samples with timestamp greater than lastSeenTimeStamp + nvmlVgpuInstanceUtilizationInfo_v1_t *vgpuUtilArray; //!< The array (allocated by caller) in which vGPU utilization are returned +} nvmlVgpuInstancesUtilizationInfo_v1_t; +typedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t; +#define nvmlVgpuInstancesUtilizationInfo_v1 NVML_STRUCT_VERSION(VgpuInstancesUtilizationInfo, 1) + /** * Structure to store Utilization Value, vgpuInstance and subprocess information */ @@ -1208,6 +1802,47 @@ typedef struct nvmlVgpuProcessUtilizationSample_st unsigned int decUtil; //!< Decoder Util Value } nvmlVgpuProcessUtilizationSample_t; +/** + * Structure to store Utilization Value, vgpuInstance and subprocess information for process running on vGPU instance -- version 1 + */ +typedef struct +{ + char processName[NVML_VGPU_NAME_BUFFER_SIZE]; //!< Name of process running within the vGPU VM + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned int pid; //!< PID of process running within the vGPU VM + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value + unsigned int jpgUtil; //!< Jpeg Util Value + unsigned int ofaUtil; //!< Ofa Util Value +} nvmlVgpuProcessUtilizationInfo_v1_t; + +/** + * Structure to store recent utilization, vgpuInstance and subprocess information for processes running on vGPU instances active on a device -- version 1 + */ +typedef struct +{ + unsigned int version; //!< The version number of this struct + unsigned int 
vgpuProcessCount; //!< Hold the number of processes running on vGPU instances + unsigned long long lastSeenTimeStamp; //!< Return only samples with timestamp greater than lastSeenTimeStamp + nvmlVgpuProcessUtilizationInfo_v1_t *vgpuProcUtilArray; //!< The array (allocated by caller) in which utilization of processes running on vGPU instances are returned +} nvmlVgpuProcessesUtilizationInfo_v1_t; +typedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t; +#define nvmlVgpuProcessesUtilizationInfo_v1 NVML_STRUCT_VERSION(VgpuProcessesUtilizationInfo, 1) + +/** + * Structure to store the information of vGPU runtime state -- version 1 + */ +typedef struct +{ + unsigned int version; //!< IN: The version number of this struct + unsigned long long size; //!< OUT: The runtime state size of the vGPU instance +} nvmlVgpuRuntimeState_v1_t; +typedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t; +#define nvmlVgpuRuntimeState_v1 NVML_STRUCT_VERSION(VgpuRuntimeState, 1) + /** * vGPU scheduler policies */ @@ -1220,6 +1855,15 @@ typedef struct nvmlVgpuProcessUtilizationSample_st #define NVML_SCHEDULER_SW_MAX_LOG_ENTRIES 200 +#define NVML_VGPU_SCHEDULER_ARR_DEFAULT 0 +#define NVML_VGPU_SCHEDULER_ARR_DISABLE 1 +#define NVML_VGPU_SCHEDULER_ARR_ENABLE 2 + +/** + * vGPU scheduler engine types + */ +#define NVML_VGPU_SCHEDULER_ENGINE_TYPE_GRAPHICS 1 + typedef struct { unsigned int avgFactor; unsigned int timeslice; @@ -1260,7 +1904,7 @@ typedef struct nvmlVgpuSchedulerLog_st { unsigned int engineId; //!< Engine whose software runlist log entries are fetched unsigned int schedulerPolicy; //!< Scheduler policy - unsigned int isEnabledARR; //!< Flag to check Adaptive Round Robin scheduler mode + unsigned int arrMode; //!< Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*. 
nvmlVgpuSchedulerParams_t schedulerParams; unsigned int entriesCount; //!< Count of log entries fetched nvmlVgpuSchedulerLogEntry_t logEntries[NVML_SCHEDULER_SW_MAX_LOG_ENTRIES]; @@ -1272,7 +1916,7 @@ typedef struct nvmlVgpuSchedulerLog_st typedef struct nvmlVgpuSchedulerGetState_st { unsigned int schedulerPolicy; //!< Scheduler policy - unsigned int isEnabledARR; //!< Flag to check Adaptive Round Robin scheduler mode + unsigned int arrMode; //!< Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*. nvmlVgpuSchedulerParams_t schedulerParams; } nvmlVgpuSchedulerGetState_t; @@ -1302,7 +1946,7 @@ typedef union typedef struct nvmlVgpuSchedulerSetState_st { unsigned int schedulerPolicy; //!< Scheduler policy - unsigned int enableARRMode; //!< Flag to enable/disable Adaptive Round Robin scheduler + unsigned int enableARRMode; //!< Adaptive Round Robin scheduler nvmlVgpuSchedulerSetParams_t schedulerParams; } nvmlVgpuSchedulerSetState_t; @@ -1352,19 +1996,6 @@ typedef struct nvmlVgpuLicenseInfo_st unsigned int currentState; //!< Current license state } nvmlVgpuLicenseInfo_t; -/** - * Structure to store utilization value and process Id - */ -typedef struct nvmlProcessUtilizationSample_st -{ - unsigned int pid; //!< PID of process - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - unsigned int smUtil; //!< SM (3D/Compute) Util Value - unsigned int memUtil; //!< Frame Buffer Memory Util Value - unsigned int encUtil; //!< Encoder Util Value - unsigned int decUtil; //!< Decoder Util Value -} nvmlProcessUtilizationSample_t; - /** * Structure to store license expiry date and time values */ @@ -1403,98 +2034,109 @@ typedef struct nvmlGridLicensableFeatures_st } nvmlGridLicensableFeatures_t; /** - * GSP firmware + * Enum describing the GPU Recovery Action */ -#define NVML_GSP_FIRMWARE_VERSION_BUF_SIZE 0x40 +typedef enum nvmlDeviceGpuRecoveryAction_s { + NVML_GPU_RECOVERY_ACTION_NONE = 0, + NVML_GPU_RECOVERY_ACTION_GPU_RESET = 1, + 
NVML_GPU_RECOVERY_ACTION_NODE_REBOOT = 2, + NVML_GPU_RECOVERY_ACTION_DRAIN_P2P = 3, + NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET = 4, +} nvmlDeviceGpuRecoveryAction_t; /** - * Simplified chip architecture + * Structure to store the vGPU type IDs -- version 1 */ -#define NVML_DEVICE_ARCH_KEPLER 2 // Devices based on the NVIDIA Kepler architecture -#define NVML_DEVICE_ARCH_MAXWELL 3 // Devices based on the NVIDIA Maxwell architecture -#define NVML_DEVICE_ARCH_PASCAL 4 // Devices based on the NVIDIA Pascal architecture -#define NVML_DEVICE_ARCH_VOLTA 5 // Devices based on the NVIDIA Volta architecture -#define NVML_DEVICE_ARCH_TURING 6 // Devices based on the NVIDIA Turing architecture - -#define NVML_DEVICE_ARCH_AMPERE 7 // Devices based on the NVIDIA Ampere architecture - -#define NVML_DEVICE_ARCH_ADA 8 // Devices based on the NVIDIA Ada architecture - -#define NVML_DEVICE_ARCH_HOPPER 9 // Devices based on the NVIDIA Hopper architecture - -#define NVML_DEVICE_ARCH_UNKNOWN 0xffffffff // Anything else, presumably something newer - -typedef unsigned int nvmlDeviceArchitecture_t; +typedef struct +{ + unsigned int version; //!< IN: The version number of this struct + unsigned int vgpuCount; //!< IN/OUT: Number of vGPU types + nvmlVgpuTypeId_t *vgpuTypeIds; //!< OUT: List of vGPU type IDs +} nvmlVgpuTypeIdInfo_v1_t; +typedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t; +#define nvmlVgpuTypeIdInfo_v1 NVML_STRUCT_VERSION(VgpuTypeIdInfo, 1) /** - * PCI bus types + * Structure to store the maximum number of possible vGPU type IDs -- version 1 */ -#define NVML_BUS_TYPE_UNKNOWN 0 -#define NVML_BUS_TYPE_PCI 1 -#define NVML_BUS_TYPE_PCIE 2 -#define NVML_BUS_TYPE_FPCI 3 -#define NVML_BUS_TYPE_AGP 4 - -typedef unsigned int nvmlBusType_t; +typedef struct +{ + unsigned int version; //!< IN: The version number of this struct + nvmlVgpuTypeId_t vgpuTypeId; //!< IN: Handle to vGPU type + unsigned int maxInstancePerGI; //!< OUT: Maximum number of vGPU instances per GPU instance +} 
nvmlVgpuTypeMaxInstance_v1_t; +typedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t; +#define nvmlVgpuTypeMaxInstance_v1 NVML_STRUCT_VERSION(VgpuTypeMaxInstance, 1) /** - * Device Power Modes + * Structure to store active vGPU instance information -- Version 1 */ +typedef struct +{ + unsigned int version; //!< IN: The version number of this struct + unsigned int vgpuCount; //!< IN/OUT: Count of the active vGPU instances + nvmlVgpuInstance_t *vgpuInstances; //!< IN/OUT: list of active vGPU instances +} nvmlActiveVgpuInstanceInfo_v1_t; +typedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t; +#define nvmlActiveVgpuInstanceInfo_v1 NVML_STRUCT_VERSION(ActiveVgpuInstanceInfo, 1) /** - * Device Fan control policy + * Structure to set vGPU scheduler state information -- version 1 */ -#define NVML_FAN_POLICY_TEMPERATURE_CONTINOUS_SW 0 -#define NVML_FAN_POLICY_MANUAL 1 - -typedef unsigned int nvmlFanControlPolicy_t; +typedef struct +{ + unsigned int version; //!< IN: The version number of this struct + unsigned int engineId; //!< IN: One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*. 
+ unsigned int schedulerPolicy; //!< IN: Scheduler policy + unsigned int enableARRMode; //!< IN: Adaptive Round Robin scheduler + nvmlVgpuSchedulerSetParams_t schedulerParams; //!< IN: vGPU Scheduler Parameters +} nvmlVgpuSchedulerState_v1_t; +typedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t; +#define nvmlVgpuSchedulerState_v1 NVML_STRUCT_VERSION(VgpuSchedulerState, 1) /** - * Device Power Source + * Structure to store vGPU scheduler state information -- Version 1 */ -#define NVML_POWER_SOURCE_AC 0x00000000 -#define NVML_POWER_SOURCE_BATTERY 0x00000001 - -typedef unsigned int nvmlPowerSource_t; - -/* - * Device PCIE link Max Speed - */ -#define NVML_PCIE_LINK_MAX_SPEED_INVALID 0x00000000 -#define NVML_PCIE_LINK_MAX_SPEED_2500MBPS 0x00000001 -#define NVML_PCIE_LINK_MAX_SPEED_5000MBPS 0x00000002 -#define NVML_PCIE_LINK_MAX_SPEED_8000MBPS 0x00000003 -#define NVML_PCIE_LINK_MAX_SPEED_16000MBPS 0x00000004 -#define NVML_PCIE_LINK_MAX_SPEED_32000MBPS 0x00000005 -#define NVML_PCIE_LINK_MAX_SPEED_64000MBPS 0x00000006 +typedef struct +{ + unsigned int version; //!< IN: The version number of this struct + unsigned int engineId; //!< IN: Engine whose software scheduler state info is fetched. One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*. + unsigned int schedulerPolicy; //!< OUT: Scheduler policy + unsigned int arrMode; //!< OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*. 
+ nvmlVgpuSchedulerParams_t schedulerParams; //!< OUT: vGPU Scheduler Parameters +} nvmlVgpuSchedulerStateInfo_v1_t; +typedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t; +#define nvmlVgpuSchedulerStateInfo_v1 NVML_STRUCT_VERSION(VgpuSchedulerStateInfo, 1) -/* - * Adaptive clocking status +/** + * Structure to store vGPU scheduler log information -- Version 1 */ -#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED 0x00000000 -#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED 0x00000001 - -#define NVML_MAX_GPU_UTILIZATIONS 8 -typedef enum nvmlGpuUtilizationDomainId_t +typedef struct { - NVML_GPU_UTILIZATION_DOMAIN_GPU = 0, //!< Graphics engine domain - NVML_GPU_UTILIZATION_DOMAIN_FB = 1, //!< Frame buffer domain - NVML_GPU_UTILIZATION_DOMAIN_VID = 2, //!< Video engine domain - NVML_GPU_UTILIZATION_DOMAIN_BUS = 3, //!< Bus interface domain -} nvmlGpuUtilizationDomainId_t; + unsigned int version; //!< IN: The version number of this struct + unsigned int engineId; //!< IN: Engine whose software runlist log entries are fetched. One of One of NVML_VGPU_SCHEDULER_ENGINE_TYPE_*. + unsigned int schedulerPolicy; //!< OUT: Scheduler policy + unsigned int arrMode; //!< OUT: Adaptive Round Robin scheduler mode. One of the NVML_VGPU_SCHEDULER_ARR_*. 
+ nvmlVgpuSchedulerParams_t schedulerParams; //!< OUT: vGPU Scheduler Parameters + unsigned int entriesCount; //!< OUT: Count of log entries fetched + nvmlVgpuSchedulerLogEntry_t logEntries[NVML_SCHEDULER_SW_MAX_LOG_ENTRIES]; //!< OUT: Structure to store the state and logs of a software runlist +} nvmlVgpuSchedulerLogInfo_v1_t; +typedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t; +#define nvmlVgpuSchedulerLogInfo_v1 NVML_STRUCT_VERSION(VgpuSchedulerLogInfo, 1) -typedef struct { - unsigned int bIsPresent; - unsigned int percentage; - unsigned int incThreshold; - unsigned int decThreshold; -} nvmlGpuDynamicPstatesInfoUtilization_t; - -typedef struct nvmlGpuDynamicPstatesInfo_st +/** + * Structure to store creatable vGPU placement information -- version 1 + */ +typedef struct { - unsigned int flags; //!< Reserved for future use - nvmlGpuDynamicPstatesInfoUtilization_t utilization[NVML_MAX_GPU_UTILIZATIONS]; -} nvmlGpuDynamicPstatesInfo_t; + unsigned int version; //!< IN: The version number of this struct + nvmlVgpuTypeId_t vgpuTypeId; //!< IN: Handle to vGPU type + unsigned int count; //!< IN/OUT: Count of the placement IDs + unsigned int *placementIds; //!< IN/OUT: Placement IDs for the vGPU type + unsigned int placementSize; //!< OUT: The number of slots occupied by the vGPU type +} nvmlVgpuCreatablePlacementInfo_v1_t; +typedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t; +#define nvmlVgpuCreatablePlacementInfo_v1 NVML_STRUCT_VERSION(VgpuCreatablePlacementInfo, 1) /** @} */ /** @} */ @@ -1546,7 +2188,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_RETIRED_DBE 30 //!< Number of retired pages because of double bit errors #define NVML_FI_DEV_RETIRED_PENDING 31 //!< If any pages are pending retirement. 1=yes. 0=no. -/* NvLink Flit Error Counters */ +/** + * NVLink Flit Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. 
+ */ #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 32 //!< NVLink flow control CRC Error Counter for Lane 0 #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 33 //!< NVLink flow control CRC Error Counter for Lane 1 #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 34 //!< NVLink flow control CRC Error Counter for Lane 2 @@ -1555,7 +2201,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 37 //!< NVLink flow control CRC Error Counter for Lane 5 #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL 38 //!< NVLink flow control CRC Error Counter total for all Lanes -/* NvLink CRC Data Error Counters */ +/** + * NVLink CRC Data Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 39 //!< NVLink data CRC Error Counter for Lane 0 #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 40 //!< NVLink data CRC Error Counter for Lane 1 #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 41 //!< NVLink data CRC Error Counter for Lane 2 @@ -1564,7 +2214,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 44 //!< NVLink data CRC Error Counter for Lane 5 #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL 45 //!< NvLink data CRC Error Counter total for all Lanes -/* NvLink Replay Error Counters */ +/** + * NVLink Replay Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. 
+ */ #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 46 //!< NVLink Replay Error Counter for Lane 0 #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 47 //!< NVLink Replay Error Counter for Lane 1 #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 48 //!< NVLink Replay Error Counter for Lane 2 @@ -1573,7 +2227,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 51 //!< NVLink Replay Error Counter for Lane 5 #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL 52 //!< NVLink Replay Error Counter total for all Lanes -/* NvLink Recovery Error Counters */ +/** + * NVLink Recovery Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ #define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 53 //!< NVLink Recovery Error Counter for Lane 0 #define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 54 //!< NVLink Recovery Error Counter for Lane 1 #define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 55 //!< NVLink Recovery Error Counter for Lane 2 @@ -1624,7 +2282,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st /* Energy Counter */ #define NVML_FI_DEV_TOTAL_ENERGY_CONSUMPTION 83 //!< Total energy consumption for the GPU in mJ since the driver was last reloaded -/* NVLink Speed */ +/** + * NVLink Speed + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ #define NVML_FI_DEV_NVLINK_SPEED_MBPS_L0 84 //!< NVLink Speed in MBps for Link 0 #define NVML_FI_DEV_NVLINK_SPEED_MBPS_L1 85 //!< NVLink Speed in MBps for Link 1 #define NVML_FI_DEV_NVLINK_SPEED_MBPS_L2 86 //!< NVLink Speed in MBps for Link 2 @@ -1641,7 +2303,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_PCIE_REPLAY_COUNTER 94 //!< PCIe replay counter #define NVML_FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER 95 //!< PCIe replay rollover counter -/* NvLink Flit Error Counters */ +/** + * NVLink Flit Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. 
+ */ #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 96 //!< NVLink flow control CRC Error Counter for Lane 6 #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 97 //!< NVLink flow control CRC Error Counter for Lane 7 #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 98 //!< NVLink flow control CRC Error Counter for Lane 8 @@ -1649,7 +2315,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 100 //!< NVLink flow control CRC Error Counter for Lane 10 #define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 101 //!< NVLink flow control CRC Error Counter for Lane 11 -/* NvLink CRC Data Error Counters */ +/** + * NVLink CRC Data Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 102 //!< NVLink data CRC Error Counter for Lane 6 #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 103 //!< NVLink data CRC Error Counter for Lane 7 #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 104 //!< NVLink data CRC Error Counter for Lane 8 @@ -1657,7 +2327,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 106 //!< NVLink data CRC Error Counter for Lane 10 #define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 107 //!< NVLink data CRC Error Counter for Lane 11 -/* NvLink Replay Error Counters */ +/** + * NVLink Replay Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. 
+ */ #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 108 //!< NVLink Replay Error Counter for Lane 6 #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 109 //!< NVLink Replay Error Counter for Lane 7 #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 110 //!< NVLink Replay Error Counter for Lane 8 @@ -1665,7 +2339,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 112 //!< NVLink Replay Error Counter for Lane 10 #define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 113 //!< NVLink Replay Error Counter for Lane 11 -/* NvLink Recovery Error Counters */ +/** + * NVLink Recovery Error Counters + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ #define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 114 //!< NVLink Recovery Error Counter for Lane 6 #define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 115 //!< NVLink Recovery Error Counter for Lane 7 #define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 116 //!< NVLink Recovery Error Counter for Lane 8 @@ -1697,7 +2375,11 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L10 130 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 10 #define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L11 131 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 11 -/* NVLink Speed */ +/** + * NVLink Speed + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ #define NVML_FI_DEV_NVLINK_SPEED_MBPS_L6 132 //!< NVLink Speed in MBps for Link 6 #define NVML_FI_DEV_NVLINK_SPEED_MBPS_L7 133 //!< NVLink Speed in MBps for Link 7 #define NVML_FI_DEV_NVLINK_SPEED_MBPS_L8 134 //!< NVLink Speed in MBps for Link 8 @@ -1724,49 +2406,284 @@ typedef struct nvmlGpuDynamicPstatesInfo_st #define NVML_FI_DEV_REMAPPED_FAILURE 145 //!< If any rows failed to be remapped 1=yes 0=no /** - * Remote device NVLink ID + * Remote device NVLink ID + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. 
+ */ +#define NVML_FI_DEV_NVLINK_REMOTE_NVLINK_ID 146 //!< Remote device NVLink ID + +/** + * NVSwitch: connected NVLink count + */ +#define NVML_FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT 147 //!< Number of NVLinks connected to NVSwitch + +/* NvLink ECC Data Error Counters + * + * Lane ID needs to be specified in the scopeId field in nvmlFieldValue_t. + * + */ +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 148 //!< NVLink data ECC Error Counter for Link 0 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 149 //!< NVLink data ECC Error Counter for Link 1 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 150 //!< NVLink data ECC Error Counter for Link 2 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 151 //!< NVLink data ECC Error Counter for Link 3 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 152 //!< NVLink data ECC Error Counter for Link 4 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 153 //!< NVLink data ECC Error Counter for Link 5 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 154 //!< NVLink data ECC Error Counter for Link 6 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 155 //!< NVLink data ECC Error Counter for Link 7 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 156 //!< NVLink data ECC Error Counter for Link 8 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 157 //!< NVLink data ECC Error Counter for Link 9 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 158 //!< NVLink data ECC Error Counter for Link 10 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 159 //!< NVLink data ECC Error Counter for Link 11 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL 160 //!< NVLink data ECC Error Counter total for all Links + +/** + * NVLink Error Replay + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_ERROR_DL_REPLAY 161 //!< NVLink Replay Error Counter + //!< This is unsupported for Blackwell+. 
+ //!< Please use NVML_FI_DEV_NVLINK_COUNT_LINK_RECOVERY_* +/** + * NVLink Recovery Error Counter + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_ERROR_DL_RECOVERY 162 //!< NVLink Recovery Error Counter + //!< This is unsupported for Blackwell+ + //!< Please use NVML_FI_DEV_NVLINK_COUNT_LINK_RECOVERY_* + +/** + * NVLink Recovery Error CRC Counter + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_ERROR_DL_CRC 163 //!< NVLink CRC Error Counter + //!< This is unsupported for Blackwell+ + //!< Please use NVML_FI_DEV_NVLINK_COUNT_LINK_RECOVERY_* + +/** + * NVLink Speed, State and Version field id 164, 165, and 166 + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_GET_SPEED 164 //!< NVLink Speed in MBps +#define NVML_FI_DEV_NVLINK_GET_STATE 165 //!< NVLink State - Active,Inactive +#define NVML_FI_DEV_NVLINK_GET_VERSION 166 //!< NVLink Version + +#define NVML_FI_DEV_NVLINK_GET_POWER_STATE 167 //!< NVLink Power state. 
0=HIGH_SPEED 1=LOW_SPEED +#define NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD 168 //!< NVLink length of idle period (units can be found from + //!< NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_UNITS) before + //!< transitioning links to sleep state + +#define NVML_FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER 169 //!< Device PEX error recovery counter + +#define NVML_FI_DEV_C2C_LINK_COUNT 170 //!< Number of C2C Links present on the device +#define NVML_FI_DEV_C2C_LINK_GET_STATUS 171 //!< C2C Link Status 0=INACTIVE 1=ACTIVE +#define NVML_FI_DEV_C2C_LINK_GET_MAX_BW 172 //!< C2C Link Speed in MBps for active links + +#define NVML_FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS 173 //!< PCIe Correctable Errors Counter +#define NVML_FI_DEV_PCIE_COUNT_NAKS_RECEIVED 174 //!< PCIe NAK Receive Counter +#define NVML_FI_DEV_PCIE_COUNT_RECEIVER_ERROR 175 //!< PCIe Receiver Error Counter +#define NVML_FI_DEV_PCIE_COUNT_BAD_TLP 176 //!< PCIe Bad TLP Counter +#define NVML_FI_DEV_PCIE_COUNT_NAKS_SENT 177 //!< PCIe NAK Send Counter +#define NVML_FI_DEV_PCIE_COUNT_BAD_DLLP 178 //!< PCIe Bad DLLP Counter +#define NVML_FI_DEV_PCIE_COUNT_NON_FATAL_ERROR 179 //!< PCIe Non Fatal Error Counter +#define NVML_FI_DEV_PCIE_COUNT_FATAL_ERROR 180 //!< PCIe Fatal Error Counter +#define NVML_FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ 181 //!< PCIe Unsupported Request Counter +#define NVML_FI_DEV_PCIE_COUNT_LCRC_ERROR 182 //!< PCIe LCRC Error Counter +#define NVML_FI_DEV_PCIE_COUNT_LANE_ERROR 183 //!< PCIe Per Lane Error Counter. + +#define NVML_FI_DEV_IS_RESETLESS_MIG_SUPPORTED 184 //!< Device's Restless MIG Capability + +/** + * Retrieves power usage for this GPU in milliwatts. + * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode and + * \ref nvmlDeviceGetPowerUsage. + * + * scopeId needs to be specified. It signifies: + * 0 - GPU Only Scope - Metrics for GPU are retrieved + * 1 - Module scope - Metrics for the module (e.g. CPU + GPU) are retrieved. 
+ * Note: CPU here refers to NVIDIA CPU (e.g. Grace). x86 or non-NVIDIA ARM is not supported + */ +#define NVML_FI_DEV_POWER_AVERAGE 185 //!< GPU power averaged over 1 sec interval, supported on Ampere (except GA100) or newer architectures. +#define NVML_FI_DEV_POWER_INSTANT 186 //!< Current GPU power, supported on all architectures. +#define NVML_FI_DEV_POWER_MIN_LIMIT 187 //!< Minimum power limit in milliwatts. +#define NVML_FI_DEV_POWER_MAX_LIMIT 188 //!< Maximum power limit in milliwatts. +#define NVML_FI_DEV_POWER_DEFAULT_LIMIT 189 //!< Default power limit in milliwatts (limit which device boots with). +#define NVML_FI_DEV_POWER_CURRENT_LIMIT 190 //!< Limit currently enforced in milliwatts (This includes other limits set elsewhere. E.g. Out-of-band). +#define NVML_FI_DEV_ENERGY 191 //!< Total energy consumption (in mJ) since the driver was last reloaded. Same as \ref NVML_FI_DEV_TOTAL_ENERGY_CONSUMPTION for the GPU. +#define NVML_FI_DEV_POWER_REQUESTED_LIMIT 192 //!< Power limit requested by NVML or any other userspace client. + +/** + * GPU T.Limit temperature thresholds in degree Celsius + * + * These fields are supported on Ada and later architectures and supersedes \ref nvmlDeviceGetTemperatureThreshold. + */ +#define NVML_FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT 193 //!< T.Limit temperature after which GPU may shut down for HW protection +#define NVML_FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT 194 //!< T.Limit temperature after which GPU may begin HW slowdown +#define NVML_FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT 195 //!< T.Limit temperature after which GPU may begin SW slowdown due to memory temperature +#define NVML_FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT 196 //!< T.Limit temperature after which GPU may be throttled below base clock + +#define NVML_FI_DEV_PCIE_COUNT_TX_BYTES 197 //!< PCIe transmit bytes. Value can be wrapped. +#define NVML_FI_DEV_PCIE_COUNT_RX_BYTES 198 //!< PCIe receive bytes. Value can be wrapped. 
+ +#define NVML_FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE 199 //!< MIG mode independent, MIG query capable device. 1=yes. 0=no. + +#define NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_MAX 200 //!< Max Nvlink Power Threshold. See NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD + +/** + * NVLink counter field id 201-225 + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_COUNT_XMIT_PACKETS 201 //!> NVML_GPU_FABRIC_HEALTH_MASK_SHIFT##type) & \ + (NVML_GPU_FABRIC_HEALTH_MASK_WIDTH##type)) + +/** + * GPU Fabric Health Status Mask for various fields can be tested + * using the below macro. + * Ex - NVML_GPU_FABRIC_HEALTH_TEST(var, _DEGRADED_BW, _TRUE) + */ +#define NVML_GPU_FABRIC_HEALTH_TEST(var, type, val) \ + (NVML_GPU_FABRIC_HEALTH_GET(var, type) == \ + NVML_GPU_FABRIC_HEALTH_MASK##type##val) + +/** +* GPU Fabric information (v2). +* +* Version 2 adds the \ref nvmlGpuFabricInfo_v2_t.version field +* to the start of the structure, and the \ref nvmlGpuFabricInfo_v2_t.healthMask +* field to the end. This structure is not backwards-compatible with +* \ref nvmlGpuFabricInfo_t. +*/ +typedef struct { + unsigned int version; //!< Structure version identifier (set to nvmlGpuFabricInfo_v2) + unsigned char clusterUuid[NVML_GPU_FABRIC_UUID_LEN]; //!< Uuid of the cluster to which this GPU belongs + nvmlReturn_t status; //!< Error status, if any. Must be checked only if state returns "complete". + unsigned int cliqueId; //!< ID of the fabric clique to which this GPU belongs + nvmlGpuFabricState_t state; //!< Current state of GPU registration process + unsigned int healthMask; //!< GPU Fabric health Status Mask +} nvmlGpuFabricInfo_v2_t; + +typedef nvmlGpuFabricInfo_v2_t nvmlGpuFabricInfoV_t; + +/** +* Version identifier value for \ref nvmlGpuFabricInfo_v2_t.version. 
+*/ +#define nvmlGpuFabricInfo_v2 NVML_STRUCT_VERSION(GpuFabricInfo, 2) + /** @} */ /***************************************************************************************************/ @@ -2501,35 +3786,105 @@ nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion(int *cudaDriverVersion); * - \ref NVML_ERROR_LIBRARY_NOT_FOUND if \a libcuda.so.1 or libcuda.dll is not found * - \ref NVML_ERROR_FUNCTION_NOT_FOUND if \a cuDriverGetVersion() is not found in the shared library */ -nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion_v2(int *cudaDriverVersion); +nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion_v2(int *cudaDriverVersion); + +/** + * Macros for converting the CUDA driver version number to Major and Minor version numbers. + */ +#define NVML_CUDA_DRIVER_VERSION_MAJOR(v) ((v)/1000) +#define NVML_CUDA_DRIVER_VERSION_MINOR(v) (((v)%1000)/10) + +/** + * Gets name of the process with provided process id + * + * For all products. + * + * Returned process name is cropped to provided length. + * name string is encoded in ANSI. + * + * @param pid The identifier of the process + * @param name Reference in which to return the process name + * @param length The maximum allowed length of the string returned in \a name + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a name is NULL or \a length is 0. + * - \ref NVML_ERROR_NOT_FOUND if process doesn't exists + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlSystemGetProcessName(unsigned int pid, char *name, unsigned int length); + +/** + * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. + * + * For S-class products. 
+ * + * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. + * The HIC must be connected to an S-class system for it to be reported by this function. + * + * @param hwbcCount Size of hwbcEntries array + * @param hwbcEntries Array holding information about hwbc + * + * @return + * - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small + */ +nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries); + +/** + * Retrieve the set of GPUs that have a CPU affinity with the given CPU number + * For all products. + * Supported on Linux only. + * + * @param cpuNumber The CPU number + * @param count When zero, is set to the number of matching GPUs such that \a deviceArray + * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count + * number of device handles. + * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber + * + * @return + * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); /** - * Macros for converting the CUDA driver version number to Major and Minor version numbers. 
+ * Structure to store Driver branch information */ -#define NVML_CUDA_DRIVER_VERSION_MAJOR(v) ((v)/1000) -#define NVML_CUDA_DRIVER_VERSION_MINOR(v) (((v)%1000)/10) +typedef struct +{ + unsigned int version; //!< The version number of this struct + char branch[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< driver branch +} nvmlSystemDriverBranchInfo_v1_t; +typedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t; +#define nvmlSystemDriverBranchInfo_v1 NVML_STRUCT_VERSION(SystemDriverBranchInfo, 1) /** - * Gets name of the process with provided process id + * Retrieves the driver branch of the NVIDIA driver installed on the system. * * For all products. * - * Returned process name is cropped to provided length. - * name string is encoded in ANSI. + * The branch identifier is an alphanumeric string. It will not exceed 80 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. * - * @param pid The identifier of the process - * @param name Reference in which to return the process name - * @param length The maximum allowed length of the string returned in \a name + * @param branchInfo Pointer to the driver branch information structure \a nvmlSystemDriverBranchInfo_t + * @param length The maximum allowed length of the driver branch string * * @return - * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_SUCCESS successful completion * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a name is NULL or \a length is 0. 
- * - \ref NVML_ERROR_NOT_FOUND if process doesn't exists - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a branchInfo is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlSystemGetProcessName(unsigned int pid, char *name, unsigned int length); +nvmlReturn_t DECLDIR nvmlSystemGetDriverBranch(nvmlSystemDriverBranchInfo_t *branchInfo, unsigned int length); + /** @} */ @@ -2696,24 +4051,6 @@ nvmlReturn_t DECLDIR nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_ */ nvmlReturn_t DECLDIR nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int *deviceCount, nvmlDevice_t *devices); -/** - * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. - * - * For S-class products. - * - * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. - * The HIC must be connected to an S-class system for it to be reported by this function. 
- * - * @param hwbcCount Size of hwbcEntries array - * @param hwbcEntries Array holding information about hwbc - * - * @return - * - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small - */ -nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries); /** @} */ /***************************************************************************************************/ @@ -2851,7 +4188,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex_v2(unsigned int index, nvmlDevic nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_t *device); /** - * Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device. + * Acquire the handle for a particular device, based on its globally unique immutable UUID (in ASCII format) associated with each device. * * For all products. * @@ -2875,6 +4212,29 @@ nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_ */ nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *device); +/** + * Acquire the handle for a particular device, based on its globally unique immutable UUID (in either ASCII or binary format) associated with each device. + * See \ref nvmlUUID_v1_t for more information on the UUID struct. The caller must set the appropriate version prior to calling this API. + * + * For all products. 
+ * + * @param[in] uuid The UUID of the target GPU or MIG instance + * @param[out] device Reference in which to return the device handle or MIG device handle + * + * This API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs as it searches for the target GPU + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid, \a device is null or \a uuid->type is invalid + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the provided version is invalid/unsupported + * - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system + * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUIDV(const nvmlUUID_t *uuid, nvmlDevice_t *device); + /** * Acquire the handle for a particular device, based on its PCI bus id. * @@ -2890,6 +4250,11 @@ nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *d * instead of NVML_ERROR_NO_PERMISSION. * * @param pciBusId The PCI bus id of the target GPU + * Accept the following formats (all numbers in hexadecimal): + * domain:bus:device.function in format %x:%x:%x.%x + * domain:bus:device in format %x:%x:%x + * bus:device.function in format %x:%x.%x + * * @param device Reference in which to return the device handle * * @return @@ -3008,6 +4373,37 @@ nvmlReturn_t DECLDIR nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int *index */ nvmlReturn_t DECLDIR nvmlDeviceGetSerial(nvmlDevice_t device, char *serial, unsigned int length); +/** + * Get a unique identifier for the device module on the baseboard + * + * This API retrieves a unique identifier for each GPU module that exists on a given baseboard. + * For non-baseboard products, this ID would always be 0. 
+ * + * @param device The identifier of the target device + * @param moduleId Unique identifier for the GPU module + * + * @return + * - \ref NVML_SUCCESS if \a moduleId has been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a moduleId is invalid + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetModuleId(nvmlDevice_t device, unsigned int *moduleId); + +/** + * Retrieves the Device's C2C Mode information + * + * @param device The identifier of the target device + * @param c2cModeInfo Output struct containing the device's C2C Mode info + * + * @return + * - \ref NVML_SUCCESS if \a C2C Mode Infor query is successful + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetC2cModeInfoV(nvmlDevice_t device, nvmlC2cModeInfo_v1_t *c2cModeInfo); /***************************************************************************************************/ @@ -3147,6 +4543,19 @@ nvmlReturn_t DECLDIR nvmlDeviceSetCpuAffinity(nvmlDevice_t device); */ nvmlReturn_t DECLDIR nvmlDeviceClearCpuAffinity(nvmlDevice_t device); +/** + * Get the NUMA node of the given GPU device. + * This only applies to platforms where the GPUs are NUMA nodes. 
+ * + * @param[in] device The device handle + * @param[out] node NUMA node ID of the device + * + * @returns + * - \ref NVML_SUCCESS if the NUMA node is retrieved successfully + * - \ref NVML_ERROR_NOT_SUPPORTED if request is not supported on the current platform + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device \a node is invalid + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumaNodeId(nvmlDevice_t device, unsigned int *node); /** * Retrieve the common ancestor for two devices * For all products. @@ -3186,25 +4595,6 @@ nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, n */ nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int *count, nvmlDevice_t *deviceArray); -/** - * Retrieve the set of GPUs that have a CPU affinity with the given CPU number - * For all products. - * Supported on Linux only. - * - * @param cpuNumber The CPU number - * @param count When zero, is set to the number of matching GPUs such that \a deviceArray - * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count - * number of device handles. 
- * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber - * - * @return - * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); - /** * Retrieve the status for a given p2p capability index between a given pair of GPU * @@ -3248,31 +4638,6 @@ nvmlReturn_t DECLDIR nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t d */ nvmlReturn_t DECLDIR nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length); -/** - * Retrieve the MDEV UUID of a vGPU instance. - * - * The MDEV UUID is a globally unique identifier of the mdev device assigned to the VM, and is returned as a 5-part hexadecimal string, - * not exceeding 80 characters in length (including the NULL terminator). - * MDEV UUID is displayed only on KVM platform. - * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * For Maxwell &tm; or newer fully supported devices. 
- * - * @param vgpuInstance Identifier of the target vGPU instance - * @param mdevUuid Pointer to caller-supplied buffer to hold MDEV UUID - * @param size Size of buffer in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_SUPPORTED on any hypervisor other than KVM - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mdevUuid is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t vgpuInstance, char *mdevUuid, unsigned int size); - /** * Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for * each GPU will have the form /dev/nvidia[minor number]. @@ -3408,6 +4773,27 @@ nvmlReturn_t DECLDIR nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t devi */ nvmlReturn_t DECLDIR nvmlDeviceValidateInforom(nvmlDevice_t device); +/** + * Retrieves the timestamp and the duration of the last flush of the BBX (blackbox) infoROM object during the current run. + * + * For all products with an inforom. 
+ * + * @param device The identifier of the target device + * @param timestamp The start timestamp of the last BBX Flush + * @param durationUs The duration (us) of the last BBX Flush + * + * @return + * - \ref NVML_SUCCESS if \a timestamp and \a durationUs are successfully retrieved + * - \ref NVML_ERROR_NOT_READY if the BBX object has not been flushed yet + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetInforomVersion + */ +nvmlReturn_t DECLDIR nvmlDeviceGetLastBBXFlushTime(nvmlDevice_t device, unsigned long long *timestamp, + unsigned long *durationUs); + /** * Retrieves the display mode for the device. * @@ -3482,6 +4868,25 @@ nvmlReturn_t DECLDIR nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableS */ nvmlReturn_t DECLDIR nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t *mode); +/** + * Retrieves PCI attributes of this device. + * + * For all products. + * + * See \ref nvmlPciInfoExt_v1_t for details on the available PCI info. + * + * @param device The identifier of the target device + * @param pci Reference in which to return the PCI info + * + * @return + * - \ref NVML_SUCCESS if \a pci has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPciInfoExt(nvmlDevice_t device, nvmlPciInfoExt_t *pci); + /** * Retrieves the PCI attributes of this device. 
* @@ -3683,6 +5088,20 @@ nvmlReturn_t DECLDIR nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t */ nvmlReturn_t DECLDIR nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); +/** + * Retrieve the GPCCLK VF offset value + * @param[in] device The identifier of the target device + * @param[out] offset The retrieved GPCCLK VF offset value + * + * @return + * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t device, int *offset); + /** * Retrieves the current setting of a clock that applications will use unless an overspec situation occurs. * Can be changed using \ref nvmlDeviceSetApplicationsClocks. @@ -3725,33 +5144,6 @@ nvmlReturn_t DECLDIR nvmlDeviceGetApplicationsClock(nvmlDevice_t device, nvmlClo */ nvmlReturn_t DECLDIR nvmlDeviceGetDefaultApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); -/** - * Resets the application clock to the default value - * - * This is the applications clock that will be used after system reboot or driver reload. - * Default value is constant, but the current value an be changed using \ref nvmlDeviceSetApplicationsClocks. - * - * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks, - * this call will unlock clocks. This returns clocks their default behavior ofautomatically boosting above - * base clocks as thermal limits allow. - * - * @see nvmlDeviceGetApplicationsClock - * @see nvmlDeviceSetApplicationsClocks - * - * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. 
- * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if new settings were successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device); - /** * Retrieves the clock speed for the clock specified by the clock type and clock ID. * @@ -3831,107 +5223,44 @@ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t device, uns * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetApplicationsClocks - * @see nvmlDeviceGetSupportedMemoryClocks - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz); - -/** - * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled - * - * For Kepler &tm; or newer fully supported devices. 
- * - * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. - * - * On Pascal and newer hardware, Auto Aoosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param isEnabled Where to store the current state of Auto Boosted clocks of the target device - * @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will - * revert to when no applications are using the GPU - * - * @return - * - \ref NVML_SUCCESS If \a isEnabled has been been set with the Auto Boosted clocks state of \a device - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled); - -/** - * Try to set the current state of Auto Boosted clocks on a device. - * - * For Kepler &tm; or newer fully supported devices. - * - * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock - * rates are desired. 
- * - * Non-root users may use this API by default but can be restricted by root from using this API by calling - * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS. - * Note: Persistence Mode is required to modify current Auto Boost settings, therefore, it must be enabled. - * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param enabled What state to try to set Auto Boosted clocks of the target device to - * - * @return - * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * + * @see nvmlDeviceSetApplicationsClocks + * @see nvmlDeviceGetSupportedMemoryClocks */ -nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled); +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz); /** - * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will - * return to when no compute running processes (e.g. 
CUDA application which have an active context) are running + * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled * - * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * Requires root/admin permissions. + * For Kepler &tm; or newer fully supported devices. * * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock - * rates are desired. + * to maximize performance as thermal limits allow. * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. + * On Pascal and newer hardware, Auto Aoosted clocks are controlled through application clocks. * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost * behavior. * * @param device The identifier of the target device - * @param enabled What state to try to set default Auto Boosted clocks of the target device to - * @param flags Flags that change the default behavior. Currently Unused. + * @param isEnabled Where to store the current state of Auto Boosted clocks of the target device + * @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will + * revert to when no applications are using the GPU * * @return - * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled + * - \ref NVML_SUCCESS If \a isEnabled has been been set with the Auto Boosted clocks state of \a device * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. 
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * */ -nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags); - +nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled); /** * Retrieves the intended operating speed of the device's fan. @@ -3983,6 +5312,30 @@ nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *sp */ nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int * speed); +/** + * Retrieves the intended operating speed in rotations per minute (RPM) of the device's specified fan. + * + * For Maxwell &tm; or newer fully supported devices. + * + * For all discrete products with dedicated fans. + * + * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the + * output will not match the actual fan speed. 
+ * + * @param device The identifier of the target device + * @param fanSpeed Structure specifying the index of the target fan (input) and + * retrieved fan speed value (output) + * + * @return + * - \ref NVML_SUCCESS If everything worked + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, \a fan is not an acceptable + * index, or \a speed is NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the provided version is invalid/unsupported + * - \ref NVML_ERROR_NOT_SUPPORTED If the \a device does not support this feature + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeedRPM(nvmlDevice_t device, nvmlFanSpeedInfo_t *fanSpeed); + /** * Retrieves the intended target speed of the device's specified fan. * @@ -4011,24 +5364,6 @@ nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed_v2(nvmlDevice_t device, unsigned int */ nvmlReturn_t DECLDIR nvmlDeviceGetTargetFanSpeed(nvmlDevice_t device, unsigned int fan, unsigned int *targetSpeed); -/** - * Sets the speed of the fan control policy to default. - * - * For all cuda-capable discrete products with fans - * - * @param device The identifier of the target device - * @param fan The index of the fan, starting at zero - * - * return - * NVML_SUCCESS if speed has been adjusted - * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * NVML_ERROR_INVALID_ARGUMENT if device is invalid - * NVML_ERROR_NOT_SUPPORTED if the device does not support this - * (doesn't have fans) - * NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan); - /** * Retrieves the min and max fan speed that user can set for the GPU fan. * @@ -4071,213 +5406,506 @@ nvmlReturn_t DECLDIR nvmlDeviceGetFanControlPolicy_v2(nvmlDevice_t device, unsig nvmlFanControlPolicy_t *policy); /** - * Sets current fan control policy. 
+ * Retrieves the number of fans on the device. + * + * For all discrete products with dedicated fans. + * + * @param device The identifier of the target device + * @param numFans The number of fans + * + * @return + * - \ref NVML_SUCCESS if \a fan number query was successful + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a numFans is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumFans(nvmlDevice_t device, unsigned int *numFans); + +/** + * @deprecated Use \ref nvmlDeviceGetTemperatureV instead + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); + +/** + * Retrieves the cooler's information. + * Returns a cooler's control signal characteristics. The possible types are restricted, Variable and Toggle. + * See \ref nvmlCoolerControl_t for details on available signal types. + * Returns objects that cooler cools. Targets may be GPU, Memory, Power Supply or All of these. + * See \ref nvmlCoolerTarget_t for details on available targets. * * For Maxwell &tm; or newer fully supported devices. * - * Requires privileged user. + * For all discrete products with dedicated fans. 
* - * For all cuda-capable discrete products with fans + * @param[in] device The identifier of the target device + * @param[out] coolerInfo Structure specifying the cooler's control signal characteristics (out) + * and the target that cooler cools (out) * - * device The identifier of the target \a device - * policy The fan control \a policy to set + * @return + * - \ref NVML_SUCCESS If everything worked + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, \a signalType or \a target is NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the provided version is invalid/unsupported + * - \ref NVML_ERROR_NOT_SUPPORTED If the \a device does not support this feature + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCoolerInfo(nvmlDevice_t device, nvmlCoolerInfo_t *coolerInfo); + +/** + * Structure used to encapsulate temperature info + */ +typedef struct +{ + unsigned int version; + nvmlTemperatureSensors_t sensorType; + int temperature; +} nvmlTemperature_v1_t; + +typedef nvmlTemperature_v1_t nvmlTemperature_t; + +#define nvmlTemperature_v1 NVML_STRUCT_VERSION(Temperature, 1) + +/** + * Retrieves the current temperature readings (in degrees C) for the given device. * - * return - * NVML_SUCCESS if \a policy has been set - * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference - * a fan that exists. - * NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell - * NVML_ERROR_UNKNOWN on any unexpected error + * For all products. + * + * @param[in] device Target device identifier. + * @param[in,out] temperature Structure specifying the sensor type (input) and retrieved + * temperature value (output). 
+ * + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureV(nvmlDevice_t device, nvmlTemperature_t *temperature); + + +/** + * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. + * + * For Kepler &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * Note: This API is no longer the preferred interface for retrieving the following temperature thresholds + * on Ada and later architectures: NVML_TEMPERATURE_THRESHOLD_SHUTDOWN, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN, + * NVML_TEMPERATURE_THRESHOLD_MEM_MAX and NVML_TEMPERATURE_THRESHOLD_GPU_MAX. + * + * Support for reading these temperature thresholds for Ada and later architectures would be removed from this + * API in future releases. Please use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_TEMPERATURE_* fields to retrieve + * temperature thresholds on these architectures. 
+ * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value queried + * @param temp Reference in which to return the temperature reading + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); + +/** + * Retrieves the thermal margin temperature (distance to nearest slowdown threshold). + * + * @param[in] device The identifier of the target device + * @param[in,out] marginTempInfo Versioned structure in which to return the temperature reading + * + * @returns + * - \ref NVML_SUCCESS if the margin temperature was retrieved successfully + * - \ref NVML_ERROR_NOT_SUPPORTED if request is not supported on the current platform + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a temperature is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the right versioned structure is not used + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMarginTemperature(nvmlDevice_t device, nvmlMarginTemperature_t *marginTempInfo); + +/** + * Used to execute a list of thermal system instructions. 
+ * + * @param device The identifier of the target device + * @param sensorIndex The index of the thermal sensor + * @param pThermalSettings Reference in which to return the thermal sensor information + * + * @return + * - \ref NVML_SUCCESS if \a pThermalSettings has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pThermalSettings is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetThermalSettings(nvmlDevice_t device, unsigned int sensorIndex, nvmlGpuThermalSettings_t *pThermalSettings); + +/** + * Retrieves the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. + * + * @param device The identifier of the target device + * @param pState Reference in which to return the performance state reading + * + * @return + * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); + +/** + * Retrieves current clocks event reasons. + * + * For all fully supported products. + * + * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. 
+ * + * @param device The identifier of the target device + * @param clocksEventReasons Reference in which to return bitmask of active clocks event + * reasons + * + * @return + * - \ref NVML_SUCCESS if \a clocksEventReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksEventReasons is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksEventReasons + * @see nvmlDeviceGetSupportedClocksEventReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice_t device, unsigned long long *clocksEventReasons); + +/** + * @deprecated Use \ref nvmlDeviceGetCurrentClocksEventReasons instead + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); + +/** + * Retrieves bitmask of supported clocks event reasons that can be returned by + * \ref nvmlDeviceGetCurrentClocksEventReasons + * + * For all fully supported products. + * + * This method is not supported in virtual machines running virtual GPU (vGPU). 
+ * + * @param device The identifier of the target device + * @param supportedClocksEventReasons Reference in which to return bitmask of supported + * clocks event reasons + * + * @return + * - \ref NVML_SUCCESS if \a supportedClocksEventReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksEventReasons is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksEventReasons + * @see nvmlDeviceGetCurrentClocksEventReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice_t device, unsigned long long *supportedClocksEventReasons); + +/** + * @deprecated Use \ref nvmlDeviceGetSupportedClocksEventReasons instead + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); + +/** + * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. + * + * Retrieve the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. 
+ * + * @param device The identifier of the target device + * @param pState Reference in which to return the performance state reading + * + * @return + * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState); + +/** + * Retrieve performance monitor samples from the associated subdevice. + * + * @param device + * @param pDynamicPstatesInfo + * + * @return + * - \ref NVML_SUCCESS if \a pDynamicPstatesInfo has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pDynamicPstatesInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, - nvmlFanControlPolicy_t policy); +nvmlReturn_t DECLDIR nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t device, nvmlGpuDynamicPstatesInfo_t *pDynamicPstatesInfo); /** - * Retrieves the number of fans on the device. - * - * For all discrete products with dedicated fans. - * - * @param device The identifier of the target device - * @param numFans The number of fans + * Retrieve the MemClk (Memory Clock) VF offset value. 
+ * @param[in] device The identifier of the target device + * @param[out] offset The retrieved MemClk VF offset value * * @return - * - \ref NVML_SUCCESS if \a fan number query was successful + * - \ref NVML_SUCCESS if \a offset has been successfully queried * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a numFans is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetNumFans(nvmlDevice_t device, unsigned int *numFans); +nvmlReturn_t DECLDIR nvmlDeviceGetMemClkVfOffset(nvmlDevice_t device, int *offset); /** - * Retrieves the current temperature readings for the device, in degrees C. - * - * For all products. - * - * See \ref nvmlTemperatureSensors_t for details on available temperature sensors. 
+ * Retrieve min and max clocks of some clock domain for a given PState * * @param device The identifier of the target device - * @param sensorType Flag that indicates which sensor reading to retrieve - * @param temp Reference in which to return the temperature reading + * @param type Clock domain + * @param pstate PState to query + * @param minClockMHz Reference in which to return min clock frequency + * @param maxClockMHz Reference in which to return max clock frequency * * @return - * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_SUCCESS if everything worked * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a type or \a pstate are invalid or both + * \a minClockMHz and \a maxClockMHz are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); +nvmlReturn_t DECLDIR nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t device, nvmlClockType_t type, nvmlPstates_t pstate, + unsigned int * minClockMHz, unsigned int * maxClockMHz); /** - * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. + * Get all supported Performance States (P-States) for the device. * - * For Kepler &tm; or newer fully supported devices. + * The returned array would contain a contiguous list of valid P-States supported by + * the device. 
If the number of supported P-States is fewer than the size of the array + * supplied missing elements would contain \a NVML_PSTATE_UNKNOWN. * - * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * The number of elements in the returned list will never exceed \a NVML_MAX_GPU_PERF_PSTATES. * * @param device The identifier of the target device - * @param thresholdType The type of threshold value queried - * @param temp Reference in which to return the temperature reading + * @param pstates Container to return the list of performance states + * supported by device + * @param size Size of the supplied \a pstates array in bytes + * * @return - * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_SUCCESS if \a pstates array has been retrieved + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if the the container supplied was not large enough to + * hold the resulting list * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a pstates is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support performance state readings * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t device, + nvmlPstates_t *pstates, unsigned int size); /** - * Sets the temperature threshold for the GPU with the specified threshold type in degrees C. - * - * For Maxwell &tm; or newer fully supported devices. 
- * - * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * Retrieve the GPCCLK min max VF offset value. + * @param[in] device The identifier of the target device + * @param[out] minOffset The retrieved GPCCLK VF min offset value + * @param[out] maxOffset The retrieved GPCCLK VF max offset value * - * @param device The identifier of the target device - * @param thresholdType The type of threshold value to be set - * @param temp Reference which hold the value to be set * @return - * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_SUCCESS if \a offset has been successfully queried * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int *temp); +nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t device, + int *minOffset, int *maxOffset); /** - * Used to execute a list of thermal system instructions. - * - * @param device The identifier of the target device - * @param sensorIndex The index of the thermal sensor - * @param pThermalSettings Reference in which to return the thermal sensor information + * Retrieve the MemClk (Memory Clock) min max VF offset value. 
+ * @param[in] device The identifier of the target device + * @param[out] minOffset The retrieved MemClk VF min offset value + * @param[out] maxOffset The retrieved MemClk VF max offset value * * @return - * - \ref NVML_SUCCESS if \a pThermalSettings has been set + * - \ref NVML_SUCCESS if \a offset has been successfully queried * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pThermalSettings is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetThermalSettings(nvmlDevice_t device, unsigned int sensorIndex, nvmlGpuThermalSettings_t *pThermalSettings); +nvmlReturn_t DECLDIR nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t device, + int *minOffset, int *maxOffset); /** - * Retrieves the current performance state for the device. + * Retrieve min, max and current clock offset of some clock domain for a given PState * - * For Fermi &tm; or newer fully supported devices. + * For Maxwell &tm; or newer fully supported devices. * - * See \ref nvmlPstates_t for details on allowed performance states. + * Note: \ref nvmlDeviceGetGpcClkVfOffset, \ref nvmlDeviceGetMemClkVfOffset, \ref nvmlDeviceGetGpcClkMinMaxVfOffset and + * \ref nvmlDeviceGetMemClkMinMaxVfOffset will be deprecated in a future release. + Use \ref nvmlDeviceGetClockOffsets instead. 
* * @param device The identifier of the target device - * @param pState Reference in which to return the performance state reading + * @param info Structure specifying the clock type (input) and the pstate (input) + * retrieved clock offset value (output), min clock offset (output) + * and max clock offset (output) * * @return - * - \ref NVML_SUCCESS if \a pState has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_SUCCESS If everything worked + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a type or \a pstate are invalid or both + * \a minClockOffsetMHz and \a maxClockOffsetMHz are NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the provided version is invalid/unsupported + * - \ref NVML_ERROR_NOT_SUPPORTED If the device does not support this feature */ -nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); +nvmlReturn_t DECLDIR nvmlDeviceGetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t *info); /** - * Retrieves current clocks throttling reasons. + * Control current clock offset of some clock domain for a given PState * - * For all fully supported products. + * For Maxwell &tm; or newer fully supported devices. * - * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. + * Requires privileged user. 
* - * @param device The identifier of the target device - * @param clocksThrottleReasons Reference in which to return bitmask of active clocks throttle - * reasons + * @param device The identifier of the target device + * @param info Structure specifying the clock type (input), the pstate (input) + * and clock offset value (input) * * @return - * - \ref NVML_SUCCESS if \a clocksThrottleReasons has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksThrottleReasons is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetSupportedClocksThrottleReasons + * - \ref NVML_SUCCESS If everything worked + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_NO_PERMISSION If the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a type or \a pstate are invalid or both + * \a clockOffsetMHz is out of allowed range. 
+ * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the provided version is invalid/unsupported + * - \ref NVML_ERROR_NOT_SUPPORTED If the device does not support this feature */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); +nvmlReturn_t DECLDIR nvmlDeviceSetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t *info); /** - * Retrieves bitmask of supported clocks throttle reasons that can be returned by - * \ref nvmlDeviceGetCurrentClocksThrottleReasons + * Retrieves a performance mode string with all the + * performance modes defined for this device along with their associated + * GPU Clock and Memory Clock values. + * Not all tokens will be reported on all GPUs, and additional tokens + * may be added in the future. + * For backwards compatibility we still provide nvclock and memclock; + * those are the same as nvclockmin and memclockmin. * - * For all fully supported products. + * Note: These clock values take into account the offset + * set by clients through /ref nvmlDeviceSetClockOffsets. + * + * Maximum available Pstate (P15) shows the minimum performance level (0) and vice versa. + * + * Each performance modes are returned as a comma-separated list of + * "token=value" pairs. Each set of performance mode tokens are separated + * by a ";". 
Valid tokens: + * + * Token Value + * "perf" unsigned int - the Performance level + * "nvclock" unsigned int - the GPU clocks (in MHz) for the perf level + * "nvclockmin" unsigned int - the GPU clocks min (in MHz) for the perf level + * "nvclockmax" unsigned int - the GPU clocks max (in MHz) for the perf level + * "nvclockeditable" unsigned int - if the GPU clock domain is editable for the perf level + * "memclock" unsigned int - the memory clocks (in MHz) for the perf level + * "memclockmin" unsigned int - the memory clocks min (in MHz) for the perf level + * "memclockmax" unsigned int - the memory clocks max (in MHz) for the perf level + * "memclockeditable" unsigned int - if the memory clock domain is editable for the perf level + * "memtransferrate" unsigned int - the memory transfer rate (in MHz) for the perf level + * "memtransferratemin" unsigned int - the memory transfer rate min (in MHz) for the perf level + * "memtransferratemax" unsigned int - the memory transfer rate max (in MHz) for the perf level + * "memtransferrateeditable" unsigned int - if the memory transfer rate is editable for the perf level + * + * Example: + * + * perf=0, nvclock=324, nvclockmin=324, nvclockmax=324, nvclockeditable=0, + * memclock=324, memclockmin=324, memclockmax=324, memclockeditable=0, + * memtransferrate=648, memtransferratemin=648, memtransferratemax=648, + * memtransferrateeditable=0 ; + * perf=1, nvclock=324, nvclockmin=324, nvclockmax=640, nvclockeditable=0, + * memclock=810, memclockmin=810, memclockmax=810, memclockeditable=0, + * memtransferrate=1620, memtransferrate=1620, memtransferrate=1620, + * memtransferrateeditable=0 ; * - * This method is not supported in virtual machines running virtual GPU (vGPU). 
* * @param device The identifier of the target device - * @param supportedClocksThrottleReasons Reference in which to return bitmask of supported - * clocks throttle reasons + * @param perfModes Reference in which to return the performance level string * * @return - * - \ref NVML_SUCCESS if \a supportedClocksThrottleReasons has been set + * - \ref NVML_SUCCESS if \a perfModes has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksThrottleReasons is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetCurrentClocksThrottleReasons */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); +nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceModes(nvmlDevice_t device, nvmlDevicePerfModes_t *perfModes); /** - * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. + * Retrieves a string with the associated current GPU Clock and Memory Clock values. * - * Retrieve the current performance state for the device. + * Not all tokens will be reported on all GPUs, and additional tokens + * may be added in the future. * - * For Fermi &tm; or newer fully supported devices. + * Note: These clock values take into account the offset + * set by clients through /ref nvmlDeviceSetClockOffsets. + * + * Clock values are returned as a comma-separated list of + * "token=value" pairs. 
+ * Valid tokens: + * + * Token Value + * "perf" unsigned int - the Performance level + * "nvclock" unsigned int - the GPU clocks (in MHz) for the perf level + * "nvclockmin" unsigned int - the GPU clocks min (in MHz) for the perf level + * "nvclockmax" unsigned int - the GPU clocks max (in MHz) for the perf level + * "nvclockeditable" unsigned int - if the GPU clock domain is editable for the perf level + * "memclock" unsigned int - the memory clocks (in MHz) for the perf level + * "memclockmin" unsigned int - the memory clocks min (in MHz) for the perf level + * "memclockmax" unsigned int - the memory clocks max (in MHz) for the perf level + * "memclockeditable" unsigned int - if the memory clock domain is editable for the perf level + * "memtransferrate" unsigned int - the memory transfer rate (in MHz) for the perf level + * "memtransferratemin" unsigned int - the memory transfer rate min (in MHz) for the perf level + * "memtransferratemax" unsigned int - the memory transfer rate max (in MHz) for the perf level + * "memtransferrateeditable" unsigned int - if the memory transfer rate is editable for the perf level + * + * Example: + * + * nvclock=324, nvclockmin=324, nvclockmax=324, nvclockeditable=0, + * memclock=324, memclockmin=324, memclockmax=324, memclockeditable=0, + * memtransferrate=648, memtransferratemin=648, memtransferratemax=648, + * memtransferrateeditable=0 ; * - * See \ref nvmlPstates_t for details on allowed performance states. 
* * @param device The identifier of the target device - * @param pState Reference in which to return the performance state reading + * @param currentClockFreqs Reference in which to return the performance level string * * @return - * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_SUCCESS if \a currentClockFreqs has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState); +nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClockFreqs(nvmlDevice_t device, nvmlDeviceCurrentClockFreqs_t *currentClockFreqs); /** * This API has been deprecated. @@ -4378,7 +6006,12 @@ nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t devic * * For Fermi &tm; or newer fully supported devices. * - * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. + * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. On Ampere + * (except GA100) or newer GPUs, the API returns power averaged over 1 sec interval. On GA100 and + * older architectures, instantaneous power is returned. + * + * See \ref NVML_FI_DEV_POWER_AVERAGE and \ref NVML_FI_DEV_POWER_INSTANT to query specific power + * values. * * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode. 
* @@ -4478,6 +6111,14 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuO * * @note nvmlDeviceGetMemoryInfo_v2 adds additional memory information. * + * @note On systems where GPUs are NUMA nodes, the accuracy of FB memory utilization + * provided by this API depends on the memory accounting of the operating system. + * This is because FB memory is managed by the operating system instead of the NVIDIA GPU driver. + * Typically, pages allocated from FB memory are not released even after + * the process terminates to enhance performance. In scenarios where + * the operating system is under memory pressure, it may resort to utilizing FB memory. + * Such actions can result in discrepancies in the accuracy of memory reporting. + * * @param device The identifier of the target device * @param memory Reference in which to return the memory information * @@ -4490,6 +6131,10 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuO * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory); + +/** + * nvmlDeviceGetMemoryInfo_v2 accounts separately for reserved memory and includes it in the used memory amount. + */ nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo_v2(nvmlDevice_t device, nvmlMemory_v2_t *memory); /** @@ -4538,6 +6183,66 @@ nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMo */ nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int *major, int *minor); +/** + * Retrieves the current and pending DRAM Encryption modes for the device. + * + * %BLACKWELL_OR_NEWER% + * Only applicable to devices that support DRAM Encryption + * Requires \a NVML_INFOROM_DEN version 1.0 or higher. + * + * Changing DRAM Encryption modes requires a reboot. The "pending" DRAM Encryption mode refers to the target mode following + * the next reboot. 
+ * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param current Reference in which to return the current DRAM Encryption mode + * @param pending Reference in which to return the pending DRAM Encryption mode + * + * @return + * - \ref NVML_SUCCESS if \a current and \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the argument version is not supported + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetDramEncryptionMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDramEncryptionMode(nvmlDevice_t device, nvmlDramEncryptionInfo_t *current, nvmlDramEncryptionInfo_t *pending); + +/** + * Set the DRAM Encryption mode for the device. + * + * For Kepler &tm; or newer fully supported devices. + * Only applicable to devices that support DRAM Encryption. + * Requires \a NVML_INFOROM_DEN version 1.0 or higher. + * Requires root/admin permissions. + * + * The DRAM Encryption mode determines whether the GPU enables its DRAM Encryption support. + * + * This operation takes effect after the next reboot. + * + * See \ref nvmlEnableState_t for details on available modes. 
+ * + * @param device The identifier of the target device + * @param dramEncryption The target DRAM Encryption mode + * + * @return + * - \ref NVML_SUCCESS if the DRAM Encryption mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a DRAM Encryption is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the argument version is not supported + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetDramEncryptionMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDramEncryptionMode(nvmlDevice_t device, const nvmlDramEncryptionInfo_t *dramEncryption); + /** * Retrieves the current and pending ECC modes for the device. * @@ -4834,10 +6539,10 @@ nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned in * Retrieves information about active encoder sessions on a target device. * * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The - * array elememt count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions + * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions * written to the buffer. * - * If the supplied buffer is not large enough to accomodate the active session array, the function returns + * If the supplied buffer is not large enough to accommodate the active session array, the function returns * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. 
* To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. @@ -4880,13 +6585,55 @@ nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned */ nvmlReturn_t DECLDIR nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); +/** + * Retrieves the current utilization and sampling size in microseconds for the JPG + * + * %TURING_OR_NEWER% + * + * @note On MIG-enabled GPUs, querying decoder utilization is not currently supported. + * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for jpg utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetJpgUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** + * Retrieves the current utilization and sampling size in microseconds for the OFA (Optical Flow Accelerator) + * + * %TURING_OR_NEWER% + * + * @note On MIG-enabled GPUs, querying decoder utilization is not currently supported. 
+ * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for ofa utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetOfaUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + /** * Retrieves the active frame buffer capture sessions statistics for a given device. * * For Maxwell &tm; or newer fully supported devices. * * @param device The identifier of the target device -* @param fbcStats Reference to nvmlFBCStats_t structure contianing NvFBC stats +* @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats * * @return * - \ref NVML_SUCCESS if \a fbcStats is fetched @@ -4904,7 +6651,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetFBCStats(nvmlDevice_t device, nvmlFBCStats_t * * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions * written to the buffer. * -* If the supplied buffer is not large enough to accomodate the active session array, the function returns +* If the supplied buffer is not large enough to accommodate the active session array, the function returns * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount. * To query the number of active FBC sessions, call this function with *sessionCount = 0. 
The code will return * NVML_SUCCESS with number of active FBC sessions updated in *sessionCount. @@ -4931,11 +6678,11 @@ nvmlReturn_t DECLDIR nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int /** * Retrieves the current and pending driver model for the device. * - * For Fermi &tm; or newer fully supported devices. + * For Kepler &tm; or newer fully supported devices. * For windows only. * - * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached - * to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached. + * On Windows platforms the device driver can run in either WDDM, MCDM or WDM (TCC) modes. If a display is attached + * to the device it must run in WDDM mode. MCDM mode is preferred if a display is not attached. TCC mode is deprecated. * * See \ref nvmlDriverModel_t for details on available driver models. * @@ -4951,9 +6698,9 @@ nvmlReturn_t DECLDIR nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * - * @see nvmlDeviceSetDriverModel() + * @see nvmlDeviceSetDriverModel_v2() */ -nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); +nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel_v2(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); /** * Get VBIOS version of the device. 
@@ -5084,7 +6831,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t device nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); /** - * Get information about processes with a MPS compute context on a device + * Get information about processes with a Multi-Process Service (MPS) compute context on a device * * For Volta &tm; or newer fully supported devices. * @@ -5126,6 +6873,57 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v3(nvmlDevice_t devic */ nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); +/** + * Get information about running processes on a device for input context + * + * For Hopper &tm; or newer fully supported devices. + * + * This function returns information only about running processes (e.g. CUDA application which have + * active context). + * + * To determine the size of the \a plist->procArray array to allocate, call the function with + * \a plist->numProcArrayEntries set to zero and \a plist->procArray set to NULL. The return + * code will be either NVML_ERROR_INSUFFICIENT_SIZE (if there are valid processes of type + * \a plist->mode to report on, in which case the \a plist->numProcArrayEntries field will + * indicate the required number of entries in the array) or NVML_SUCCESS (if no processes of type + * \a plist->mode exist). + * + * The usedGpuMemory field returned is all of the memory used by the application. + * The usedGpuCcProtectedMemory field returned is all of the protected memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a plist->procArray table in case new processes are spawned. 
+ * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in + * vGPU Host virtualization mode. + * Protected memory usage is currently not available in MIG mode and in windows. + * + * @param device The device handle or MIG device handle + * @param plist Reference in which to process detail list + * \a plist->version The api version + * \a plist->mode The process mode + * \a plist->procArray Reference in which to return the process information + * \a plist->numProcArrayEntries Proc array size of returned entries + * + * @return + * - \ref NVML_SUCCESS if \a plist->numprocArrayEntries and \a plist->procArray have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a plist->numprocArrayEntries indicates that the \a plist->procArray is too small + * \a plist->numprocArrayEntries will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a plist is NULL, \a plist->version is invalid, + * \a plist->mode is invalid, + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRunningProcessDetailList(nvmlDevice_t device, nvmlProcessDetailList_t *plist); + /** * Check if the GPU devices are on the same physical board. 
* @@ -5230,156 +7028,495 @@ nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_ * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device - * @param bar1Memory Reference in which BAR1 memory - * information is returned. + * @param bar1Memory Reference in which BAR1 memory + * information is returned. + * + * @return + * - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory); + +/** + * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power + * or thermal constraints. + * + * The method is important to users who are trying to understand if their GPUs throttle at any point during their applications. The + * difference in violation times at two different reference times gives the indication of GPU throttling event. + * + * Violation for thermal capping is not supported at this time. + * + * For Kepler &tm; or newer fully supported devices.
+ * + * @param device The identifier of the target device + * @param perfPolicyType Represents Performance policy which can trigger GPU throttling + * @param violTime Reference to which violation time related information is returned + * + * + * @return + * - \ref NVML_SUCCESS if violation time is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime); + +/** + * Gets the device's interrupt number + * + * @param device The identifier of the target device + * @param irqNum The interrupt number associated with the specified device + * + * @return + * - \ref NVML_SUCCESS if irq number is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a irqNum is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int *irqNum); + +/** + * Gets the device's core count + * + * @param device The identifier of the target device + * @param numCores The number of cores for the specified device + * + * @return + * - \ref NVML_SUCCESS if GPU core count is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a numCores is NULL 
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int *numCores); + +/** + * Gets the devices power source + * + * @param device The identifier of the target device + * @param powerSource The power source of the device + * + * @return + * - \ref NVML_SUCCESS if the current power source was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a powerSource is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t *powerSource); + +/** + * Gets the device's memory bus width + * + * @param device The identifier of the target device + * @param busWidth The devices's memory bus width + * + * @return + * - \ref NVML_SUCCESS if the memory bus width is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a busWidth is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int *busWidth); + +/** + * Gets the device's PCIE Max Link speed in MBPS + * + * @param device The identifier of the target device + * @param maxSpeed The devices's PCIE Max Link speed in MBPS + * + * @return + * - \ref NVML_SUCCESS if PCIe Max Link Speed is successfully retrieved + * - \ref 
NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a maxSpeed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int *maxSpeed); + +/** + * Gets the device's PCIe Link speed in Mbps + * + * @param device The identifier of the target device + * @param pcieSpeed The devices's PCIe Max Link speed in Mbps + * + * @return + * - \ref NVML_SUCCESS if \a pcieSpeed has been retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pcieSpeed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support PCIe speed getting + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieSpeed(nvmlDevice_t device, unsigned int *pcieSpeed); + +/** + * Gets the device's Adaptive Clock status + * + * @param device The identifier of the target device + * @param adaptiveClockStatus The current adaptive clocking status, either + * NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED + * or NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED + * + * @return + * - \ref NVML_SUCCESS if the current adaptive clocking status is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a adaptiveClockStatus is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int 
*adaptiveClockStatus); + +/** + * Get the type of the GPU Bus (PCIe, PCI, ...) + * + * @param device The identifier of the target device + * @param type The PCI Bus type + * + * return + * - \ref NVML_SUCCESS if the bus \a type is successfully retreived + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a type is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t *type); + + + /** + * Deprecated: Will be deprecated in a future release. Use \ref nvmlDeviceGetGpuFabricInfoV instead + * + * Get fabric information associated with the device. + * + * For Hopper &tm; or newer fully supported devices. + * + * On Hopper + NVSwitch systems, GPU is registered with the NVIDIA Fabric Manager + * Upon successful registration, the GPU is added to the NVLink fabric to enable + * peer-to-peer communication. + * This API reports the current state of the GPU in the NVLink fabric + * along with other useful information. + * + * + * @param device The identifier of the target device + * @param gpuFabricInfo Information about GPU fabric state + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuFabricInfo(nvmlDevice_t device, nvmlGpuFabricInfo_t *gpuFabricInfo); + +/** +* Versioned wrapper around \ref nvmlDeviceGetGpuFabricInfo that accepts a versioned +* \ref nvmlGpuFabricInfo_v2_t or later output structure. +* +* @note The caller must set the \ref nvmlGpuFabricInfoV_t.version field to the +* appropriate version prior to calling this function. 
For example: +* \code +* nvmlGpuFabricInfoV_t fabricInfo = +* { .version = nvmlGpuFabricInfo_v2 }; +* nvmlReturn_t result = nvmlDeviceGetGpuFabricInfoV(device,&fabricInfo); +* \endcode +* +* For Hopper &tm; or newer fully supported devices. +* +* @param device The identifier of the target device +* @param gpuFabricInfo Information about GPU fabric state +* +* @return +* - \ref NVML_SUCCESS Upon success +* - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuFabricInfoV(nvmlDevice_t device, + nvmlGpuFabricInfoV_t *gpuFabricInfo); + +/** + * Get Conf Computing System capabilities. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param capabilities System CC capabilities + * + * @return + * - \ref NVML_SUCCESS if \a capabilities were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a capabilities is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeCapabilities(nvmlConfComputeSystemCaps_t *capabilities); + +/** + * Get Conf Computing System State. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param state System CC State + * + * @return + * - \ref NVML_SUCCESS if \a state were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a state is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeState(nvmlConfComputeSystemState_t *state); + +/** + * Get Conf Computing Protected and Unprotected Memory Sizes. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. 
+ * + * @param device Device handle + * @param memInfo Protected/Unprotected Memory sizes + * + * @return + * - \ref NVML_SUCCESS if \a memInfo were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a memInfo or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice_t device, nvmlConfComputeMemSizeInfo_t *memInfo); + +/** + * Get Conf Computing GPUs ready state. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param isAcceptingWork Returns GPU current work accepting state, + * NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or + * NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE + * + * return + * - \ref NVML_SUCCESS if \a current GPUs ready state were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeGpusReadyState(unsigned int *isAcceptingWork); + +/** + * Get Conf Computing protected memory usage. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. 
+ * + * @param device The identifier of the target device + * @param memory Reference in which to return the memory information + * + * @return + * - \ref NVML_SUCCESS if \a memory has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice_t device, nvmlMemory_t *memory); + +/** + * Get Conf Computing GPU certificate details. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param device The identifier of the target device + * @param gpuCert Reference in which to return the gpu certificate information * * @return - * - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved + * - \ref NVML_SUCCESS if \a gpu certificate info has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * */ -nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory); +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice_t device, + nvmlConfComputeGpuCertificate_t *gpuCert); /** - * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power - * or thermal constraints. 
- * - * The method is important to users who are tying to understand if their GPUs throttle at any point during their applications. The - * difference in violation times at two different reference times gives the indication of GPU throttling event. - * - * Violation for thermal capping is not supported at this time. + * Get Conf Computing GPU attestation report. * - * For Kepler &tm; or newer fully supported devices. + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. * * @param device The identifier of the target device - * @param perfPolicyType Represents Performance policy which can trigger GPU throttling - * @param violTime Reference to which violation time related information is returned - * + * @param gpuAtstReport Reference in which to return the gpu attestation report * * @return - * - \ref NVML_SUCCESS if violation time is successfully retrieved + * - \ref NVML_SUCCESS if \a gpu attestation report has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime); - +nvmlReturn_t DECLDIR nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice_t device, + nvmlConfComputeGpuAttestationReport_t *gpuAtstReport); /** - * Gets the device's interrupt number + * Get Conf Computing key rotation threshold detail. 
* - * @param device The identifier of the target device - * @param irqNum The interrupt number associated with the specified device + * For Hopper &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * @param pKeyRotationThrInfo Reference in which to return the key rotation threshold data * * @return - * - \ref NVML_SUCCESS if irq number is successfully retrieved + * - \ref NVML_SUCCESS if \a gpu key rotation threshold info has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a irqNum is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int *irqNum); +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeKeyRotationThresholdInfo( + nvmlConfComputeGetKeyRotationThresholdInfo_t *pKeyRotationThrInfo); /** - * Gets the device's core count + * Set Conf Computing Unprotected Memory Size. * - * @param device The identifier of the target device - * @param numCores The number of cores for the specified device + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. 
+ * + * @param device Device Handle + * @param sizeKiB Unprotected Memory size to be set in KiB * * @return - * - \ref NVML_SUCCESS if Gpu core count is successfully retrieved + * - \ref NVML_SUCCESS if \a sizeKiB successfully set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a numCores is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * */ -nvmlReturn_t DECLDIR nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int *numCores); +nvmlReturn_t DECLDIR nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice_t device, unsigned long long sizeKiB); /** - * Gets the devices power source + * Set Conf Computing GPUs ready state. * - * @param device The identifier of the target device - * @param powerSource The power source of the device + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. 
* - * @return - * - \ref NVML_SUCCESS if the current power source was successfully retrieved + * @param isAcceptingWork GPU accepting new work, NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE or + * NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE + * + * return + * - \ref NVML_SUCCESS if \a current GPUs ready state is successfully set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a powerSource is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a isAcceptingWork is invalid * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t *powerSource); +nvmlReturn_t DECLDIR nvmlSystemSetConfComputeGpusReadyState(unsigned int isAcceptingWork); /** - * Gets the device's memory bus width + * Set Conf Computing key rotation threshold. * - * @param device The identifier of the target device - * @param busWidth The devices's memory bus width + * For Hopper &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. + * + * This function is to set the confidential compute key rotation threshold parameters. + * \a pKeyRotationThrInfo->maxAttackerAdvantage should be in the range from + * NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MIN to NVML_CC_KEY_ROTATION_THRESHOLD_ATTACKER_ADVANTAGE_MAX. + * Default value is 60. 
+ * + * @param pKeyRotationThrInfo Reference to the key rotation threshold data * * @return - * - \ref NVML_SUCCESS if the memory bus width is successfully retrieved + * - \ref NVML_SUCCESS if \a key rotation threashold max attacker advantage has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a busWidth is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_INVALID_STATE if confidential compute GPU ready state is enabled * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int *busWidth); +nvmlReturn_t DECLDIR nvmlSystemSetConfComputeKeyRotationThresholdInfo( + nvmlConfComputeSetKeyRotationThresholdInfo_t *pKeyRotationThrInfo); /** - * Gets the device's PCIE Max Link speed in MBPS + * Get Conf Computing System Settings. * - * @param device The identifier of the target device - * @param maxSpeed The devices's PCIE Max Link speed in MBPS + * For Hopper &tm; or newer fully supported devices. + * Supported on Linux, Windows TCC. 
+ * + * @param settings System CC settings * * @return - * - \ref NVML_SUCCESS if Pcie Max Link Speed is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a maxSpeed is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_SUCCESS If the query is success + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a counters is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST If the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the provided version is invalid/unsupported + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlSystemGetConfComputeSettings(nvmlSystemConfComputeSettings_t *settings); + +/** + * Retrieve GSP firmware version. + * + * The caller passes in buffer via \a version and corresponding GSP firmware numbered version + * is returned with the same parameter in string format. 
* + * @param device Device handle + * @param version The retrieved GSP firmware version + * + * @return + * - \ref NVML_SUCCESS if GSP firmware version is sucessfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or GSP \a version pointer is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if GSP firmware is not enabled for GPU + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int *maxSpeed); +nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t device, char *version); /** - * Gets the device's PCIe Link speed in Mbps + * Retrieve GSP firmware mode. * - * @param device The identifier of the target device - * @param pcieSpeed The devices's PCIe Max Link speed in Mbps + * The caller passes in integer pointers. GSP firmware enablement and default mode information is returned with + * corresponding parameters. The return value in \a isEnabled and \a defaultMode should be treated as boolean. 
+ * + * @param device Device handle + * @param isEnabled Pointer to specify if GSP firmware is enabled + * @param defaultMode Pointer to specify if GSP firmware is supported by default on \a device * * @return - * - \ref NVML_SUCCESS if \a pcieSpeed has been retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pcieSpeed is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support PCIe speed getting + * - \ref NVML_SUCCESS if GSP firmware mode is sucessfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or any of \a isEnabled or \a defaultMode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if GSP firmware is not enabled for GPU * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieSpeed(nvmlDevice_t device, unsigned int *pcieSpeed); +nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareMode(nvmlDevice_t device, unsigned int *isEnabled, unsigned int *defaultMode); /** - * Gets the device's Adaptive Clock status + * Get SRAM ECC error status of this device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlEccSramErrorStatus_v1_t for more information on the struct. 
* * @param device The identifier of the target device - * @param adaptiveClockStatus The current adaptive clocking status + * @param status Returns SRAM ECC error status * * @return - * - \ref NVML_SUCCESS if the current adaptive clocking status is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a adaptiveClockStatus is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * + * - \ref NVML_SUCCESS If \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a counters is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST If the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a nvmlEccSramErrorStatus_t is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int *adaptiveClockStatus); +nvmlReturn_t DECLDIR nvmlDeviceGetSramEccErrorStatus(nvmlDevice_t device, + nvmlEccSramErrorStatus_t *status); /** * @} @@ -5454,8 +7591,8 @@ nvmlReturn_t DECLDIR nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned * * For Kepler &tm; or newer fully supported devices. * - * To just query the number of processes ready to be queried, call this function with *count = 0 and - * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. + * To query the number of processes under Accounting Mode, call this function with *count = 0 and pids=NULL. 
+ * The return code will be NVML_ERROR_INSUFFICIENT_SIZE with an updated count value indicating the number of processes. * * For more details see \ref nvmlDeviceGetAccountingStats. * @@ -5513,7 +7650,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsi /** * Returns the list of retired pages by source, including pages that are pending retirement * The address information provided from this API is the hardware address of the page that was retired. Note - * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 + * that this does not match the virtual address used in CUDA, but will match the address information in Xid 63 * * For Kepler &tm; or newer fully supported devices. * @@ -5541,9 +7678,9 @@ nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageReti /** * Returns the list of retired pages by source, including pages that are pending retirement * The address information provided from this API is the hardware address of the page that was retired. Note - * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 + * that this does not match the virtual address used in CUDA, but will match the address information in Xid 63 * - * \note nvmlDeviceGetRetiredPages_v2 adds an additional timestamps paramter to return the time of each page's + * \note nvmlDeviceGetRetiredPages_v2 adds an additional timestamps parameter to return the time of each page's * retirement. * * For Kepler &tm; or newer fully supported devices. 
@@ -5586,62 +7723,195 @@ nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages_v2(nvmlDevice_t device, nvmlPageR * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending); +nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending); + +/** + * Get number of remapped rows. The number of rows reported will be based on + * the cause of the remapping. isPending indicates whether or not there are + * pending remappings. A reset will be required to actually remap the row. + * failureOccurred will be set if a row remapping ever failed in the past. A + * pending remapping won't affect future work on the GPU since + * error-containment and dynamic page blacklisting will take care of that. + * + * @note On MIG-enabled GPUs with active instances, querying the number of + * remapped rows is not supported + * + * For Ampere &tm; or newer fully supported devices. 
+ * + * @param device The identifier of the target device + * @param corrRows Reference for number of rows remapped due to correctable errors + * @param uncRows Reference for number of rows remapped due to uncorrectable errors + * @param isPending Reference for whether or not remappings are pending + * @param failureOccurred Reference that is set when a remapping has failed in the past + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a corrRows, \a uncRows, \a isPending or \a failureOccurred is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN Unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRemappedRows(nvmlDevice_t device, unsigned int *corrRows, unsigned int *uncRows, + unsigned int *isPending, unsigned int *failureOccurred); + +/** + * Get the row remapper histogram. Returns the remap availability for each bank + * on the GPU. + * + * @param device Device handle + * @param values Histogram values + * + * @return + * - \ref NVML_SUCCESS On success + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t device, nvmlRowRemapperHistogramValues_t *values); + +/** + * Get architecture for device + * + * @param device The identifier of the target device + * @param arch Reference where architecture is returned, if call successful. + * Set to NVML_DEVICE_ARCH_* upon success + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a arch (output refererence) are invalid + */ +nvmlReturn_t DECLDIR nvmlDeviceGetArchitecture(nvmlDevice_t device, nvmlDeviceArchitecture_t *arch); + +/** + * Retrieves the frequency monitor fault status for the device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root user. 
+ * + * See \ref nvmlClkMonStatus_t for details on decoding the status output. + * + * @param device The identifier of the target device + * @param status Reference in which to return the clkmon fault status + * + * @return + * - \ref NVML_SUCCESS if \a status has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetClkMonStatus() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t *status); + +/** + * Retrieves the current utilization and process ID + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running. + * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at + * by \a utilization. One utilization sample structure is returned per process running, that had some non-zero utilization + * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values + * are returned as "unsigned int" values. If no valid sample entries are found since the lastSeenTimeStamp, NVML_ERROR_NOT_FOUND + * is returned. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilization set to NULL. The caller should allocate a buffer of size + * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). 
Invoke the function again with the allocated buffer passed + * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for. + * + * On successful return, the function updates \a processSamplesCount with the number of process utilization sample + * structures that were actually written. This may differ from a previously read value as instances are created or + * destroyed. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @note On MIG-enabled GPUs, querying process utilization is not currently supported. + * + * @param device The identifier of the target device + * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned + * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
+ + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, + unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); /** - * Get number of remapped rows. The number of rows reported will be based on - * the cause of the remapping. isPending indicates whether or not there are - * pending remappings. A reset will be required to actually remap the row. - * failureOccurred will be set if a row remapping ever failed in the past. A - * pending remapping won't affect future work on the GPU since - * error-containment and dynamic page blacklisting will take care of that. + * Retrieves the recent utilization and process ID for all running processes * - * @note On MIG-enabled GPUs with active instances, querying the number of - * remapped rows is not supported + * For Maxwell &tm; or newer fully supported devices. * - * For Ampere &tm; or newer fully supported devices. + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder, jpeg decoder, OFA (Optical Flow Accelerator) + * for all running processes. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at + * by \a procesesUtilInfo->procUtilArray. 
One utilization sample structure is returned per process running, that had some non-zero utilization + * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values + * are returned as "unsigned int" values. * - * @param device The identifier of the target device - * @param corrRows Reference for number of rows remapped due to correctable errors - * @param uncRows Reference for number of rows remapped due to uncorrectable errors - * @param isPending Reference for whether or not remappings are pending - * @param failureOccurred Reference that is set when a remapping has failed in the past + * The caller should allocate a buffer of size processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t). If the buffer is too small, the API will + * return \a NVML_ERROR_INSUFFICIENT_SIZE, with the recommended minimal buffer size at \a procesesUtilInfo->processSamplesCount. The caller should + * invoke the function again with the allocated buffer passed in \a procesesUtilInfo->procUtilArray, and \a procesesUtilInfo->processSamplesCount + * set to the number no less than the recommended value by the previous API return. * - * @return - * - \ref NVML_SUCCESS Upon success - * - \ref NVML_ERROR_INVALID_ARGUMENT If \a corrRows, \a uncRows, \a isPending or \a failureOccurred is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN Unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRemappedRows(nvmlDevice_t device, unsigned int *corrRows, unsigned int *uncRows, - unsigned int *isPending, unsigned int *failureOccurred); - -/** - * Get the row remapper histogram. Returns the remap availability for each bank - * on the GPU. + * On successful return, the function updates \a procesesUtilInfo->processSamplesCount with the number of process utilization info structures + * that were actually written. 
This may differ from a previously read value as instances are created or destroyed. * - * @param device Device handle - * @param values Histogram values + * \a procesesUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set \a procesesUtilInfo->lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. * + * \a procesesUtilInfo->version is the version number of the structure nvmlProcessesUtilizationInfo_t, the caller should set the correct version + * number to retrieve the specific version of processes utilization information. + * + * @note On MIG-enabled GPUs, querying process utilization is not currently supported. + * + * @param device The identifier of the target device + * @param procesesUtilInfo Pointer to the caller-provided structure of nvmlProcessesUtilizationInfo_t. + * @return - * - \ref NVML_SUCCESS On success - * - \ref NVML_ERROR_UNKNOWN On any unexpected error + * - \ref NVML_SUCCESS If \a procesesUtilInfo->procUtilArray has been populated + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, or \a procesesUtilInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If the device does not support this feature + * - \ref NVML_ERROR_NOT_FOUND If sample entries are not found + * - \ref NVML_ERROR_GPU_IS_LOST If the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a procesesUtilInfo is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE If \a procesesUtilInfo->procUtilArray is NULL, or the buffer size of procesesUtilInfo->procUtilArray is too small. 
+ * The caller should check the minimal array size from the returned procesesUtilInfo->processSamplesCount, and call + * the function again with a buffer no smaller than procesesUtilInfo->processSamplesCount * sizeof(nvmlProcessUtilizationInfo_t) + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t device, nvmlRowRemapperHistogramValues_t *values); +nvmlReturn_t DECLDIR nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice_t device, nvmlProcessesUtilizationInfo_t *procesesUtilInfo); /** - * Get architecture for device + * Get platform information of this device. + * + * %BLACKWELL_OR_NEWER% + * + * See \ref nvmlPlatformInfo_v2_t for more information on the struct. * * @param device The identifier of the target device - * @param arch Reference where architecture is returned, if call successful. - * Set to NVML_DEVICE_ARCH_* upon success + * @param platformInfo Pointer to the caller-provided structure of nvmlPlatformInfo_t. * * @return - * - \ref NVML_SUCCESS Upon success - * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a arch (output refererence) are invalid + * - \ref NVML_SUCCESS If \a platformInfo has been retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a platformInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If the device does not support this feature + * - \ref NVML_ERROR_MEMORY if system memory is insufficient + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a nvmlPlatformInfo_t is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetArchitecture(nvmlDevice_t device, nvmlDeviceArchitecture_t *arch); +nvmlReturn_t DECLDIR nvmlDeviceGetPlatformInfo(nvmlDevice_t device, nvmlPlatformInfo_t *platformInfo); /** @} */ @@ -5880,7 +8150,7 @@ typedef enum nvmlClockLimitId_enum { * Set clocks that device will lock to. 
 * * Sets the clocks that the device will be running at to the value in the range of minGpuClockMHz to maxGpuClockMHz. - * Setting this will supercede application clock values and take effect regardless if a cuda app is running. + * Setting this will supersede application clock values and take effect regardless if a cuda app is running. * See /ref nvmlDeviceSetApplicationsClocks * * Can be used as a setting to request constant performance. @@ -6039,27 +8309,154 @@ nvmlReturn_t DECLDIR nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t device); nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz); /** - * Retrieves the frequency monitor fault status for the device. + * Resets the application clock to the default value * - * For Ampere &tm; or newer fully supported devices. - * Requires root user. + * This is the applications clock that will be used after system reboot or driver reload. + * Default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks. * - * See \ref nvmlClkMonStatus_t for details on decoding the status output. + * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks, + * this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above + * base clocks as thermal limits allow. + * + * @see nvmlDeviceGetApplicationsClock + * @see nvmlDeviceSetApplicationsClocks + * + * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. 
* * @param device The identifier of the target device - * @param status Reference in which to return the clkmon fault status * * @return - * - \ref NVML_SUCCESS if \a status has been set + * - \ref NVML_SUCCESS if new settings were successfully set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device); + +/** + * Try to set the current state of Auto Boosted clocks on a device. + * + * For Kepler &tm; or newer fully supported devices. + * + * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates + * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock + * rates are desired. + * + * Non-root users may use this API by default but can be restricted by root from using this API by calling + * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS. + * Note: Persistence Mode is required to modify current Auto Boost settings, therefore, it must be enabled. + * + * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. + * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost + * behavior. 
+ * + * @param device The identifier of the target device + * @param enabled What state to try to set Auto Boosted clocks of the target device to + * + * @return + * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error * - * @see nvmlDeviceGetClkMonStatus() */ -nvmlReturn_t DECLDIR nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t *status); +nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled); + +/** + * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will + * return to when no compute running processes (e.g. CUDA application which have an active context) are running + * + * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. + * Requires root/admin permissions. + * + * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates + * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock + * rates are desired. + * + * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. + * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost + * behavior. + * + * @param device The identifier of the target device + * @param enabled What state to try to set default Auto Boosted clocks of the target device to + * @param flags Flags that change the default behavior. 
Currently Unused. + * + * @return + * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags); + +/** + * Sets the speed of the fan control policy to default. + * + * For all cuda-capable discrete products with fans + * + * @param device The identifier of the target device + * @param fan The index of the fan, starting at zero + * + * return + * NVML_SUCCESS if speed has been adjusted + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if device is invalid + * NVML_ERROR_NOT_SUPPORTED if the device does not support this + * (doesn't have fans) + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan); + +/** + * Sets current fan control policy. + * + * For Maxwell &tm; or newer fully supported devices. + * + * Requires privileged user. 
+ * + * For all cuda-capable discrete products with fans + * + * device The identifier of the target \a device + * policy The fan control \a policy to set + * + * return + * NVML_SUCCESS if \a policy has been set + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference + * a fan that exists. + * NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, + nvmlFanControlPolicy_t policy); + +/** + * Sets the temperature threshold for the GPU with the specified threshold type in degrees C. + * + * For Maxwell &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value to be set + * @param temp Reference which hold the value to be set + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int *temp); /** * Set new power limit of this device. 
@@ -6142,10 +8539,70 @@ nvmlReturn_t DECLDIR nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuO * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlRestrictedAPI_t + * + * @see nvmlRestrictedAPI_t + */ +nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted); + +/** + * Sets the speed of a specified fan. + * + * WARNING: This function changes the fan control policy to manual. It means that YOU have to monitor + * the temperature and adjust the fan speed accordingly. + * If you set the fan speed too low you can burn your GPU! + * Use nvmlDeviceSetDefaultFanSpeed_v2 to restore default control policy. + * + * For all cuda-capable discrete products with fans that are Maxwell or Newer. + * + * device The identifier of the target device + * fan The index of the fan, starting at zero + * speed The target speed of the fan [0-100] in % of max speed + * + * return + * NVML_SUCCESS if the fan speed has been set + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if the device is not valid, or the speed is outside acceptable ranges, + * or if the fan index doesn't reference an actual fan. + * NVML_ERROR_NOT_SUPPORTED if the device is older than Maxwell. + * NVML_ERROR_UNKNOWN if there was an unexpected error. + */ +nvmlReturn_t DECLDIR nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed); + +/** + * Deprecated: Will be deprecated in a future release. Use \ref nvmlDeviceSetClockOffsets instead. It works + * on Maxwell onwards GPU architectures. 
+ * + * Set the GPCCLK VF offset value + * @param[in] device The identifier of the target device + * @param[in] offset The GPCCLK VF offset value to set + * + * @return + * - \ref NVML_SUCCESS if \a offset has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetGpcClkVfOffset(nvmlDevice_t device, int offset); + +/** + * Deprecated: Will be deprecated in a future release. Use \ref nvmlDeviceSetClockOffsets instead. It works + * on Maxwell onwards GPU architectures. + * + * Set the MemClk (Memory Clock) VF offset value. It requires elevated privileges. + * @param[in] device The identifier of the target device + * @param[in] offset The MemClk VF offset value to set + * + * @return + * - \ref NVML_SUCCESS if \a offset has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted); +nvmlReturn_t DECLDIR nvmlDeviceSetMemClkVfOffset(nvmlDevice_t device, int offset); /** * @} @@ -6209,6 +8666,99 @@ nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnable */ nvmlReturn_t DECLDIR nvmlDeviceClearAccountingPids(nvmlDevice_t device); +/** + * Set new power limit of 
this device. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. + * + * See \ref nvmlPowerValue_v2_t for more information on the struct. + * + * \note Limit is not persistent across reboots or driver unloads. + * Enable persistent mode to prevent driver from unloading when no application is using the device. + * + * This API replaces nvmlDeviceSetPowerManagementLimit. It can be used as a drop-in replacement for the older version. + * + * @param device The identifier of the target device + * @param powerValue Power management limit in milliwatts to set + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a powerValue is NULL or contains invalid values + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see NVML_FI_DEV_POWER_AVERAGE + * @see NVML_FI_DEV_POWER_INSTANT + * @see NVML_FI_DEV_POWER_MIN_LIMIT + * @see NVML_FI_DEV_POWER_MAX_LIMIT + * @see NVML_FI_DEV_POWER_CURRENT_LIMIT + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice_t device, nvmlPowerValue_v2_t *powerValue); + +/***************************************************************************************************/ +/** @defgroup NVML NVLink + * @{ + */ +/***************************************************************************************************/ + +#define NVML_NVLINK_BER_MANTISSA_SHIFT 8 +#define NVML_NVLINK_BER_MANTISSA_WIDTH 0xf + +#define NVML_NVLINK_BER_EXP_SHIFT 0 +#define NVML_NVLINK_BER_EXP_WIDTH 0xff + +/** + * Nvlink Error counter BER can be obtained using the below 
macros + * Ex - NVML_NVLINK_ERROR_COUNTER_BER_GET(var, BER_MANTISSA) + */ +#define NVML_NVLINK_ERROR_COUNTER_BER_GET(var, type) \ + (((var) >> NVML_NVLINK_##type##_SHIFT) & \ + (NVML_NVLINK_##type##_WIDTH)) \ + +/* + * NVML_FI_DEV_NVLINK_GET_STATE state enums + */ +#define NVML_NVLINK_STATE_INACTIVE 0x0 +#define NVML_NVLINK_STATE_ACTIVE 0x1 +#define NVML_NVLINK_STATE_SLEEP 0x2 + +#define NVML_NVLINK_TOTAL_SUPPORTED_BW_MODES 23 + +typedef struct +{ + unsigned int version; + unsigned char bwModes[NVML_NVLINK_TOTAL_SUPPORTED_BW_MODES]; + unsigned char totalBwModes; +} nvmlNvlinkSupportedBwModes_v1_t; +typedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t; +#define nvmlNvlinkSupportedBwModes_v1 NVML_STRUCT_VERSION(NvlinkSupportedBwModes, 1) + +typedef struct +{ + unsigned int version; + unsigned int bIsBest; + unsigned char bwMode; +} nvmlNvlinkGetBwMode_v1_t; +typedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t; +#define nvmlNvlinkGetBwMode_v1 NVML_STRUCT_VERSION(NvlinkGetBwMode, 1) + +typedef struct +{ + unsigned int version; + unsigned int bSetBest; + unsigned char bwMode; +} nvmlNvlinkSetBwMode_v1_t; +typedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t; +#define nvmlNvlinkSetBwMode_v1 NVML_STRUCT_VERSION(NvlinkSetBwMode, 1) + +/** @} */ // @defgroup NVML NVLink + + /** @} */ /***************************************************************************************************/ @@ -6245,7 +8795,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int * * @param device The identifier of the target device * @param link Specifies the NvLink link to be queried - * @param version Requested NvLink version + * @param version Requested NvLink version from nvmlNvlinkVersion_t * * @return * - \ref NVML_SUCCESS if \a version has been set @@ -6474,6 +9024,102 @@ nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t devic */ nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t device, unsigned 
int link, nvmlIntNvLinkDeviceType_t *pNvLinkDeviceType); +/** + * Set NvLink Low Power Threshold for device. + * + * For Hopper &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param info Reference to \a nvmlNvLinkPowerThres_t struct + * input parameters + * + * @return + * - \ref NVML_SUCCESS if the \a Threshold is successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a Threshold is not within range + * - \ref NVML_ERROR_NOT_READY if an internal driver setting prevents the threshold from being used + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * + **/ +nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t device, nvmlNvLinkPowerThres_t *info); + +/** + * Set the global nvlink bandwidth mode + * + * @param nvlinkBwMode nvlink bandwidth mode + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid argument is provided + * - \ref NVML_ERROR_IN_USE if P2P object exists + * - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture. + * - \ref NVML_ERROR_NO_PERMISSION if not root user + */ +nvmlReturn_t DECLDIR nvmlSystemSetNvlinkBwMode(unsigned int nvlinkBwMode); + +/** + * Get the global nvlink bandwidth mode + * + * @param nvlinkBwMode reference of nvlink bandwidth mode + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided + * - \ref NVML_ERROR_NOT_SUPPORTED if GPU is not Hopper or newer architecture. 
+ * - \ref NVML_ERROR_NO_PERMISSION if not root user + */ +nvmlReturn_t DECLDIR nvmlSystemGetNvlinkBwMode(unsigned int *nvlinkBwMode); + +/** + * Get the supported NvLink Reduced Bandwidth Modes of the device + * + * %BLACKWELL_OR_NEWER% + * + * @param device The identifier of the target device + * @param supportedBwMode Reference to \a nvmlNvlinkSupportedBwModes_t + * + * @return + * - \ref NVML_SUCCESS if the query was successful + * - \ref NVML_ERROR_INVALID_ARGUMENT if device is invalid or supportedBwMode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this feature is not supported by the device + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the version specified is not supported + **/ +nvmlReturn_t DECLDIR nvmlDeviceGetNvlinkSupportedBwModes(nvmlDevice_t device, + nvmlNvlinkSupportedBwModes_t *supportedBwMode); + +/** + * Get the NvLink Reduced Bandwidth Mode for the device + * + * %BLACKWELL_OR_NEWER% + * + * @param device The identifier of the target device + * @param getBwMode Reference to \a nvmlNvlinkGetBwMode_t + * + * @return + * - \ref NVML_SUCCESS if the query was successful + * - \ref NVML_ERROR_INVALID_ARGUMENT if device is invalid or getBwMode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this feature is not supported by the device + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the version specified is not supported + **/ +nvmlReturn_t DECLDIR nvmlDeviceGetNvlinkBwMode(nvmlDevice_t device, + nvmlNvlinkGetBwMode_t *getBwMode); + +/** + * Set the NvLink Reduced Bandwidth Mode for the device + * + * %BLACKWELL_OR_NEWER% + * + * @param device The identifier of the target device + * @param setBwMode Reference to \a nvmlNvlinkSetBwMode_t + * + * @return + * - \ref NVML_SUCCESS if the Bandwidth mode was successfully set + * - \ref NVML_ERROR_INVALID_ARGUMENT if device is invalid or setBwMode is NULL + * - \ref NVML_ERROR_NO_PERMISSION if user does not have permission to change Bandwidth mode + * - \ref NVML_ERROR_NOT_SUPPORTED if this feature is 
not supported by the device + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the version specified is not supported + **/ +nvmlReturn_t DECLDIR nvmlDeviceSetNvlinkBwMode(nvmlDevice_t device, + nvmlNvlinkSetBwMode_t *setBwMode); + /** @} */ /***************************************************************************************************/ @@ -6505,13 +9151,11 @@ nvmlReturn_t DECLDIR nvmlEventSetCreate(nvmlEventSet_t *set); * Starts recording of events on a specified devices and add the events to specified \ref nvmlEventSet_t * * For Fermi &tm; or newer fully supported devices. - * Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors) + * ECC events are available only on ECC-enabled devices (see \ref nvmlDeviceGetTotalEccErrors) * Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode) * * For Linux only. * - * \b IMPORTANT: Operations on \a set are not thread safe - * * This call starts recording of events on specific device. * All events that occurred before this call are not recorded. * Checking if some event occurred can be done with \ref nvmlEventSetWait_v2 @@ -6571,11 +9215,11 @@ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsig * but not longer than specified timeout. This function in certain conditions can return before * specified timeout passes (e.g. when interrupt arrives) * - * On Windows, in case of xid error, the function returns the most recent xid error type seen by the system. - * If there are multiple xid errors generated before nvmlEventSetWait is invoked then the last seen xid error - * type is returned for all xid error events. + * On Windows, in case of Xid error, the function returns the most recent Xid error type seen by the system. + * If there are multiple Xid errors generated before nvmlEventSetWait is invoked then the last seen Xid error + * type is returned for all Xid error events. 
* - * On Linux, every xid error event would return the associated event data and other information if applicable. + * On Linux, every Xid error event would return the associated event data and other information if applicable. * * In MIG mode, if device handle is provided, the API reports all the events for the available instances, * only if the caller has appropriate privileges. In absence of required privileges, only the events which @@ -6616,6 +9260,98 @@ nvmlReturn_t DECLDIR nvmlEventSetWait_v2(nvmlEventSet_t set, nvmlEventData_t * d */ nvmlReturn_t DECLDIR nvmlEventSetFree(nvmlEventSet_t set); +/* + * Create an empty set of system events. + * Event set should be freed by \ref nvmlSystemEventSetFree + * + * For Fermi &tm; or newer fully supported devices. + * @param request Reference to nvmlSystemEventSetCreateRequest_t + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if request is NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH for unsupported version + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlSystemEventSetFree + */ +nvmlReturn_t DECLDIR nvmlSystemEventSetCreate(nvmlSystemEventSetCreateRequest_t *request); + +/** + * Releases system event set + * + * For Fermi &tm; or newer fully supported devices. 
+ * + * @param set Reference to nvmlSystemEventSetFreeRequest_t + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if request is NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH for unsupported version + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlSystemEventSetFree(nvmlSystemEventSetFreeRequest_t *request); + +/** + * Starts recording of events on system and add the events to specified \ref nvmlSystemEventSet_t + * + * For Linux only. + * + * This call starts recording of events on specific device. + * All events that occurred before this call are not recorded. + * Checking if some event occurred can be done with \ref nvmlSystemEventSetWait + * + * If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed. + * If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes + * are registered in that case. + * + * @param request Reference to the struct nvmlSystemRegisterEventRequest_t + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if request is NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH for unsupported version + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlSystemEventType + * @see nvmlSystemEventSetWait + * @see nvmlEventSetFree + */ +nvmlReturn_t DECLDIR nvmlSystemRegisterEvents(nvmlSystemRegisterEventRequest_t *request); + +/** + * Waits on system events and delivers events + * + * For Fermi &tm; or newer fully supported devices. + * + * If some events are ready to be delivered at the time of the call, function returns immediately. 
+ * If there are no events ready to be delivered, function sleeps till event arrives + * but not longer than specified timeout. This function in certain conditions can return before + * specified timeout passes (e.g. when interrupt arrives) + * + * if the return request->numEvent equals to request->dataSize, there might be outstanding + * event, it is recommended to call nvmlSystemEventSetWait again to query all the events. + * + * @param request Reference in which to nvmlSystemEventSetWaitRequest_t + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if request is NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH for unsupported version + * - \ref NVML_ERROR_TIMEOUT if no event notification after timeoutms + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlSystemEventType + * @see nvmlSystemRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlSystemEventSetWait(nvmlSystemEventSetWaitRequest_t *request); + /** @} */ /***************************************************************************************************/ @@ -6777,13 +9513,6 @@ nvmlReturn_t DECLDIR nvmlDeviceClearFieldValues(nvmlDevice_t device, int valuesC /** @} */ -/***************************************************************************************************/ -/** @defgroup vGPU Enums, Constants and Structs - * @{ - */ -/** @} */ -/***************************************************************************************************/ - /***************************************************************************************************/ /** @defgroup nvmlVirtualGpuQueries vGPU APIs * This chapter describes operations that are associated with NVIDIA vGPU Software products. @@ -6835,9 +9564,9 @@ nvmlReturn_t DECLDIR nvmlDeviceGetHostVgpuMode(nvmlDevice_t device, nvmlHostVgpu * @param virtualMode virtualization mode. 
One of NVML_GPU_VIRTUALIZATION_? * * @return - * - \ref NVML_SUCCESS if \a pVirtualMode is set + * - \ref NVML_SUCCESS if \a virtualMode is set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a virtualMode is NULL * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported. * - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client. @@ -6845,97 +9574,216 @@ nvmlReturn_t DECLDIR nvmlDeviceGetHostVgpuMode(nvmlDevice_t device, nvmlHostVgpu nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); /** - * Retrieve the vGPU Software licensable features. + * Get the vGPU heterogeneous mode for the device. * - * Identifies whether the system supports vGPU Software Licensing. If it does, return the list of licensable feature(s) - * and their current license status. + * When in heterogeneous mode, a vGPU can concurrently host timesliced vGPUs with differing framebuffer sizes. * - * @param device Identifier of the target device - * @param pGridLicensableFeatures Pointer to structure in which vGPU software licensable features are returned + * On successful return, the function returns \a pHeterogeneousMode->mode with the current vGPU heterogeneous mode. + * \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should + * set the correct version number to retrieve the vGPU heterogeneous mode. + * \a pHeterogeneousMode->mode can either be \ref NVML_FEATURE_ENABLED or \ref NVML_FEATURE_DISABLED. 
+ * + * @param device The identifier of the target device + * @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t * * @return - * - \ref NVML_SUCCESS if licensable features are successfully retrieved - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a pHeterogeneousMode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device doesn't support this feature + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice_t device, nvmlVgpuHeterogeneousMode_t *pHeterogeneousMode); /** - * Retrieves the current utilization and process ID + * Enable or disable vGPU heterogeneous mode for the device. * - * For Maxwell &tm; or newer fully supported devices. + * When in heterogeneous mode, a vGPU can concurrently host timesliced vGPUs with differing framebuffer sizes. * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running. - * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at - * by \a utilization. One utilization sample structure is returned per process running, that had some non-zero utilization - * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values - * are returned as "unsigned int" values. 
+ * API would return an appropriate error code upon unsuccessful activation. For example, the heterogeneous mode + * set will fail with error \ref NVML_ERROR_IN_USE if any vGPU instance is active on the device. The caller of this API + * is expected to shutdown the vGPU VMs and retry setting the \a mode. + * On KVM platform, setting heterogeneous mode is allowed, if no MDEV device is created on the device, else will fail + * with same error \ref NVML_ERROR_IN_USE. + * On successful return, the function updates the vGPU heterogeneous mode with the user provided \a pHeterogeneousMode->mode. + * \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should + * set the correct version number to set the vGPU heterogeneous mode. * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilization set to NULL. The caller should allocate a buffer of size - * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed - * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for. + * @param device Identifier of the target device + * @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t * - * On successful return, the function updates \a processSamplesCount with the number of process utilization sample - * structures that were actually written. This may differ from a previously read value as instances are created or - * destroyed. 
+ * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a pHeterogeneousMode is NULL or \a pHeterogeneousMode->mode is invalid + * - \ref NVML_ERROR_IN_USE If the \a device is in use + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device doesn't support this feature + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice_t device, const nvmlVgpuHeterogeneousMode_t *pHeterogeneousMode); + +/** + * Query the placement ID of active vGPU instance. * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * When in vGPU heterogeneous mode, this function returns a valid placement ID as \a pPlacement->placementId + * else NVML_INVALID_VGPU_PLACEMENT_ID is returned. + * \a pPlacement->version is the version number of the structure nvmlVgpuPlacementId_t, the caller should + * set the correct version number to get placement id of the vGPU instance \a vgpuInstance. * - * @note On MIG-enabled GPUs, querying process utilization is not currently supported. 
+ * @param vgpuInstance Identifier of the target vGPU instance + * @param pPlacement Pointer to vGPU placement ID structure \a nvmlVgpuPlacementId_t * - * @param device The identifier of the target device - * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned - * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. + * @return + * - \ref NVML_SUCCESS If information is successfully retrieved + * - \ref NVML_ERROR_NOT_FOUND If \a vgpuInstance does not match a valid active vGPU instance + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuInstance is invalid or \a pPlacement is NULL + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pPlacement is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuPlacementId_t *pPlacement); +/** + * Query the supported vGPU placement ID of the vGPU type. + * + * The function returns an array of supported vGPU placement IDs for the specified vGPU type ID in the buffer provided + * by the caller at \a pPlacementList->placementIds. The required memory for the placementIds array must be allocated + * based on the maximum number of vGPU type instances, which is retrievable through \ref nvmlVgpuTypeGetMaxInstances(). + * If the provided count by the caller is insufficient, the function will return NVML_ERROR_INSUFFICIENT_SIZE along with + * the number of required entries in \a pPlacementList->count. The caller should then reallocate a buffer with the size + * of pPlacementList->count * sizeof(pPlacementList->placementIds) and invoke the function again. + * + * To obtain a list of homogeneous placement IDs, the caller needs to set \a pPlacementList->mode to NVML_VGPU_PGPU_HOMOGENEOUS_MODE. 
+ * For heterogeneous placement IDs, \a pPlacementList->mode should be set to NVML_VGPU_PGPU_HETEROGENEOUS_MODE. + * By default, a list of heterogeneous placement IDs is returned. + * + * @param device Identifier of the target device + * @param vgpuTypeId Handle to vGPU type. The vGPU type ID + * @param pPlacementList Pointer to the vGPU placement structure \a nvmlVgpuPlacementList_t + * * @return - * - \ref NVML_SUCCESS if \a utilization has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device or \a vgpuTypeId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pPlacementList is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE If the buffer is small, element count is returned in \a pPlacementList->count + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, - unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t *pPlacementList); 
/** - * Retrieve GSP firmware version. + * Query the creatable vGPU placement ID of the vGPU type. * - * The caller passes in buffer via \a version and corresponding GSP firmware numbered version - * is returned with the same parameter in string format. + * An array of creatable vGPU placement IDs for the vGPU type ID indicated by \a vgpuTypeId is returned in the + * caller-supplied buffer of \a pPlacementList->placementIds. Memory needed for the placementIds array should be + * allocated based on maximum instances of a vGPU type which can be queried via \ref nvmlVgpuTypeGetMaxInstances(). + * If the provided count by the caller is insufficient, the function will return NVML_ERROR_INSUFFICIENT_SIZE along with + * the number of required entries in \a pPlacementList->count. The caller should then reallocate a buffer with the size + * of pPlacementList->count * sizeof(pPlacementList->placementIds) and invoke the function again. * - * @param device Device handle - * @param version The retrieved GSP firmware version + * The creatable vGPU placement IDs may differ over time, as there may be restrictions on what type of vGPU the + * vGPU instance is running. + * + * @param device The identifier of the target device + * @param vgpuTypeId Handle to vGPU type. 
The vGPU type ID + * @param pPlacementList Pointer to the list of vGPU placement structure \a nvmlVgpuPlacementList_t * * @return - * - \ref NVML_SUCCESS if GSP firmware version is sucessfully retrieved - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or GSP \a version pointer is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if GSP firmware is not enabled for GPU - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a vgpuTypeId is invalid or \a pPlacementList is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device or \a vgpuTypeId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pPlacementList is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t device, char *version); +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t *pPlacementList); /** - * Retrieve GSP firmware mode. 
+ * Retrieve the static GSP heap size of the vGPU type in bytes + * + * @param vgpuTypeId Handle to vGPU type + * @param gspHeapSize Reference to return the GSP heap size value + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a gspHeapSize is NULL + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *gspHeapSize); + +/** + * Retrieve the static framebuffer reservation of the vGPU type in bytes + * + * @param vgpuTypeId Handle to vGPU type + * @param fbReservation Reference to return the framebuffer reservation + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuTypeId is invalid, or \a fbReservation is NULL + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbReservation); + +/** + * Retrieve the currently used runtime state size of the vGPU instance + * + * This size represents the maximum in-memory data size utilized by a vGPU instance during standard operation. + * This measurement is exclusive of frame buffer (FB) data size assigned to the vGPU instance. + * + * For Maxwell &tm; or newer fully supported devices. 
+ * + * @param vgpuInstance Identifier of the target vGPU instance + * @param pState Pointer to the vGPU runtime state's structure \a nvmlVgpuRuntimeState_t + * + * @return + * - \ref NVML_SUCCESS If information is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuInstance is invalid, or \a pState is NULL + * - \ref NVML_ERROR_NOT_FOUND If \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pState is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetRuntimeStateSize(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuRuntimeState_t *pState); + +/** + * Set the desirable vGPU capability of a device + * + * Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be set. + * See \ref nvmlEnableState_t for available state. + * + * @param device The identifier of the target device + * @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be set + * @param state The target capability mode + * + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, or \a capability is invalid, or \a state is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state, or \a device not in vGPU mode + * - \ref NVML_ERROR_UNKNOWN On any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceSetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, nvmlEnableState_t state); + +/** + * Retrieve the vGPU Software licensable features. * - * The caller passes in integer pointers. GSP firmware enablement and default mode information is returned with - * corresponding parameters. 
The return value in \a isEnabled and \a defaultMode should be treated as boolean. + * Identifies whether the system supports vGPU Software Licensing. If it does, return the list of licensable feature(s) + * and their current license status. * - * @param device Device handle - * @param isEnabled Pointer to specify if GSP firmware is enabled - * @param defaultMode Pointer to specify if GSP firmware is supported by default on \a device + * @param device Identifier of the target device + * @param pGridLicensableFeatures Pointer to structure in which vGPU software licensable features are returned * * @return - * - \ref NVML_SUCCESS if GSP firmware mode is sucessfully retrieved - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or any of \a isEnabled or \a defaultMode is NULL + * - \ref NVML_SUCCESS if licensable features are successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareMode(nvmlDevice_t device, unsigned int *isEnabled, unsigned int *defaultMode); +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); /** @} */ @@ -6972,14 +9820,14 @@ nvmlReturn_t DECLDIR nvmlGetVgpuDriverCapabilities(nvmlVgpuDriverCapability_t ca * Retrieve the requested vGPU capability for GPU. * * Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be queried. - * The return value in \a capResult should be treated as a boolean, with a non-zero value indicating that the capability - * is supported. + * The return value in \a capResult reports a non-zero value indicating that the capability + * is supported, and also reports the capability's data based on the queried capability. * * For Maxwell &tm; or newer fully supported devices. 
* * @param device The identifier of the target device * @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be queried - * @param capResult A boolean for the queried capability indicating that feature is supported + * @param capResult Specifies that the queried capability is supported, and also returns capability's data * * @return * - \ref NVML_SUCCESS successful completion @@ -6997,7 +9845,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuCapabilities(nvmlDevice_t device, nvmlDevi * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount * is used to return the number of vGPU types written to the buffer. * - * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns + * If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. * To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0. * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported. @@ -7026,9 +9874,9 @@ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned i * can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable * list will be restricted to whatever vGPU type is already running on the device. * - * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns + * If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. - * To query the number of vGPU types createable for the GPU, call this function with *vgpuCount = 0. 
+ * To query the number of vGPU types that can be created for the GPU, call this function with *vgpuCount = 0. * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable. * * @param device The identifier of the target device @@ -7108,7 +9956,7 @@ nvmlReturn_t DECLDIR nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t vgpuTy * * @param vgpuTypeId Handle to vGPU type * @param deviceID Device ID and vendor ID of the device contained in single 32 bit value - * @param subsystemID Subsytem ID and subsytem vendor ID of the device contained in single 32 bit value + * @param subsystemID Subsystem ID and subsystem vendor ID of the device contained in single 32 bit value * * @return * - \ref NVML_SUCCESS successful completion @@ -7243,14 +10091,30 @@ nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTy */ nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCountPerVm); +/** + * Retrieve the BAR1 info for given vGPU type. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param bar1Info Pointer to the vGPU type BAR1 information structure \a nvmlVgpuTypeBar1Info_t + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a bar1Info is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetBAR1Info(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuTypeBar1Info_t *bar1Info); + /** * Retrieve the active vGPU instances on a device. * * An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. 
The - * array elememt count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances + * array element count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances * written to the buffer. * - * If the supplied buffer is not large enough to accomodate the vGPU instance array, the function returns + * If the supplied buffer is not large enough to accommodate the vGPU instance array, the function returns * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount. * To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return * NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU Types are supported. @@ -7451,7 +10315,7 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance_t vgpuInstance, * @param encoderCapacity Reference to an unsigned int for the encoder capacity * * @return - * - \ref NVML_SUCCESS if \a encoderCapacity has been retrived + * - \ref NVML_SUCCESS if \a encoderCapacity has been retrieved * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderQueryType is invalid * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system @@ -7504,7 +10368,7 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInst * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions * written to the buffer. * - * If the supplied buffer is not large enough to accomodate the active session array, the function returns + * If the supplied buffer is not large enough to accommodate the active session array, the function returns * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. 
* To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. @@ -7534,7 +10398,7 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuI * For Maxwell &tm; or newer fully supported devices. * * @param vgpuInstance Identifier of the target vGPU instance -* @param fbcStats Reference to nvmlFBCStats_t structure contianing NvFBC stats +* @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats * * @return * - \ref NVML_SUCCESS if \a fbcStats is fetched @@ -7552,7 +10416,7 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions * written to the buffer. * -* If the supplied buffer is not large enough to accomodate the active session array, the function returns +* If the supplied buffer is not large enough to accommodate the active session array, the function returns * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount. * To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return * NVML_SUCCESS with number of active FBC sessions updated in *sessionCount. @@ -7633,6 +10497,262 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t vgpuInstance */ nvmlReturn_t DECLDIR nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuCapability_t capability, unsigned int *capResult); +/** + * Retrieve the MDEV UUID of a vGPU instance. + * + * The MDEV UUID is a globally unique identifier of the mdev device assigned to the VM, and is returned as a 5-part hexadecimal string, + * not exceeding 80 characters in length (including the NULL terminator). + * MDEV UUID is displayed only on KVM platform. 
+ * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param mdevUuid Pointer to caller-supplied buffer to hold MDEV UUID + * @param size Size of buffer in bytes + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED on any hypervisor other than KVM + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mdevUuid is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t vgpuInstance, char *mdevUuid, unsigned int size); + +/** + * Query the currently creatable vGPU types on a specific GPU Instance. + * + * The function returns an array of vGPU types that can be created for a specified GPU instance. This array is stored + * in a caller-supplied buffer, with the buffer's element count passed through \a pVgpus->vgpuCount. The number of + * vGPU types written to the buffer is indicated by \a pVgpus->vgpuCount. If the buffer is too small to hold the vGPU + * type array, the function returns NVML_ERROR_INSUFFICIENT_SIZE and updates \a pVgpus->vgpuCount with the required + * element count. + * + * To determine the creatable vGPUs for a GPU Instance, invoke this function with \a pVgpus->vgpuCount set to 0 and + * \a pVgpus->vgpuTypeIds as NULL. This will result in NVML_ERROR_INSUFFICIENT_SIZE being returned, along with the + * count value in \a pVgpus->vgpuCount. + * + * The creatable vGPU types may differ over time, as there may be restrictions on what type of vGPUs can concurrently + * run on the device. 
+ * + * @param gpuInstance The GPU instance handle + * @param pVgpus Pointer to the caller-provided structure of nvmlVgpuTypeIdInfo_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, or \a pVgpus is NULL + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU + * - \ref NVML_ERROR_INSUFFICIENT_SIZE If \a pVgpus->vgpuTypeIds buffer is small + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pVgpus is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetCreatableVgpus(nvmlGpuInstance_t gpuInstance, nvmlVgpuTypeIdInfo_t *pVgpus); + +/** + * Retrieve the maximum number of vGPU instances per GPU instance for given vGPU type + * + * @param pMaxInstance Pointer to the caller-provided structure of nvmlVgpuTypeMaxInstance_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a pMaxInstance is NULL or \a pMaxInstance->vgpuTypeId is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU or non-MIG vGPU type + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pMaxInstance is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstancesPerGpuInstance(nvmlVgpuTypeMaxInstance_t *pMaxInstance); + +/** + * Retrieve the active vGPU instances within a GPU instance. + * + * An array of active vGPU instances is returned in the caller-supplied buffer pointed + * at by \a pVgpuInstanceInfo->vgpuInstances. 
The array element count is passed in + * \a pVgpuInstanceInfo->vgpuCount, and \a pVgpuInstanceInfo->vgpuCount is used to return + * the number of vGPU instances written to the buffer. + * + * If the supplied buffer is not large enough to accommodate the vGPU instance array, + * the function returns NVML_ERROR_INSUFFICIENT_SIZE, with the element count of + * nvmlVgpuInstance_t array required in \a pVgpuInstanceInfo->vgpuCount. To query the + * number of active vGPU instances, call this function with pVgpuInstanceInfo->vgpuCount = 0 + * and pVgpuInstanceInfo->vgpuTypeIds = NULL. The code will return NVML_ERROR_INSUFFICIENT_SIZE, + * or NVML_SUCCESS if no vGPU Types are active. + * + * @param gpuInstance The GPU instance handle + * @param pVgpuInstanceInfo Pointer to the vGPU instance information structure \a nvmlActiveVgpuInstanceInfo_t + * + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, or \a pVgpuInstanceInfo is NULL + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a pVgpuInstanceInfo->vgpuTypeIds buffer is too small, + * array element count is returned in \a pVgpuInstanceInfo->vgpuCount + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pVgpuInstanceInfo is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetActiveVgpus(nvmlGpuInstance_t gpuInstance, nvmlActiveVgpuInstanceInfo_t *pVgpuInstanceInfo); + +/** + * Set vGPU scheduler state for the given GPU instance + * + * %GB20X_OR_NEWER% + * + * Scheduler state and params will be allowed to set only when no VM is running within the GPU instance. + * In \a nvmlVgpuSchedulerState_t, IFF enableARRMode is enabled then provide the avgFactor and frequency + * as input. 
If enableARRMode is disabled then provide timeslice as input. + * + * The scheduler state change won't persist across module load/unload and GPU Instance creation/deletion. + * + * @param gpuInstance The GPU instance handle + * @param pScheduler Pointer to the caller-provided structure of nvmlVgpuSchedulerState_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, or \a pScheduler is NULL + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_RESET_REQUIRED If setting the state failed with fatal error, reboot is required + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU or if any vGPU instance exists + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pScheduler is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceSetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerState_t *pScheduler); + +/** + * Returns the vGPU scheduler state for the given GPU instance. + * The information returned in \a nvmlVgpuSchedulerStateInfo_t is not relevant if the BEST EFFORT policy is set. 
+ * + * %GB20X_OR_NEWER% + * + * @param gpuInstance The GPU instance handle + * @param pSchedulerStateInfo Reference in which \a pSchedulerStateInfo is returned + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler state is successfully obtained + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, or \a pSchedulerStateInfo is NULL + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pSchedulerStateInfo is invalid + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerStateInfo_t *pSchedulerStateInfo); + +/** + * Returns the vGPU scheduler logs for the given GPU instance. + * \a pSchedulerLogInfo points to a caller-allocated structure to contain the logs. The number of elements returned will + * never exceed \a NVML_SCHEDULER_SW_MAX_LOG_ENTRIES. + * + * To get the entire logs, call the function atleast 5 times a second. 
+ * + * %GB20X_OR_NEWER% + * + * @param gpuInstance The GPU instance handle + * @param pSchedulerLogInfo Reference in which \a pSchedulerLogInfo is written + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler logs are successfully obtained + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, or \a pSchedulerLogInfo is NULL + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pSchedulerLogInfo is invalid + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetVgpuSchedulerLog(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerLogInfo_t *pSchedulerLogInfo); + +/** + * Query the creatable vGPU placement ID of the vGPU type within a GPU instance. + * + * %GB20X_OR_NEWER% + * + * An array of creatable vGPU placement IDs for the vGPU type ID indicated by \a pCreatablePlacementInfo->vgpuTypeId + * is returned in the caller-supplied buffer of \a pCreatablePlacementInfo->placementIds. Memory needed for the + * placementIds array should be allocated based on maximum instances of a vGPU type per GPU instance which can be + * queried via \ref nvmlVgpuTypeGetMaxInstancesPerGpuInstance(). + * If the provided count by the caller is insufficient, the function will return NVML_ERROR_INSUFFICIENT_SIZE along with + * the number of required entries in \a pCreatablePlacementInfo->count. The caller should then reallocate a buffer with the size + * of pCreatablePlacementInfo->count * sizeof(pCreatablePlacementInfo->placementIds) and invoke the function again. + * The creatable vGPU placement IDs may differ over time, as there may be restrictions on what type of vGPU the + * vGPU instance is running. 
+ * + * @param gpuInstance The GPU instance handle + * @param pCreatablePlacementInfo Pointer to the list of vGPU creatable placement structure \a nvmlVgpuCreatablePlacementInfo_t + * + * @return + * - \ref NVML_SUCCESS Successful completion + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, or \a pCreatablePlacementInfo is NULL + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE If the buffer is small, element count is returned in \a pCreatablePlacementInfo->count + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pCreatablePlacementInfo is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU or vGPU heterogeneous mode is not enabled + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetVgpuTypeCreatablePlacements(nvmlGpuInstance_t gpuInstance, nvmlVgpuCreatablePlacementInfo_t *pCreatablePlacementInfo); + +/** + * Get the vGPU heterogeneous mode for the GPU instance. + * + * When in heterogeneous mode, a vGPU can concurrently host timesliced vGPUs with differing framebuffer sizes. + * + * On successful return, the function returns \a pHeterogeneousMode->mode with the current vGPU heterogeneous mode. + * \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should + * set the correct version number to retrieve the vGPU heterogeneous mode. + * \a pHeterogeneousMode->mode can either be \ref NVML_FEATURE_ENABLED or \ref NVML_FEATURE_DISABLED. 
+ * + * %GB20X_OR_NEWER% + * + * @param gpuInstance The GPU instance handle + * @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, or \a pHeterogeneousMode is NULL + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU or not in MIG mode + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, nvmlVgpuHeterogeneousMode_t *pHeterogeneousMode); + +/** + * Enable or disable vGPU heterogeneous mode for the GPU instance. + * + * When in heterogeneous mode, a vGPU can concurrently host timesliced vGPUs with differing framebuffer sizes. + * + * API would return an appropriate error code upon unsuccessful activation. For example, the heterogeneous mode + * set will fail with error \ref NVML_ERROR_IN_USE if any vGPU instance is active within the GPU instance. + * The caller of this API is expected to shutdown the vGPU VMs and retry setting the \a mode. + * On successful return, the function updates the vGPU heterogeneous mode with the user provided \a pHeterogeneousMode->mode. + * \a pHeterogeneousMode->version is the version number of the structure nvmlVgpuHeterogeneousMode_t, the caller should + * set the correct version number to set the vGPU heterogeneous mode. 
+ * + * %GB20X_OR_NEWER% + * + * @param gpuInstance The GPU instance handle + * @param pHeterogeneousMode Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is NULL or invalid, + * or \a pHeterogeneousMode is NULL or \a pHeterogeneousMode->mode is invalid + * or GPU Instance Id is invalid + * - \ref NVML_ERROR_IN_USE If the \a gpuInstance is in use + * - \ref NVML_ERROR_NOT_SUPPORTED If not on a vGPU host or an unsupported GPU + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a pHeterogeneousMode is invalid + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceSetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, const nvmlVgpuHeterogeneousMode_t *pHeterogeneousMode); + /** @} */ /***************************************************************************************************/ @@ -7662,7 +10782,7 @@ typedef struct nvmlVgpuMetadata_st char guestDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in guest char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in host unsigned int reserved[6]; //!< Reserved for internal use - unsigned int vgpuVirtualizationCaps; //!< vGPU virtualizaion capabilities bitfileld + unsigned int vgpuVirtualizationCaps; //!< vGPU virtualization capabilities bitfield unsigned int guestVgpuVersion; //!< vGPU version of guest driver unsigned int opaqueDataSize; //!< Size of opaque data field in bytes char opaqueData[4]; //!< Opaque data @@ -7676,7 +10796,7 @@ typedef struct nvmlVgpuPgpuMetadata_st unsigned int version; //!< Current version of the structure unsigned int revision; //!< Current revision of the structure char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Host 
driver version
- unsigned int pgpuVirtualizationCaps; //!< Pgpu virtualizaion capabilities bitfileld
+ unsigned int pgpuVirtualizationCaps; //!< Pgpu virtualization capabilities bitfield
 unsigned int reserved[5]; //!< Reserved for internal use
 nvmlVgpuVersion_t hostSupportedVgpuRange; //!< vGPU version range supported by host driver
 unsigned int opaqueDataSize; //!< Size of opaque data field in bytes
@@ -7774,7 +10894,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpu
 *
 * The caller passes in a buffer via \a compatibilityInfo, into which a compatibility information structure is written. The
 * structure defines the states in which the vGPU / VM may be booted on the physical GPU. If the vGPU / VM compatibility
- * with the physical GPU is limited, a limit code indicates the factor limiting compability.
+ * with the physical GPU is limited, a limit code indicates the factor limiting compatibility.
 * (see \ref nvmlVgpuPgpuCompatibilityLimitCode_t for details).
* * Note: vGPU compatibility does not take into account dynamic capacity conditions that may limit a system's ability to @@ -7786,8 +10906,8 @@ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpu * * @return * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuMetadata or \a pgpuMetadata or \a bufferSize are NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a vgpuMetadata or \a pgpuMetadata or \a bufferSize are NULL + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ nvmlReturn_t DECLDIR nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *vgpuMetadata, nvmlVgpuPgpuMetadata_t *pgpuMetadata, nvmlVgpuPgpuCompatibility_t *compatibilityInfo); @@ -7805,9 +10925,9 @@ nvmlReturn_t DECLDIR nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *vgpuMetadata, * @return * - \ref NVML_SUCCESS GPU metadata structure was successfully returned * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a pgpuMetadata buffer is too small, required size is returned in \a bufferSize - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0. - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0. 
+ * - \ref NVML_ERROR_NOT_SUPPORTED If vGPU is not supported by the system + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char *pgpuMetadata, unsigned int *bufferSize); @@ -7825,14 +10945,15 @@ nvmlReturn_t DECLDIR nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char * * * @return * - \ref NVML_SUCCESS vGPU scheduler logs were successfully obtained - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerLog is NULL or \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a pSchedulerLog is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device not in vGPU host mode + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t device, nvmlVgpuSchedulerLog_t *pSchedulerLog); /** * Returns the vGPU scheduler state. + * The information returned in \a nvmlVgpuSchedulerGetState_t is not relevant if the BEST EFFORT policy is set. * * For Pascal &tm; or newer fully supported devices. 
* @@ -7841,37 +10962,12 @@ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t device, nvmlVgpu * * @return * - \ref NVML_SUCCESS vGPU scheduler state is successfully obtained - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a pSchedulerState is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device not in vGPU host mode + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerGetState_t *pSchedulerState); -/** - * Sets the vGPU scheduler state. - * - * For Pascal &tm; or newer fully supported devices. - * - * The scheduler state change won’t persist across module load/unload. - * Scheduler state and params will be allowed to set only when no VM is running. - * In \a nvmlVgpuSchedulerSetState_t, IFF enableARRMode=1 then - * provide avgFactorForARR and frequency as input. If enableARRMode is disabled - * then provide timeslice as input. - * - * @param device The identifier of the target \a device - * @param pSchedulerState vGPU \a pSchedulerState to set - * - * @return - * - \ref NVML_SUCCESS vGPU scheduler state has been successfully set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid - * - \ref NVML_ERROR_RESET_REQUIRED if setting \a pSchedulerState failed with fatal error, - * reboot is required to overcome from this error. 
- * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode - * or if any vGPU instance currently exists on the \a device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerSetState_t *pSchedulerState); - /** * Returns the vGPU scheduler capabilities. * The list of supported vGPU schedulers returned in \a nvmlVgpuSchedulerCapabilities_t is from @@ -7889,12 +10985,37 @@ nvmlReturn_t DECLDIR nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVg * * @return * - \ref NVML_SUCCESS vGPU scheduler capabilities were successfully obtained - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pCapabilities is NULL or \a device is invalid + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a pCapabilities is NULL or \a device is invalid * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode - * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * - \ref NVML_ERROR_UNKNOWN On any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice_t device, nvmlVgpuSchedulerCapabilities_t *pCapabilities); +/** + * Sets the vGPU scheduler state. + * + * For Pascal &tm; or newer fully supported devices. + * + * The scheduler state change won't persist across module load/unload. + * Scheduler state and params will be allowed to set only when no VM is running. + * In \a nvmlVgpuSchedulerSetState_t, IFF enableARRMode is enabled then + * provide avgFactorForARR and frequency as input. If enableARRMode is disabled + * then provide timeslice as input. 
+ * + * @param device The identifier of the target \a device + * @param pSchedulerState vGPU \a pSchedulerState to set + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler state has been successfully set + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a pSchedulerState is NULL or \a device is invalid + * - \ref NVML_ERROR_RESET_REQUIRED If setting \a pSchedulerState failed with fatal error, + * reboot is required to overcome from this error. + * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or \a device not in vGPU host mode + * or if any vGPU instance currently exists on the \a device + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerSetState_t *pSchedulerState); + /* * Virtual GPU (vGPU) version * @@ -8012,6 +11133,52 @@ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, nvmlVgpuInstanceUtilizationSample_t *utilizationSamples); +/** + * Retrieves recent utilization for vGPU instances running on a physical GPU (device). + * + * For Kepler &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for vGPU + * instances running on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied + * buffer pointed at by \a vgpuUtilInfo->vgpuUtilArray. One utilization sample structure is returned per vGPU instance, and includes the + * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values + * in nvmlValue_t unions. The function sets the caller-supplied \a vgpuUtilInfo->sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to + * indicate the returned value type. 
+ * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a vgpuUtilInfo->vgpuUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuUtilInfo->vgpuInstanceCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate + * a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t). Invoke the function again with + * the allocated buffer passed in \a vgpuUtilInfo->vgpuUtilArray, and \a vgpuUtilInfo->vgpuInstanceCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuUtilInfo->vgpuInstanceCount with the number of vGPU utilization sample + * structures that were actually written. This may differ from a previously read value as vGPU instances are created or + * destroyed. + * + * \a vgpuUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set \a vgpuUtilInfo->lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. 
+ * + * @param device The identifier for the target device + * @param vgpuUtilInfo Pointer to the caller-provided structure of nvmlVgpuInstancesUtilizationInfo_t + + * @return + * - \ref NVML_SUCCESS If utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, \a vgpuUtilInfo is NULL, or \a vgpuUtilInfo->vgpuInstanceCount is 0 + * - \ref NVML_ERROR_NOT_SUPPORTED If vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST If the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a vgpuUtilInfo is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE If \a vgpuUtilInfo->vgpuUtilArray is NULL, or the buffer size of vgpuUtilInfo->vgpuInstanceCount is too small. + * The caller should check the current vGPU instance count from the returned vgpuUtilInfo->vgpuInstanceCount, and call + * the function again with a buffer of size vgpuUtilInfo->vgpuInstanceCount * sizeof(nvmlVgpuInstanceUtilizationInfo_t) + * - \ref NVML_ERROR_NOT_FOUND If sample entries are not found + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuInstancesUtilizationInfo(nvmlDevice_t device, + nvmlVgpuInstancesUtilizationInfo_t *vgpuUtilInfo); + /** * Retrieves current utilization for processes running on vGPUs on a physical GPU (device). * @@ -8058,6 +11225,52 @@ nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, unsigned int *vgpuProcessSamplesCount, nvmlVgpuProcessUtilizationSample_t *utilizationSamples); + +/** + * Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device). + * + * For Maxwell &tm; or newer fully supported devices. 
+ * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, video decoder, jpeg decoder, and OFA for processes running + * on vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied + * buffer pointed at by \a vgpuProcUtilInfo->vgpuProcUtilArray. One utilization sample structure is returned per process running + * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which + * the samples were recorded. Individual utilization values are returned as "unsigned int" values. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a vgpuProcUtilInfo->vgpuProcUtilArray set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current processes' count + * running on vGPU instances in \a vgpuProcUtilInfo->vgpuProcessCount. The caller should allocate a buffer of size + * vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed + * in \a vgpuProcUtilInfo->vgpuProcUtilArray, and \a vgpuProcUtilInfo->vgpuProcessCount set to the number of entries the buffer is sized for. + * + * On successful return, the function updates \a vgpuProcUtilInfo->vgpuProcessCount with the number of vGPU sub process utilization sample + * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active + * in any given sample period. + * + * vgpuProcUtilInfo->lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. 
Set vgpuProcUtilInfo->lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @param device The identifier for the target device + * @param vgpuProcUtilInfo Pointer to the caller-provided structure of nvmlVgpuProcessesUtilizationInfo_t + + * @return + * - \ref NVML_SUCCESS If utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid, or \a vgpuProcUtilInfo is null + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the version of \a vgpuProcUtilInfo is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE If \a vgpuProcUtilInfo->vgpuProcUtilArray is null, or supplied \a vgpuProcUtilInfo->vgpuProcessCount + * is too small to return samples for all processes on vGPU instances currently executing on the device. + * The caller should check the current processes count from the returned \a vgpuProcUtilInfo->vgpuProcessCount, + * and call the function again with a buffer of size + * vgpuProcUtilInfo->vgpuProcessCount * sizeof(nvmlVgpuProcessUtilizationSample_t) + * - \ref NVML_ERROR_NOT_SUPPORTED If vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST If the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND If sample entries are not found + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessesUtilizationInfo(nvmlDevice_t device, nvmlVgpuProcessesUtilizationInfo_t *vgpuProcUtilInfo); + /** * Queries the state of per process accounting mode on vGPU. 
* @@ -8264,10 +11477,49 @@ nvmlReturn_t DECLDIR nvmlGetExcludedDeviceInfoByIndex(unsigned int index, nvmlEx #define NVML_GPU_INSTANCE_PROFILE_7_SLICE 0x4 #define NVML_GPU_INSTANCE_PROFILE_8_SLICE 0x5 #define NVML_GPU_INSTANCE_PROFILE_6_SLICE 0x6 +// 1_SLICE profile with at least one (if supported at all) of Decoder, Encoder, JPEG, OFA engines. #define NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV1 0x7 +// 2_SLICE profile with at least one (if supported at all) of Decoder, Encoder, JPEG, OFA engines. #define NVML_GPU_INSTANCE_PROFILE_2_SLICE_REV1 0x8 +// 1_SLICE profile with twice the amount of memory resources. #define NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV2 0x9 -#define NVML_GPU_INSTANCE_PROFILE_COUNT 0xA +// 1_SLICE gfx capable profile +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_GFX 0x0A +// 2_SLICE gfx capable profile +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE_GFX 0x0B +// 4_SLICE gfx capable profile +#define NVML_GPU_INSTANCE_PROFILE_4_SLICE_GFX 0x0C +// 1_SLICE profile with none of Decode, Encoder, JPEG, OFA engines. +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_NO_ME 0x0D +// 2_SLICE profile with none of Decode, Encoder, JPEG, OFA engines. +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE_NO_ME 0x0E +// 1_SLICE profile with all of GPU Decode, Encoder, JPEG, OFA engines. +// Allocation of instance of this profile prevents allocation of +// all but _NO_ME profiles. +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_ALL_ME 0x0F +// 2_SLICE profile with all of GPU Decode, Encoder, JPEG, OFA engines. +// Allocation of instance of this profile prevents allocation of +// all but _NO_ME profiles. +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE_ALL_ME 0x10 +#define NVML_GPU_INSTANCE_PROFILE_COUNT 0x11 + +/** + * MIG GPU instance profile capability. 
+ * + * Bit field values representing MIG profile capabilities + * \ref nvmlGpuInstanceProfileInfo_v3_t.capabilities + */ +#define NVML_GPU_INSTANCE_PROFILE_CAPS_P2P 0x1 +#define NVML_GPU_INTSTANCE_PROFILE_CAPS_P2P 0x1 //!< Deprecated, do not use +#define NVML_GPU_INSTANCE_PROFILE_CAPS_GFX 0x2 + +/** + * MIG compute instance profile capability. + * + * Bit field values representing MIG profile capabilities + * \ref nvmlComputeInstanceProfileInfo_v3_t.capabilities + */ +#define NVML_COMPUTE_INSTANCE_PROFILE_CAPS_GFX 0x1 typedef struct nvmlGpuInstancePlacement_st { @@ -8294,18 +11546,45 @@ typedef struct nvmlGpuInstanceProfileInfo_st } nvmlGpuInstanceProfileInfo_t; /** - * GPU instance profile information (v2). + * GPU instance profile information (v2). + * + * Version 2 adds the \ref nvmlGpuInstanceProfileInfo_v2_t.version field + * to the start of the structure, and the \ref nvmlGpuInstanceProfileInfo_v2_t.name + * field to the end. This structure is not backwards-compatible with + * \ref nvmlGpuInstanceProfileInfo_t. + */ +typedef struct nvmlGpuInstanceProfileInfo_v2_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v2) + unsigned int id; //!< Unique profile ID within the device + unsigned int isP2pSupported; //!< Peer-to-Peer support + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< GPU instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int copyEngineCount; //!< Copy Engine count + unsigned int decoderCount; //!< Decoder Engine count + unsigned int encoderCount; //!< Encoder Engine count + unsigned int jpegCount; //!< JPEG Engine count + unsigned int ofaCount; //!< OFA Engine count + unsigned long long memorySizeMB; //!< Memory size in MBytes + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name +} nvmlGpuInstanceProfileInfo_v2_t; + +/** + * Version identifier value for \ref nvmlGpuInstanceProfileInfo_v2_t.version. 
+ */ +#define nvmlGpuInstanceProfileInfo_v2 NVML_STRUCT_VERSION(GpuInstanceProfileInfo, 2) + +/** + * GPU instance profile information (v3). * - * Version 2 adds the \ref nvmlGpuInstanceProfileInfo_v2_t.version field - * to the start of the structure, and the \ref nvmlGpuInstanceProfileInfo_v2_t.name - * field to the end. This structure is not backwards-compatible with - * \ref nvmlGpuInstanceProfileInfo_t. + * Version 3 removes isP2pSupported field and adds the \ref nvmlGpuInstanceProfileInfo_v3_t.capabilities + * field \ref nvmlGpuInstanceProfileInfo_t. */ -typedef struct nvmlGpuInstanceProfileInfo_v2_st +typedef struct nvmlGpuInstanceProfileInfo_v3_st { - unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v2) + unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v3) unsigned int id; //!< Unique profile ID within the device - unsigned int isP2pSupported; //!< Peer-to-Peer support unsigned int sliceCount; //!< GPU Slice count unsigned int instanceCount; //!< GPU instance count unsigned int multiprocessorCount; //!< Streaming Multiprocessor count @@ -8316,12 +11595,13 @@ typedef struct nvmlGpuInstanceProfileInfo_v2_st unsigned int ofaCount; //!< OFA Engine count unsigned long long memorySizeMB; //!< Memory size in MBytes char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name -} nvmlGpuInstanceProfileInfo_v2_t; + unsigned int capabilities; //!< Additional capabilities +} nvmlGpuInstanceProfileInfo_v3_t; /** - * Version identifier value for \ref nvmlGpuInstanceProfileInfo_v2_t.version. + * Version identifier value for \ref nvmlGpuInstanceProfileInfo_v3_t.version. 
*/ -#define nvmlGpuInstanceProfileInfo_v2 NVML_STRUCT_VERSION(GpuInstanceProfileInfo, 2) +#define nvmlGpuInstanceProfileInfo_v3 NVML_STRUCT_VERSION(GpuInstanceProfileInfo, 3) typedef struct nvmlGpuInstanceInfo_st { @@ -8331,24 +11611,19 @@ typedef struct nvmlGpuInstanceInfo_st nvmlGpuInstancePlacement_t placement; //!< Placement for this instance } nvmlGpuInstanceInfo_t; -typedef struct -{ - struct nvmlGpuInstance_st* handle; -} nvmlGpuInstance_t; - /** * Compute instance profiles. * * These macros should be passed to \ref nvmlGpuInstanceGetComputeInstanceProfileInfo to retrieve the * detailed information about a compute instance such as profile ID, engine counts */ -#define NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE 0x0 -#define NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE 0x1 -#define NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE 0x2 -#define NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE 0x3 -#define NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE 0x4 -#define NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE 0x5 -#define NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE 0x6 +#define NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE 0x0 +#define NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE 0x1 +#define NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE 0x2 +#define NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE 0x3 +#define NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE 0x4 +#define NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE 0x5 +#define NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE 0x6 #define NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 0x7 #define NVML_COMPUTE_INSTANCE_PROFILE_COUNT 0x8 @@ -8405,6 +11680,33 @@ typedef struct nvmlComputeInstanceProfileInfo_v2_st */ #define nvmlComputeInstanceProfileInfo_v2 NVML_STRUCT_VERSION(ComputeInstanceProfileInfo, 2) +/** + * Compute instance profile information (v3). + * + * Version 3 adds the \ref nvmlComputeInstanceProfileInfo_v3_t.capabilities field + * \ref nvmlComputeInstanceProfileInfo_t. 
+ */ +typedef struct nvmlComputeInstanceProfileInfo_v3_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlComputeInstanceProfileInfo_v3) + unsigned int id; //!< Unique profile ID within the GPU instance + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< Compute instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name + unsigned int capabilities; //!< Additional capabilities +} nvmlComputeInstanceProfileInfo_v3_t; + +/** + * Version identifier value for \ref nvmlComputeInstanceProfileInfo_v3_t.version. + */ +#define nvmlComputeInstanceProfileInfo_v3 NVML_STRUCT_VERSION(ComputeInstanceProfileInfo, 3) + typedef struct nvmlComputeInstanceInfo_st { nvmlDevice_t device; //!< Parent device @@ -8477,10 +11779,13 @@ nvmlReturn_t DECLDIR nvmlDeviceSetMigMode(nvmlDevice_t device, unsigned int mode nvmlReturn_t DECLDIR nvmlDeviceGetMigMode(nvmlDevice_t device, unsigned int *currentMode, unsigned int *pendingMode); /** - * Get GPU instance profile information. + * Get GPU instance profile information * * Information provided by this API is immutable throughout the lifetime of a MIG mode. * + * @note This API can be used to enumerate all MIG profiles supported by NVML in a forward compatible + * way by invoking it on \a profile values starting from 0, until the API returns \ref NVML_ERROR_INVALID_ARGUMENT. + * * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. 
* @@ -8492,7 +11797,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetMigMode(nvmlDevice_t device, unsigned int *cur * - \ref NVML_SUCCESS Upon success * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile or \a info are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profile isn't supported + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profile isn't supported * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation */ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfo(nvmlDevice_t device, unsigned int profile, @@ -8533,7 +11838,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice_t device, u * Get GPU instance placements. * * A placement represents the location of a GPU instance within a device. This API only returns all the possible - * placements for the given profile. + * placements for the given profile regardless of whether MIG is enabled or not. * A created GPU instance occupies memory slices described by its placement. Creation of new GPU instance will * fail if there is overlap with the already occupied memory slices. 
* @@ -8552,7 +11857,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice_t device, u * - \ref NVML_SUCCESS Upon success * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG or \a profileId isn't supported * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation */ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements_v2(nvmlDevice_t device, unsigned int profileId, @@ -8720,6 +12025,9 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetInfo(nvmlGpuInstance_t gpuInstance, nvmlG * * Information provided by this API is immutable throughout the lifetime of a MIG mode. * + * @note This API can be used to enumerate all MIG profiles supported by NVML in a forward compatible + * way by invoking it on \a profile values starting from 0, until the API returns \ref NVML_ERROR_INVALID_ARGUMENT. + * * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * @@ -9008,303 +12316,95 @@ nvmlReturn_t DECLDIR nvmlDeviceIsMigDeviceHandle(nvmlDevice_t device, unsigned i * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned int *id); - -/** - * Get compute instance ID for the given MIG device handle. - * - * Compute instance IDs are unique per GPU instance and remain valid until the compute instance - * is destroyed. - * - * For Ampere &tm; or newer fully supported devices. - * Supported on Linux only. 
- * - * @param device Target MIG device handle - * @param id Compute instance ID - * - * @return - * - \ref NVML_SUCCESS if instance ID was successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned int *id); - -/** - * Get the maximum number of MIG devices that can exist under a given parent NVML device. - * - * Returns zero if MIG is not supported or enabled. - * - * For Ampere &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param device Target device handle - * @param count Count of MIG devices - * - * @return - * - \ref NVML_SUCCESS if \a count was successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a count reference is invalid - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int *count); - -/** - * Get MIG device handle for the given index under its parent NVML device. - * - * If the compute instance is destroyed either explicitly or by destroying, - * resetting or unbinding the parent GPU instance or the GPU device itself - * the MIG device handle would remain invalid and must be requested again - * using this API. Handles may be reused and their properties can change in - * the process. - * - * For Ampere &tm; or newer fully supported devices. - * Supported on Linux only. 
- * - * @param device Reference to the parent GPU device handle - * @param index Index of the MIG device - * @param migDevice Reference to the MIG device handle - * - * @return - * - \ref NVML_SUCCESS if \a migDevice handle was successfully created - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a index or \a migDevice reference is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_NOT_FOUND if no valid MIG device was found at \a index - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, - nvmlDevice_t *migDevice); - -/** - * Get parent device handle from a MIG device handle. - * - * For Ampere &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param migDevice MIG device handle - * @param device Device handle - * - * @return - * - \ref NVML_SUCCESS if \a device handle was successfully created - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a migDevice or \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t *device); - -/** - * Get the type of the GPU Bus (PCIe, PCI, ...) 
- * - * @param device The identifier of the target device - * @param type The PCI Bus type - * - * return - * - \ref NVML_SUCCESS if the bus \a type is successfully retreived - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \device is invalid or \type is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t *type); - -/** - * Retrieve performance monitor samples from the associated subdevice. - * - * @param device - * @param pDynamicPstatesInfo - * - * @return - * - \ref NVML_SUCCESS if \a pDynamicPstatesInfo has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pDynamicPstatesInfo is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t device, nvmlGpuDynamicPstatesInfo_t *pDynamicPstatesInfo); - -/** - * Sets the speed of a specified fan. - * - * WARNING: This function changes the fan control policy to manual. It means that YOU have to monitor - * the temperature and adjust the fan speed accordingly. - * If you set the fan speed too low you can burn your GPU! - * Use nvmlDeviceSetDefaultFanSpeed_v2 to restore default control policy. - * - * For all cuda-capable discrete products with fans that are Maxwell or Newer. 
- * - * device The identifier of the target device - * fan The index of the fan, starting at zero - * speed The target speed of the fan [0-100] in % of max speed - * - * return - * NVML_SUCCESS if the fan speed has been set - * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * NVML_ERROR_INVALID_ARGUMENT if the device is not valid, or the speed is outside acceptable ranges, - * or if the fan index doesn't reference an actual fan. - * NVML_ERROR_NOT_SUPPORTED if the device is older than Maxwell. - * NVML_ERROR_UNKNOWN if there was an unexpected error. - */ -nvmlReturn_t DECLDIR nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed); - -/** - * Retrieve the GPCCLK VF offset value - * @param[in] device The identifier of the target device - * @param[out] offset The retrieved GPCCLK VF offset value - * - * @return - * - \ref NVML_SUCCESS if \a offset has been successfully queried - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t device, int *offset); - -/** - * Set the GPCCLK VF offset value - * @param[in] device The identifier of the target device - * @param[in] offset The GPCCLK VF offset value to set - * - * @return - * - \ref NVML_SUCCESS if \a offset has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t 
DECLDIR nvmlDeviceSetGpcClkVfOffset(nvmlDevice_t device, int offset); - -/** - * Retrieve the MemClk (Memory Clock) VF offset value. - * @param[in] device The identifier of the target device - * @param[out] offset The retrieved MemClk VF offset value - * - * @return - * - \ref NVML_SUCCESS if \a offset has been successfully queried - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMemClkVfOffset(nvmlDevice_t device, int *offset); - -/** - * Set the MemClk (Memory Clock) VF offset value. It requires elevated privileges. - * @param[in] device The identifier of the target device - * @param[in] offset The MemClk VF offset value to set - * - * @return - * - \ref NVML_SUCCESS if \a offset has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetMemClkVfOffset(nvmlDevice_t device, int offset); - -/** - * Retrieve min and max clocks of some clock domain for a given PState - * - * @param device The identifier of the target device - * @param type Clock domain - * @param pstate PState to query - * @param minClockMHz Reference in which to return min clock frequency - * @param maxClockMHz Reference in which to return max clock frequency - * - * @return - * - \ref NVML_SUCCESS if everything worked - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * 
- \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a type or \a pstate are invalid or both - * \a minClockMHz and \a maxClockMHz are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t device, nvmlClockType_t type, nvmlPstates_t pstate, - unsigned int * minClockMHz, unsigned int * maxClockMHz); +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned int *id); /** - * Get all supported Performance States (P-States) for the device. + * Get compute instance ID for the given MIG device handle. * - * The returned array would contain a contiguous list of valid P-States supported by - * the device. If the number of supported P-States is fewer than the size of the array - * supplied missing elements would contain \a NVML_PSTATE_UNKNOWN. + * Compute instance IDs are unique per GPU instance and remain valid until the compute instance + * is destroyed. * - * The number of elements in the returned list will never exceed \a NVML_MAX_GPU_PERF_PSTATES. + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. 
* - * @param device The identifier of the target device - * @param pstates Container to return the list of performance states - * supported by device - * @param size Size of the supplied \a pstates array in bytes + * @param device Target MIG device handle + * @param id Compute instance ID * * @return - * - \ref NVML_SUCCESS if \a pstates array has been retrieved - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if the the container supplied was not large enough to - * hold the resulting list + * - \ref NVML_SUCCESS if instance ID was successfully retrieved * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a pstates is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support performance state readings + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t device, - nvmlPstates_t *pstates, unsigned int size); +nvmlReturn_t DECLDIR nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned int *id); /** - * Retrieve the GPCCLK min max VF offset value. - * @param[in] device The identifier of the target device - * @param[out] minOffset The retrieved GPCCLK VF min offset value - * @param[out] maxOffset The retrieved GPCCLK VF max offset value + * Get the maximum number of MIG devices that can exist under a given parent NVML device. + * + * Returns zero if MIG is not supported or enabled. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. 
+ * + * @param device Target device handle + * @param count Count of MIG devices * * @return - * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_SUCCESS if \a count was successfully retrieved * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a count reference is invalid * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t device, - int *minOffset, int *maxOffset); +nvmlReturn_t DECLDIR nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int *count); /** - * Retrieve the MemClk (Memory Clock) min max VF offset value. - * @param[in] device The identifier of the target device - * @param[out] minOffset The retrieved MemClk VF min offset value - * @param[out] maxOffset The retrieved MemClk VF max offset value + * Get MIG device handle for the given index under its parent NVML device. + * + * If the compute instance is destroyed either explicitly or by destroying, + * resetting or unbinding the parent GPU instance or the GPU device itself + * the MIG device handle would remain invalid and must be requested again + * using this API. Handles may be reused and their properties can change in + * the process. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. 
+ * + * @param device Reference to the parent GPU device handle + * @param index Index of the MIG device + * @param migDevice Reference to the MIG device handle * * @return - * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_SUCCESS if \a migDevice handle was successfully created * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a index or \a migDevice reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_NOT_FOUND if no valid MIG device was found at \a index * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t device, - int *minOffset, int *maxOffset); +nvmlReturn_t DECLDIR nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, + nvmlDevice_t *migDevice); /** - * Get fabric information associated with the device. - * - * %HOPPER_OR_NEWER% + * Get parent device handle from a MIG device handle. * - * On Hopper + NVSwitch systems, GPU is registered with the NVIDIA Fabric Manager - * Upon successful registration, the GPU is added to the NVLink fabric to enable - * peer-to-peer communication. - * This API reports the current state of the GPU in the NVLink fabric - * along with other useful information. + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. 
* - * @param device The identifier of the target device - * @param gpuFabricInfo Information about GPU fabric state + * @param migDevice MIG device handle + * @param device Device handle * * @return - * - \ref NVML_SUCCESS Upon success - * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric + * - \ref NVML_SUCCESS if \a device handle was successfully created + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a migDevice or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpuFabricInfo(nvmlDevice_t device, nvmlGpuFabricInfo_t *gpuFabricInfo); +nvmlReturn_t DECLDIR nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t *device); + +/** @} */ // @defgroup nvmlMultiInstanceGPU -/** @} */ /***************************************************************************************************/ /** @defgroup GPM NVML GPM @@ -9316,81 +12416,193 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuFabricInfo(nvmlDevice_t device, nvmlGpuFabr */ /***************************************************************************************************/ -/* GPM Metric Identifiers */ +/** + * GPM Metric Identifiers + */ typedef enum { - NVML_GPM_METRIC_GRAPHICS_UTIL = 1, /* Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0 */ - NVML_GPM_METRIC_SM_UTIL = 2, /* Percentage of SMs that were busy. 0.0 - 100.0 */ - NVML_GPM_METRIC_SM_OCCUPANCY = 3, /* Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0 */ - NVML_GPM_METRIC_INTEGER_UTIL = 4, /* Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0 */ - NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5, /* Percentage of time the GPU's SMs were doing ANY tensor operations. 
0.0 - 100.0 */ - NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6, /* Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0 */ - NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7, /* Percentage of time the GPU's SMs were doing HMMA tensor operations. 0.0 - 100.0 */ - NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9, /* Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0 */ - NVML_GPM_METRIC_DRAM_BW_UTIL = 10, /* Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0 */ - NVML_GPM_METRIC_FP64_UTIL = 11, /* Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0 */ - NVML_GPM_METRIC_FP32_UTIL = 12, /* Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0 */ - NVML_GPM_METRIC_FP16_UTIL = 13, /* Percentage of time the GPU's SMs were doing non-tensor FP16 math. 0.0 - 100.0 */ - NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20, /* PCIe traffic from this GPU in MiB/sec */ - NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21, /* PCIe traffic to this GPU in MiB/sec */ - NVML_GPM_METRIC_NVDEC_0_UTIL = 30, /* Percent utilization of NVDEC 0. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVDEC_1_UTIL = 31, /* Percent utilization of NVDEC 1. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVDEC_2_UTIL = 32, /* Percent utilization of NVDEC 2. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVDEC_3_UTIL = 33, /* Percent utilization of NVDEC 3. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVDEC_4_UTIL = 34, /* Percent utilization of NVDEC 4. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVDEC_5_UTIL = 35, /* Percent utilization of NVDEC 5. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVDEC_6_UTIL = 36, /* Percent utilization of NVDEC 6. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVDEC_7_UTIL = 37, /* Percent utilization of NVDEC 7. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_0_UTIL = 40, /* Percent utilization of NVJPG 0. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_1_UTIL = 41, /* Percent utilization of NVJPG 1. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_2_UTIL = 42, /* Percent utilization of NVJPG 2. 
0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_3_UTIL = 43, /* Percent utilization of NVJPG 3. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_4_UTIL = 44, /* Percent utilization of NVJPG 4. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_5_UTIL = 45, /* Percent utilization of NVJPG 5. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_6_UTIL = 46, /* Percent utilization of NVJPG 6. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVJPG_7_UTIL = 47, /* Percent utilization of NVJPG 7. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVOFA_0_UTIL = 50, /* Percent utilization of NVOFA 0. 0.0 - 100.0 */ - NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60, /* NvLink read bandwidth for all links in MiB/sec */ - NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61, /* NvLink write bandwidth for all links in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62, /* NvLink read bandwidth for link 0 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63, /* NvLink write bandwidth for link 0 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64, /* NvLink read bandwidth for link 1 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65, /* NvLink write bandwidth for link 1 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66, /* NvLink read bandwidth for link 2 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67, /* NvLink write bandwidth for link 2 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68, /* NvLink read bandwidth for link 3 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69, /* NvLink write bandwidth for link 3 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70, /* NvLink read bandwidth for link 4 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71, /* NvLink write bandwidth for link 4 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72, /* NvLink read bandwidth for link 5 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73, /* NvLink write bandwidth for link 5 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74, /* NvLink read bandwidth for link 6 in MiB/sec */ - 
NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75, /* NvLink write bandwidth for link 6 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76, /* NvLink read bandwidth for link 7 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77, /* NvLink write bandwidth for link 7 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78, /* NvLink read bandwidth for link 8 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79, /* NvLink write bandwidth for link 8 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80, /* NvLink read bandwidth for link 9 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81, /* NvLink write bandwidth for link 9 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82, /* NvLink read bandwidth for link 10 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83, /* NvLink write bandwidth for link 10 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84, /* NvLink read bandwidth for link 11 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85, /* NvLink write bandwidth for link 11 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86, /* NvLink read bandwidth for link 12 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87, /* NvLink write bandwidth for link 12 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88, /* NvLink read bandwidth for link 13 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89, /* NvLink write bandwidth for link 13 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90, /* NvLink read bandwidth for link 14 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91, /* NvLink write bandwidth for link 14 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92, /* NvLink read bandwidth for link 15 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93, /* NvLink write bandwidth for link 15 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94, /* NvLink read bandwidth for link 16 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95, /* NvLink write 
bandwidth for link 16 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96, /* NvLink read bandwidth for link 17 in MiB/sec */ - NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97, /* NvLink write bandwidth for link 17 in MiB/sec */ - NVML_GPM_METRIC_MAX = 98, /* Maximum value above +1. Note that changing this - should also change NVML_GPM_METRICS_GET_VERSION - due to struct size change */ + NVML_GPM_METRIC_GRAPHICS_UTIL = 1, //!< Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0 + NVML_GPM_METRIC_SM_UTIL = 2, //!< Percentage of SMs that were busy. 0.0 - 100.0 + NVML_GPM_METRIC_SM_OCCUPANCY = 3, //!< Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0 + NVML_GPM_METRIC_INTEGER_UTIL = 4, //!< Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0 + NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5, //!< Percentage of time the GPU's SMs were doing ANY tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6, //!< Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7, //!< Percentage of time the GPU's SMs were doing HMMA tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9, //!< Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0 + NVML_GPM_METRIC_DRAM_BW_UTIL = 10, //!< Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0 */ + NVML_GPM_METRIC_FP64_UTIL = 11, //!< Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0 + NVML_GPM_METRIC_FP32_UTIL = 12, //!< Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0 + NVML_GPM_METRIC_FP16_UTIL = 13, //!< Percentage of time the GPU's SMs were doing non-tensor FP16 math. 
0.0 - 100.0 + NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20, //!< PCIe traffic from this GPU in MiB/sec + NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21, //!< PCIe traffic to this GPU in MiB/sec + NVML_GPM_METRIC_NVDEC_0_UTIL = 30, //!< Percent utilization of NVDEC 0. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_1_UTIL = 31, //!< Percent utilization of NVDEC 1. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_2_UTIL = 32, //!< Percent utilization of NVDEC 2. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_3_UTIL = 33, //!< Percent utilization of NVDEC 3. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_4_UTIL = 34, //!< Percent utilization of NVDEC 4. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_5_UTIL = 35, //!< Percent utilization of NVDEC 5. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_6_UTIL = 36, //!< Percent utilization of NVDEC 6. 0.0 - 100.0 + NVML_GPM_METRIC_NVDEC_7_UTIL = 37, //!< Percent utilization of NVDEC 7. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_0_UTIL = 40, //!< Percent utilization of NVJPG 0. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_1_UTIL = 41, //!< Percent utilization of NVJPG 1. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_2_UTIL = 42, //!< Percent utilization of NVJPG 2. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_3_UTIL = 43, //!< Percent utilization of NVJPG 3. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_4_UTIL = 44, //!< Percent utilization of NVJPG 4. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_5_UTIL = 45, //!< Percent utilization of NVJPG 5. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_6_UTIL = 46, //!< Percent utilization of NVJPG 6. 0.0 - 100.0 + NVML_GPM_METRIC_NVJPG_7_UTIL = 47, //!< Percent utilization of NVJPG 7. 0.0 - 100.0 + NVML_GPM_METRIC_NVOFA_0_UTIL = 50, //!< Percent utilization of NVOFA 0. 0.0 - 100.0 + NVML_GPM_METRIC_NVOFA_1_UTIL = 51, //!< Percent utilization of NVOFA 1. 
0.0 - 100.0 + NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60, //!< NvLink read bandwidth for all links in MiB/sec + NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61, //!< NvLink write bandwidth for all links in MiB/sec + NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62, //!< NvLink read bandwidth for link 0 in MiB/sec + NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63, //!< NvLink write bandwidth for link 0 in MiB/sec + NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64, //!< NvLink read bandwidth for link 1 in MiB/sec + NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65, //!< NvLink write bandwidth for link 1 in MiB/sec + NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66, //!< NvLink read bandwidth for link 2 in MiB/sec + NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67, //!< NvLink write bandwidth for link 2 in MiB/sec + NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68, //!< NvLink read bandwidth for link 3 in MiB/sec + NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69, //!< NvLink write bandwidth for link 3 in MiB/sec + NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70, //!< NvLink read bandwidth for link 4 in MiB/sec + NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71, //!< NvLink write bandwidth for link 4 in MiB/sec + NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72, //!< NvLink read bandwidth for link 5 in MiB/sec + NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73, //!< NvLink write bandwidth for link 5 in MiB/sec + NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74, //!< NvLink read bandwidth for link 6 in MiB/sec + NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75, //!< NvLink write bandwidth for link 6 in MiB/sec + NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76, //!< NvLink read bandwidth for link 7 in MiB/sec + NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77, //!< NvLink write bandwidth for link 7 in MiB/sec + NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78, //!< NvLink read bandwidth for link 8 in MiB/sec + NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79, //!< NvLink write bandwidth for link 8 in MiB/sec + NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80, //!< NvLink read bandwidth for link 9 in 
MiB/sec + NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81, //!< NvLink write bandwidth for link 9 in MiB/sec + NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82, //!< NvLink read bandwidth for link 10 in MiB/sec + NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83, //!< NvLink write bandwidth for link 10 in MiB/sec + NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84, //!< NvLink read bandwidth for link 11 in MiB/sec + NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85, //!< NvLink write bandwidth for link 11 in MiB/sec + NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86, //!< NvLink read bandwidth for link 12 in MiB/sec + NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87, //!< NvLink write bandwidth for link 12 in MiB/sec + NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88, //!< NvLink read bandwidth for link 13 in MiB/sec + NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89, //!< NvLink write bandwidth for link 13 in MiB/sec + NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90, //!< NvLink read bandwidth for link 14 in MiB/sec + NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91, //!< NvLink write bandwidth for link 14 in MiB/sec + NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92, //!< NvLink read bandwidth for link 15 in MiB/sec + NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93, //!< NvLink write bandwidth for link 15 in MiB/sec + NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94, //!< NvLink read bandwidth for link 16 in MiB/sec + NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95, //!< NvLink write bandwidth for link 16 in MiB/sec + NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96, //!< NvLink read bandwidth for link 17 in MiB/sec + NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97, //!< NvLink write bandwidth for link 17 in MiB/sec + //Put new metrics for BLACKWELL here... 
+ NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC = 100, + NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC = 101, + NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC = 102, + NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC = 103, + NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = 104, + NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = 105, + NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = 106, + NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = 107, + NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = 108, + NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = 109, + NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = 110, + NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = 111, + NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = 112, + NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = 113, + NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = 114, + NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = 115, + NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = 116, + NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = 117, + NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = 118, + NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = 119, + NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = 120, + NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = 121, + NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = 122, + NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = 123, + NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = 124, + NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = 125, + NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = 126, + NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = 127, + NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = 128, + NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = 129, + NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = 130, + NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = 131, + NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = 132, + NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = 133, + NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = 134, + NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = 135, + NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = 136, + NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = 137, + NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = 138, + 
NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = 139, + NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = 140, + NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = 141, + NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = 142, + NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = 143, + NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = 144, + NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = 145, + NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = 146, + NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = 147, + NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = 148, + NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = 149, + NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = 150, + NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = 151, + NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = 152, + NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = 153, + NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = 154, + NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = 155, + NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = 156, + NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = 157, + NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = 158, + NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = 159, + NVML_GPM_METRIC_HOSTMEM_CACHE_HIT = 160, + NVML_GPM_METRIC_HOSTMEM_CACHE_MISS = 161, + NVML_GPM_METRIC_PEERMEM_CACHE_HIT = 162, + NVML_GPM_METRIC_PEERMEM_CACHE_MISS = 163, + NVML_GPM_METRIC_DRAM_CACHE_HIT = 164, + NVML_GPM_METRIC_DRAM_CACHE_MISS = 165, + NVML_GPM_METRIC_NVENC_0_UTIL = 166, + NVML_GPM_METRIC_NVENC_1_UTIL = 167, + NVML_GPM_METRIC_NVENC_2_UTIL = 168, + NVML_GPM_METRIC_NVENC_3_UTIL = 169, + NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = 170, + NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = 171, + NVML_GPM_METRIC_GR0_CTXSW_REQUESTS = 172, + NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = 173, + NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = 174, + NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = 175, + NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = 176, + NVML_GPM_METRIC_GR1_CTXSW_REQUESTS = 177, + NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = 178, + NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = 179, + 
NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = 180, + NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = 181, + NVML_GPM_METRIC_GR2_CTXSW_REQUESTS = 182, + NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = 183, + NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = 184, + NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = 185, + NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = 186, + NVML_GPM_METRIC_GR3_CTXSW_REQUESTS = 187, + NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = 188, + NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = 189, + NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = 190, + NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = 191, + NVML_GPM_METRIC_GR4_CTXSW_REQUESTS = 192, + NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = 193, + NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = 194, + NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = 195, + NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = 196, + NVML_GPM_METRIC_GR5_CTXSW_REQUESTS = 197, + NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = 198, + NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = 199, + NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = 200, + NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = 201, + NVML_GPM_METRIC_GR6_CTXSW_REQUESTS = 202, + NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = 203, + NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = 204, + NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = 205, + NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = 206, + NVML_GPM_METRIC_GR7_CTXSW_REQUESTS = 207, + NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = 208, + NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = 209, + NVML_GPM_METRIC_MAX = 210, //!< Maximum value above +1. Note that changing this should also change NVML_GPM_METRICS_GET_VERSION due to struct size change } nvmlGpmMetricId_t; /** @} */ // @defgroup nvmlGpmEnums @@ -9402,8 +12614,9 @@ typedef enum */ /***************************************************************************************************/ -/* Handle to an allocated GPM sample allocated with nvmlGpmSampleAlloc() - Free this with nvmlGpmSampleFree() */ +/** + * Handle to an allocated GPM sample allocated with nvmlGpmSampleAlloc(). 
Free this with nvmlGpmSampleFree(). + */ typedef struct { struct nvmlGpmSample_st* handle; @@ -9415,30 +12628,38 @@ typedef struct { char *unit; } nvmlGpmMetricMetricInfo_t; +/** + * GPM metric information. + */ typedef struct { - unsigned int metricId; /* IN: NVML_GPM_METRIC_? #define of which metric to retrieve */ - nvmlReturn_t nvmlReturn; /* OUT: Status of this metric. If this is nonzero, then value is not valid */ - double value; /* OUT: Value of this metric. Is only valid if nvmlReturn is 0 (NVML_SUCCESS) */ - nvmlGpmMetricMetricInfo_t metricInfo; /* OUT: Metric name and unit. Those can be NULL if not defined */ + unsigned int metricId; //!< IN: NVML_GPM_METRIC_? define of which metric to retrieve + nvmlReturn_t nvmlReturn; //!< OUT: Status of this metric. If this is nonzero, then value is not valid + double value; //!< OUT: Value of this metric. Is only valid if nvmlReturn is 0 (NVML_SUCCESS) + nvmlGpmMetricMetricInfo_t metricInfo; //!< OUT: Metric name and unit. Those can be NULL if not defined } nvmlGpmMetric_t; +/** + * GPM buffer information. + */ typedef struct { - unsigned int version; /* IN: Set to NVML_GPM_METRICS_GET_VERSION */ - unsigned int numMetrics; /* IN: How many metrics to retrieve in metrics[] */ - nvmlGpmSample_t sample1; /* IN: Sample buffer */ - nvmlGpmSample_t sample2; /* IN: Sample buffer */ - nvmlGpmMetric_t metrics[NVML_GPM_METRIC_MAX]; /* IN/OUT: Array of metrics. Set metricId on call. - see nvmlReturn and value on return */ + unsigned int version; //!< IN: Set to NVML_GPM_METRICS_GET_VERSION + unsigned int numMetrics; //!< IN: How many metrics to retrieve in metrics[] + nvmlGpmSample_t sample1; //!< IN: Sample buffer + nvmlGpmSample_t sample2; //!< IN: Sample buffer + nvmlGpmMetric_t metrics[NVML_GPM_METRIC_MAX]; //!< IN/OUT: Array of metrics. Set metricId on call. See nvmlReturn and value on return } nvmlGpmMetricsGet_t; #define NVML_GPM_METRICS_GET_VERSION 1 +/** + * GPM device information. 
+ */ typedef struct { - unsigned int version; /* IN: Set to NVML_GPM_SUPPORT_VERSION */ - unsigned int isSupportedDevice; /* OUT: Indicates device support */ + unsigned int version; //!< IN: Set to NVML_GPM_SUPPORT_VERSION + unsigned int isSupportedDevice; //!< OUT: Indicates device support } nvmlGpmSupport_t; #define NVML_GPM_SUPPORT_VERSION 1 @@ -9454,10 +12675,17 @@ typedef struct /** * Calculate GPM metrics from two samples. * + * For Hopper &tm; or newer fully supported devices. * - * @param metricsGet IN/OUT: populated nvmlGpmMetricsGet_t struct + * To retrieve metrics, the user must first allocate the two sample buffers at \a metricsGet->sample1 + * and \a metricsGet->sample2 by calling \a nvmlGpmSampleAlloc(). Next, the user should fill in the ID of each metric + * in \a metricsGet->metrics[i].metricId and specify the total number of metrics to retrieve in \a metricsGet->numMetrics, + * The version should be set to NVML_GPM_METRICS_GET_VERSION in \a metricsGet->version. The user then calls the + * \a nvmlGpmSampleGet() API twice to obtain 2 samples of counters. \note that the interval between these + * two \a nvmlGpmSampleGet() calls should be greater than 100ms due to the internal sample refresh rate. + * Finally, the user calls \a nvmlGpmMetricsGet to retrieve the metrics, which will be stored at \a metricsGet->metrics * - * %HOPPER_OR_NEWER% + * @param metricsGet IN/OUT: populated \a nvmlGpmMetricsGet_t struct * * @return * - \ref NVML_SUCCESS on success @@ -9469,7 +12697,7 @@ nvmlReturn_t DECLDIR nvmlGpmMetricsGet(nvmlGpmMetricsGet_t *metricsGet); /** * Free an allocated sample buffer that was allocated with \ref nvmlGpmSampleAlloc() * - * %HOPPER_OR_NEWER% + * For Hopper &tm; or newer fully supported devices. * * @param gpmSample Sample to free * @@ -9484,7 +12712,7 @@ nvmlReturn_t DECLDIR nvmlGpmSampleFree(nvmlGpmSample_t gpmSample); * Allocate a sample buffer to be used with NVML GPM . 
You will need to allocate * at least two of these buffers to use with the NVML GPM feature * - * %HOPPER_OR_NEWER% + * For Hopper &tm; or newer fully supported devices. * * @param gpmSample Where the allocated sample will be stored * @@ -9500,7 +12728,10 @@ nvmlReturn_t DECLDIR nvmlGpmSampleAlloc(nvmlGpmSample_t *gpmSample); * two samples are gathered, you can call nvmlGpmMetricGet on those samples to * retrive metrics * - * %HOPPER_OR_NEWER% + * For Hopper &tm; or newer fully supported devices. + * + * @note The interval between two \a nvmlGpmSampleGet() calls should be greater than 100ms due to + * the internal sample refresh rate. * * @param device Device to get samples for * @param gpmSample Buffer to read samples into @@ -9517,7 +12748,10 @@ nvmlReturn_t DECLDIR nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSa * After two samples are gathered, you can call nvmlGpmMetricGet on those * samples to retrive metrics * - * %HOPPER_OR_NEWER% + * For Hopper &tm; or newer fully supported devices. + * + * @note The interval between two \a nvmlGpmMigSampleGet() calls should be greater than 100ms due to + * the internal sample refresh rate. * * @param device Device to get samples for * @param gpuInstanceId MIG GPU Instance ID @@ -9532,8 +12766,10 @@ nvmlReturn_t DECLDIR nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuIn /** * Indicate whether the supplied device supports GPM * + * For Hopper &tm; or newer fully supported devices. + * * @param device NVML device to query for - * @param gpmSupport Structure to indicate GPM support. Indicates + * @param gpmSupport Structure to indicate GPM support \a nvmlGpmSupport_t. 
Indicates * GPM support per system for the supplied device * * @return @@ -9542,89 +12778,387 @@ nvmlReturn_t DECLDIR nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuIn */ nvmlReturn_t DECLDIR nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t *gpmSupport); -/** @} */ // @defgroup nvmlGpmFunctions -/** @} */ // @defgroup GPM - -/***************************************************************************************************/ -/** @defgroup nvmlDevice definitions related to Counter Collection Unit - * @{ - */ -/***************************************************************************************************/ - -/* CCU Stream State */ -#define NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE 0 -#define NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE 1 - +/* GPM Stream State */ /** - * Get counter collection unit stream state. + * Get GPM stream state. * - * %HOPPER_OR_NEWER% + * For Hopper &tm; or newer fully supported devices. * Supported on Linux, Windows TCC. 
* * @param device The identifier of the target device - * @param state Returns counter collection unit stream state - * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE or - * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE + * @param state Returns GPM stream state + * NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED * * @return - * - \ref NVML_SUCCESS if \a current counter collection unit stream state were successfully queried + * - \ref NVML_SUCCESS if \a current GPM stream state were successfully queried * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a state is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device */ -nvmlReturn_t DECLDIR nvmlDeviceCcuGetStreamState(nvmlDevice_t device, unsigned int *state); +nvmlReturn_t DECLDIR nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int *state); /** - * Set counter collection unit stream state. + * Set GPM stream state. * - * %HOPPER_OR_NEWER% + * For Hopper &tm; or newer fully supported devices. * Supported on Linux, Windows TCC. 
* * @param device The identifier of the target device - * @param state Counter collection unit stream state, - * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE or - * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE + * @param state GPM stream state, + * NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED * * @return - * - \ref NVML_SUCCESS if \a current counter collection unit stream state is successfully set + * - \ref NVML_SUCCESS if \a current GPM stream state is successfully set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device */ -nvmlReturn_t DECLDIR nvmlDeviceCcuSetStreamState(nvmlDevice_t device, unsigned int state); +nvmlReturn_t DECLDIR nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state); -/** @} */ // @defgroup CCU +/** @} */ // @defgroup nvmlGpmFunctions +/** @} */ // @defgroup GPM -#define NVML_NVLINK_POWER_STATE_HIGH_SPEED 0x0 -#define NVML_NVLINK_POWER_STATE_LOW 0x1 +#define NVML_DEV_CAP_EGM (1 << 0) // Extended GPU memory +/** + * Device capabilities + */ +typedef struct +{ + unsigned int version; //!< the API version number + unsigned int capMask; //!< OUT: Bit mask of capabilities. +} nvmlDeviceCapabilities_v1_t; +typedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t; +#define nvmlDeviceCapabilities_v1 NVML_STRUCT_VERSION(DeviceCapabilities, 1) -#define NVML_NVLINK_LOW_POWER_THRESHOLD_MIN 0x1 -#define NVML_NVLINK_LOW_POWER_THRESHOLD_MAX 0x1FFF -#define NVML_NVLINK_LOW_POWER_THRESHOLD_RESET 0xFFFFFFFF +/** + * Get device capabilities + * + * See \ref nvmlDeviceCapabilities_v1_t for more information on the struct. 
+ * + * @param device The identifier of the target device + * @param caps Returns GPU's capabilities + * + * @return + * - \ref NVML_SUCCESS If the query is success + * - \ref NVML_ERROR_UNINITIALIZED If the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device is invalid or \a counters is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED If the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST If the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH If the provided version is invalid/unsupported + * - \ref NVML_ERROR_UNKNOWN On any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCapabilities(nvmlDevice_t device, + nvmlDeviceCapabilities_t *caps); -/* Structure containing Low Power parameters */ -typedef struct nvmlNvLinkPowerThres_st +/* + * Generic bitmask to hold 255 bits, represented by 8 elements of 32 bits + */ +#define NVML_255_MASK_BITS_PER_ELEM 32 +#define NVML_255_MASK_NUM_ELEMS 8 +#define NVML_255_MASK_BIT_SET(index, nvmlMask) \ + nvmlMask.mask[index / NVML_255_MASK_BITS_PER_ELEM] |= (1 << (index % NVML_255_MASK_BITS_PER_ELEM)) + +#define NVML_255_MASK_BIT_GET(index, nvmlMask) \ + nvmlMask.mask[index / NVML_255_MASK_BITS_PER_ELEM] & (1 << (index % NVML_255_MASK_BITS_PER_ELEM)) + +#define NVML_255_MASK_BIT_SET_PTR(index, nvmlMask) \ + nvmlMask->mask[index / NVML_255_MASK_BITS_PER_ELEM] |= (1 << (index % NVML_255_MASK_BITS_PER_ELEM)) + +#define NVML_255_MASK_BIT_GET_PTR(index, nvmlMask) \ + nvmlMask->mask[index / NVML_255_MASK_BITS_PER_ELEM] & (1 << (index % NVML_255_MASK_BITS_PER_ELEM)) + +typedef struct { - unsigned int lowPwrThreshold; //!< Low power threshold (in units of 100us) -} nvmlNvLinkPowerThres_t; + unsigned int mask[NVML_255_MASK_NUM_ELEMS]; //profileId is used and + * the rest of the structure is ignored. 
+ * + * @return + * - \ref NVML_SUCCESS if the Desired Profile was successfully set + * - \ref NVML_ERROR_INVALID_ARGUMENT if device is invalid or structure was NULL + * - \ref NVML_ERROR_NO_PERMISSION if user does not have permission to change the profile number + * - \ref NVML_ERROR_NOT_SUPPORTED if this feature is not supported by the device * **/ -nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t device, nvmlNvLinkPowerThres_t *info); +nvmlReturn_t DECLDIR nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, + nvmlPowerSmoothingProfile_t *profile); + +/** + * Update the value of a specific profile parameter contained within \ref nvmlPowerSmoothingProfile_v1_t. + * Requires root/admin permissions. + * + * %BLACKWELL_OR_NEWER% + * + * NVML_POWER_SMOOTHING_PROFILE_PARAM_PERCENT_TMP_FLOOR expects a value as a percentage from 00.00-100.00% + * NVML_POWER_SMOOTHING_PROFILE_PARAM_RAMP_UP_RATE expects a value in W/s + * NVML_POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_RATE expects a value in W/s + * NVML_POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_HYSTERESIS expects a value in ms + * + * @param device The identifier of the target device + * @param profile Reference to \ref nvmlPowerSmoothingProfile_v1_t struct + * + * @return + * - \ref NVML_SUCCESS if the Active Profile was successfully set + * - \ref NVML_ERROR_INVALID_ARGUMENT if device is invalid or profile parameter/value was invalid + * - \ref NVML_ERROR_NO_PERMISSION if user does not have permission to change any profile parameters + * - \ref NVML_ERROR_ARGUMENT_VERSION_MISMATCH if the structure version is not supported + * + **/ +nvmlReturn_t DECLDIR nvmlDevicePowerSmoothingUpdatePresetProfileParam(nvmlDevice_t device, + nvmlPowerSmoothingProfile_t *profile); +/** + * Enable or disable the Power Smoothing Feature. + * Requires root/admin permissions. 
+ * + * %BLACKWELL_OR_NEWER% + * + * See \ref nvmlEnableState_t for details on allowed states + * + * @param device The identifier of the target device + * @param state Reference to \ref nvmlPowerSmoothingState_v1_t + * + * @return + * - \ref NVML_SUCCESS if the feature state was successfully set + * - \ref NVML_ERROR_INVALID_ARGUMENT if device is invalid or state is NULL + * - \ref NVML_ERROR_NO_PERMISSION if user does not have permission to change feature state + * - \ref NVML_ERROR_NOT_SUPPORTED if this feature is not supported by the device + * + **/ +nvmlReturn_t DECLDIR nvmlDevicePowerSmoothingSetState(nvmlDevice_t device, + nvmlPowerSmoothingState_t *state); +/** @} */ // @defgroup /** * NVML API versioning support @@ -9653,7 +13187,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses(nvmlDevice_t device nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos); nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements(nvmlDevice_t device, unsigned int profileId, nvmlGpuInstancePlacement_t *placements, unsigned int *count); nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t *licenseInfo); - +nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); #endif // #ifdef NVML_NO_UNVERSIONED_FUNC_DEFS #if defined(NVML_NO_UNVERSIONED_FUNC_DEFS) @@ -9679,6 +13213,8 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo(nvmlVgpuInstance_t vgpuInsta #undef nvmlGetBlacklistDeviceInfoByIndex #undef nvmlDeviceGetGpuInstancePossiblePlacements #undef nvmlVgpuInstanceGetLicenseInfo +#undef nvmlDeviceGetDriverModel +#undef nvmlDeviceSetPowerManagementLimit #endif diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go index 03145b2..f824710 100644 --- 
a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go @@ -79,3 +79,70 @@ func (l *library) SystemGetTopologyGpuSet(cpuNumber int) ([]Device, Return) { ret = nvmlSystemGetTopologyGpuSet(uint32(cpuNumber), &count, &deviceArray[0]) return convertSlice[nvmlDevice, Device](deviceArray), ret } + +// nvml.SystemGetConfComputeCapabilities() +func (l *library) SystemGetConfComputeCapabilities() (ConfComputeSystemCaps, Return) { + var capabilities ConfComputeSystemCaps + ret := nvmlSystemGetConfComputeCapabilities(&capabilities) + return capabilities, ret +} + +// nvml.SystemGetConfComputeState() +func (l *library) SystemGetConfComputeState() (ConfComputeSystemState, Return) { + var state ConfComputeSystemState + ret := nvmlSystemGetConfComputeState(&state) + return state, ret +} + +// nvml.SystemGetConfComputeGpusReadyState() +func (l *library) SystemGetConfComputeGpusReadyState() (uint32, Return) { + var isAcceptingWork uint32 + ret := nvmlSystemGetConfComputeGpusReadyState(&isAcceptingWork) + return isAcceptingWork, ret +} + +// nvml.SystemSetConfComputeGpusReadyState() +func (l *library) SystemSetConfComputeGpusReadyState(isAcceptingWork uint32) Return { + return nvmlSystemSetConfComputeGpusReadyState(isAcceptingWork) +} + +// nvml.SystemSetNvlinkBwMode() +func (l *library) SystemSetNvlinkBwMode(nvlinkBwMode uint32) Return { + return nvmlSystemSetNvlinkBwMode(nvlinkBwMode) +} + +// nvml.SystemGetNvlinkBwMode() +func (l *library) SystemGetNvlinkBwMode() (uint32, Return) { + var nvlinkBwMode uint32 + ret := nvmlSystemGetNvlinkBwMode(&nvlinkBwMode) + return nvlinkBwMode, ret +} + +// nvml.SystemGetConfComputeKeyRotationThresholdInfo() +func (l *library) SystemGetConfComputeKeyRotationThresholdInfo() (ConfComputeGetKeyRotationThresholdInfo, Return) { + var keyRotationThresholdInfo ConfComputeGetKeyRotationThresholdInfo + keyRotationThresholdInfo.Version = STRUCT_VERSION(keyRotationThresholdInfo, 1) + ret := 
nvmlSystemGetConfComputeKeyRotationThresholdInfo(&keyRotationThresholdInfo) + return keyRotationThresholdInfo, ret +} + +// nvml.SystemGetConfComputeSettings() +func (l *library) SystemGetConfComputeSettings() (SystemConfComputeSettings, Return) { + var settings SystemConfComputeSettings + settings.Version = STRUCT_VERSION(settings, 1) + ret := nvmlSystemGetConfComputeSettings(&settings) + return settings, ret +} + +// nvml.SystemSetConfComputeKeyRotationThresholdInfo() +func (l *library) SystemSetConfComputeKeyRotationThresholdInfo(keyRotationThresholdInfo ConfComputeSetKeyRotationThresholdInfo) Return { + return nvmlSystemSetConfComputeKeyRotationThresholdInfo(&keyRotationThresholdInfo) +} + +// nvml.SystemGetDriverBranch() +func (l *library) SystemGetDriverBranch() (SystemDriverBranchInfo, Return) { + var branchInfo SystemDriverBranchInfo + branchInfo.Version = STRUCT_VERSION(branchInfo, 1) + ret := nvmlSystemGetDriverBranch(&branchInfo, SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + return branchInfo, ret +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go index 6a57bab..3be1796 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go @@ -9,6 +9,34 @@ type nvmlDevice struct { Handle *_Ctype_struct_nvmlDevice_st } +type nvmlGpuInstance struct { + Handle *_Ctype_struct_nvmlGpuInstance_st +} + +type PciInfoExt_v1 struct { + Version uint32 + Domain uint32 + Bus uint32 + Device uint32 + PciDeviceId uint32 + PciSubSystemId uint32 + BaseClass uint32 + SubClass uint32 + BusId [32]int8 +} + +type PciInfoExt struct { + Version uint32 + Domain uint32 + Bus uint32 + Device uint32 + PciDeviceId uint32 + PciSubSystemId uint32 + BaseClass uint32 + SubClass uint32 + BusId [32]int8 +} + type PciInfo struct { BusIdLegacy [16]int8 Domain uint32 @@ -70,6 +98,28 @@ type ProcessInfo struct { ComputeInstanceId uint32 } +type ProcessDetail_v1 struct 
{ + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 + UsedGpuCcProtectedMemory uint64 +} + +type ProcessDetailList_v1 struct { + Version uint32 + Mode uint32 + NumProcArrayEntries uint32 + ProcArray *ProcessDetail_v1 +} + +type ProcessDetailList struct { + Version uint32 + Mode uint32 + NumProcArrayEntries uint32 + ProcArray *ProcessDetail_v1 +} + type DeviceAttributes struct { MultiprocessorCount uint32 SharedCopyEngineCount uint32 @@ -82,6 +132,10 @@ type DeviceAttributes struct { MemorySizeMB uint64 } +type C2cModeInfo_v1 struct { + IsC2cEnabled uint32 +} + type RowRemapperHistogramValues struct { Max uint32 High uint32 @@ -132,6 +186,58 @@ type GpuThermalSettings struct { Sensor [3]GpuThermalSettingsSensor } +type CoolerInfo_v1 struct { + Version uint32 + Index uint32 + SignalType uint32 + Target uint32 +} + +type CoolerInfo struct { + Version uint32 + Index uint32 + SignalType uint32 + Target uint32 +} + +const sizeofUUIDValue = unsafe.Sizeof([41]byte{}) + +type UUIDValue [sizeofUUIDValue]byte + +type UUID_v1 struct { + Version uint32 + Type uint32 + Value [41]byte + Pad_cgo_0 [3]byte +} + +type UUID struct { + Version uint32 + Type uint32 + Value [41]byte + Pad_cgo_0 [3]byte +} + +type DramEncryptionInfo_v1 struct { + Version uint32 + EncryptionState uint32 +} + +type DramEncryptionInfo struct { + Version uint32 + EncryptionState uint32 +} + +type MarginTemperature_v1 struct { + Version uint32 + MarginTemperature int32 +} + +type MarginTemperature struct { + Version uint32 + MarginTemperature int32 +} + type ClkMonFaultInfo struct { ClkApiDomain uint32 ClkDomainFaultMask uint32 @@ -143,10 +249,248 @@ type ClkMonStatus struct { ClkMonList [32]ClkMonFaultInfo } +type ClockOffset_v1 struct { + Version uint32 + Type uint32 + Pstate uint32 + ClockOffsetMHz int32 + MinClockOffsetMHz int32 + MaxClockOffsetMHz int32 +} + +type ClockOffset struct { + Version uint32 + Type uint32 + Pstate uint32 + ClockOffsetMHz int32 + 
MinClockOffsetMHz int32 + MaxClockOffsetMHz int32 +} + +type FanSpeedInfo_v1 struct { + Version uint32 + Fan uint32 + Speed uint32 +} + +type FanSpeedInfo struct { + Version uint32 + Fan uint32 + Speed uint32 +} + +type DevicePerfModes_v1 struct { + Version uint32 + Str [2048]int8 +} + +type DevicePerfModes struct { + Version uint32 + Str [2048]int8 +} + +type DeviceCurrentClockFreqs_v1 struct { + Version uint32 + Str [2048]int8 +} + +type DeviceCurrentClockFreqs struct { + Version uint32 + Str [2048]int8 +} + +type ProcessUtilizationSample struct { + Pid uint32 + TimeStamp uint64 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 +} + +type ProcessUtilizationInfo_v1 struct { + TimeStamp uint64 + Pid uint32 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 + JpgUtil uint32 + OfaUtil uint32 + Pad_cgo_0 [4]byte +} + +type ProcessesUtilizationInfo_v1 struct { + Version uint32 + ProcessSamplesCount uint32 + LastSeenTimeStamp uint64 + ProcUtilArray *ProcessUtilizationInfo_v1 +} + +type ProcessesUtilizationInfo struct { + Version uint32 + ProcessSamplesCount uint32 + LastSeenTimeStamp uint64 + ProcUtilArray *ProcessUtilizationInfo_v1 +} + +type EccSramErrorStatus_v1 struct { + Version uint32 + AggregateUncParity uint64 + AggregateUncSecDed uint64 + AggregateCor uint64 + VolatileUncParity uint64 + VolatileUncSecDed uint64 + VolatileCor uint64 + AggregateUncBucketL2 uint64 + AggregateUncBucketSm uint64 + AggregateUncBucketPcie uint64 + AggregateUncBucketMcu uint64 + AggregateUncBucketOther uint64 + BThresholdExceeded uint32 + Pad_cgo_0 [4]byte +} + +type EccSramErrorStatus struct { + Version uint32 + AggregateUncParity uint64 + AggregateUncSecDed uint64 + AggregateCor uint64 + VolatileUncParity uint64 + VolatileUncSecDed uint64 + VolatileCor uint64 + AggregateUncBucketL2 uint64 + AggregateUncBucketSm uint64 + AggregateUncBucketPcie uint64 + AggregateUncBucketMcu uint64 + AggregateUncBucketOther uint64 + BThresholdExceeded uint32 + Pad_cgo_0 
[4]byte +} + +type PlatformInfo_v1 struct { + Version uint32 + IbGuid [16]uint8 + RackGuid [16]uint8 + ChassisPhysicalSlotNumber uint8 + ComputeSlotIndex uint8 + NodeIndex uint8 + PeerType uint8 + ModuleId uint8 + Pad_cgo_0 [3]byte +} + +type PlatformInfo_v2 struct { + Version uint32 + IbGuid [16]uint8 + ChassisSerialNumber [16]uint8 + SlotNumber uint8 + TrayIndex uint8 + HostId uint8 + PeerType uint8 + ModuleId uint8 + Pad_cgo_0 [3]byte +} + +type PlatformInfo struct { + Version uint32 + IbGuid [16]uint8 + ChassisSerialNumber [16]uint8 + SlotNumber uint8 + TrayIndex uint8 + HostId uint8 + PeerType uint8 + ModuleId uint8 + Pad_cgo_0 [3]byte +} + +type DeviceArchitecture uint32 + +type BusType uint32 + +type FanControlPolicy uint32 + +type PowerSource uint32 + +type GpuDynamicPstatesInfoUtilization struct { + BIsPresent uint32 + Percentage uint32 + IncThreshold uint32 + DecThreshold uint32 +} + +type GpuDynamicPstatesInfo struct { + Flags uint32 + Utilization [8]GpuDynamicPstatesInfoUtilization +} + +type PowerScopeType byte + +type PowerValue_v2 struct { + Version uint32 + PowerScope uint8 + PowerValueMw uint32 +} + type nvmlVgpuTypeId uint32 type nvmlVgpuInstance uint32 +type VgpuHeterogeneousMode_v1 struct { + Version uint32 + Mode uint32 +} + +type VgpuHeterogeneousMode struct { + Version uint32 + Mode uint32 +} + +type VgpuPlacementId_v1 struct { + Version uint32 + PlacementId uint32 +} + +type VgpuPlacementId struct { + Version uint32 + PlacementId uint32 +} + +type VgpuPlacementList_v1 struct { + Version uint32 + PlacementSize uint32 + Count uint32 + PlacementIds *uint32 +} + +type VgpuPlacementList_v2 struct { + Version uint32 + PlacementSize uint32 + Count uint32 + PlacementIds *uint32 + Mode uint32 + Pad_cgo_0 [4]byte +} + +type VgpuPlacementList struct { + Version uint32 + PlacementSize uint32 + Count uint32 + PlacementIds *uint32 + Mode uint32 + Pad_cgo_0 [4]byte +} + +type VgpuTypeBar1Info_v1 struct { + Version uint32 + Bar1Size uint64 +} + +type 
VgpuTypeBar1Info struct { + Version uint32 + Bar1Size uint64 +} + type VgpuInstanceUtilizationSample struct { VgpuInstance uint32 TimeStamp uint64 @@ -156,6 +500,34 @@ type VgpuInstanceUtilizationSample struct { DecUtil [8]byte } +type VgpuInstanceUtilizationInfo_v1 struct { + TimeStamp uint64 + VgpuInstance uint32 + Pad_cgo_0 [4]byte + SmUtil [8]byte + MemUtil [8]byte + EncUtil [8]byte + DecUtil [8]byte + JpgUtil [8]byte + OfaUtil [8]byte +} + +type VgpuInstancesUtilizationInfo_v1 struct { + Version uint32 + SampleValType uint32 + VgpuInstanceCount uint32 + LastSeenTimeStamp uint64 + VgpuUtilArray *VgpuInstanceUtilizationInfo_v1 +} + +type VgpuInstancesUtilizationInfo struct { + Version uint32 + SampleValType uint32 + VgpuInstanceCount uint32 + LastSeenTimeStamp uint64 + VgpuUtilArray *VgpuInstanceUtilizationInfo_v1 +} + type VgpuProcessUtilizationSample struct { VgpuInstance uint32 Pid uint32 @@ -167,6 +539,43 @@ type VgpuProcessUtilizationSample struct { DecUtil uint32 } +type VgpuProcessUtilizationInfo_v1 struct { + ProcessName [64]int8 + TimeStamp uint64 + VgpuInstance uint32 + Pid uint32 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 + JpgUtil uint32 + OfaUtil uint32 +} + +type VgpuProcessesUtilizationInfo_v1 struct { + Version uint32 + VgpuProcessCount uint32 + LastSeenTimeStamp uint64 + VgpuProcUtilArray *VgpuProcessUtilizationInfo_v1 +} + +type VgpuProcessesUtilizationInfo struct { + Version uint32 + VgpuProcessCount uint32 + LastSeenTimeStamp uint64 + VgpuProcUtilArray *VgpuProcessUtilizationInfo_v1 +} + +type VgpuRuntimeState_v1 struct { + Version uint32 + Size uint64 +} + +type VgpuRuntimeState struct { + Version uint32 + Size uint64 +} + type VgpuSchedulerParamsVgpuSchedDataWithARR struct { AvgFactor uint32 Timeslice uint32 @@ -192,7 +601,7 @@ type VgpuSchedulerLogEntry struct { type VgpuSchedulerLog struct { EngineId uint32 SchedulerPolicy uint32 - IsEnabledARR uint32 + ArrMode uint32 SchedulerParams [8]byte EntriesCount uint32 
LogEntries [200]VgpuSchedulerLogEntry @@ -200,7 +609,7 @@ type VgpuSchedulerLog struct { type VgpuSchedulerGetState struct { SchedulerPolicy uint32 - IsEnabledARR uint32 + ArrMode uint32 SchedulerParams [8]byte } @@ -251,15 +660,6 @@ type VgpuLicenseInfo struct { CurrentState uint32 } -type ProcessUtilizationSample struct { - Pid uint32 - TimeStamp uint64 - SmUtil uint32 - MemUtil uint32 - EncUtil uint32 - DecUtil uint32 -} - type GridLicenseExpiry struct { Year uint32 Month uint16 @@ -286,24 +686,114 @@ type GridLicensableFeatures struct { GridLicensableFeatures [3]GridLicensableFeature } -type DeviceArchitecture uint32 +type VgpuTypeIdInfo_v1 struct { + Version uint32 + VgpuCount uint32 + VgpuTypeIds *uint32 +} -type BusType uint32 +type VgpuTypeIdInfo struct { + Version uint32 + VgpuCount uint32 + VgpuTypeIds *uint32 +} -type FanControlPolicy uint32 +type VgpuTypeMaxInstance_v1 struct { + Version uint32 + VgpuTypeId uint32 + MaxInstancePerGI uint32 +} -type PowerSource uint32 +type VgpuTypeMaxInstance struct { + Version uint32 + VgpuTypeId uint32 + MaxInstancePerGI uint32 +} -type GpuDynamicPstatesInfoUtilization struct { - BIsPresent uint32 - Percentage uint32 - IncThreshold uint32 - DecThreshold uint32 +type ActiveVgpuInstanceInfo_v1 struct { + Version uint32 + VgpuCount uint32 + VgpuInstances *uint32 } -type GpuDynamicPstatesInfo struct { - Flags uint32 - Utilization [8]GpuDynamicPstatesInfoUtilization +type ActiveVgpuInstanceInfo struct { + Version uint32 + VgpuCount uint32 + VgpuInstances *uint32 +} + +type VgpuSchedulerState_v1 struct { + Version uint32 + EngineId uint32 + SchedulerPolicy uint32 + EnableARRMode uint32 + SchedulerParams [8]byte +} + +type VgpuSchedulerState struct { + Version uint32 + EngineId uint32 + SchedulerPolicy uint32 + EnableARRMode uint32 + SchedulerParams [8]byte +} + +type VgpuSchedulerStateInfo_v1 struct { + Version uint32 + EngineId uint32 + SchedulerPolicy uint32 + ArrMode uint32 + SchedulerParams [8]byte +} + +type 
VgpuSchedulerStateInfo struct { + Version uint32 + EngineId uint32 + SchedulerPolicy uint32 + ArrMode uint32 + SchedulerParams [8]byte +} + +type VgpuSchedulerLogInfo_v1 struct { + Version uint32 + EngineId uint32 + SchedulerPolicy uint32 + ArrMode uint32 + SchedulerParams [8]byte + EntriesCount uint32 + LogEntries [200]VgpuSchedulerLogEntry +} + +type VgpuSchedulerLogInfo struct { + Version uint32 + EngineId uint32 + SchedulerPolicy uint32 + ArrMode uint32 + SchedulerParams [8]byte + EntriesCount uint32 + LogEntries [200]VgpuSchedulerLogEntry +} + +type VgpuCreatablePlacementInfo_v1 struct { + Version uint32 + VgpuTypeId uint32 + Count uint32 + PlacementIds *uint32 + PlacementSize uint32 + Pad_cgo_0 [4]byte +} + +type VgpuCreatablePlacementInfo struct { + Version uint32 + VgpuTypeId uint32 + Count uint32 + PlacementIds *uint32 + PlacementSize uint32 + Pad_cgo_0 [4]byte +} + +type NvLinkPowerThres struct { + LowPwrThreshold uint32 } type FieldValue struct { @@ -366,6 +856,66 @@ type nvmlEventData struct { ComputeInstanceId uint32 } +type SystemEventSet struct { + Handle *_Ctype_struct_nvmlSystemEventSet_st +} + +type SystemEventSetCreateRequest_v1 struct { + Version uint32 + Set SystemEventSet +} + +type SystemEventSetCreateRequest struct { + Version uint32 + Set SystemEventSet +} + +type SystemEventSetFreeRequest_v1 struct { + Version uint32 + Set SystemEventSet +} + +type SystemEventSetFreeRequest struct { + Version uint32 + Set SystemEventSet +} + +type SystemRegisterEventRequest_v1 struct { + Version uint32 + EventTypes uint64 + Set SystemEventSet +} + +type SystemRegisterEventRequest struct { + Version uint32 + EventTypes uint64 + Set SystemEventSet +} + +type SystemEventData_v1 struct { + EventType uint64 + GpuId uint32 + Pad_cgo_0 [4]byte +} + +type SystemEventSetWaitRequest_v1 struct { + Version uint32 + Timeoutms uint32 + Set SystemEventSet + Data *SystemEventData_v1 + DataSize uint32 + NumEvent uint32 +} + +type SystemEventSetWaitRequest struct { + 
Version uint32 + Timeoutms uint32 + Set SystemEventSet + Data *SystemEventData_v1 + DataSize uint32 + NumEvent uint32 +} + type AccountingStats struct { GpuUtilization uint32 MemoryUtilization uint32 @@ -408,18 +958,166 @@ type FBCSessionInfo struct { AverageLatency uint32 } +type ConfComputeSystemCaps struct { + CpuCaps uint32 + GpusCaps uint32 +} + +type ConfComputeSystemState struct { + Environment uint32 + CcFeature uint32 + DevToolsMode uint32 +} + +type SystemConfComputeSettings_v1 struct { + Version uint32 + Environment uint32 + CcFeature uint32 + DevToolsMode uint32 + MultiGpuMode uint32 +} + +type SystemConfComputeSettings struct { + Version uint32 + Environment uint32 + CcFeature uint32 + DevToolsMode uint32 + MultiGpuMode uint32 +} + +type ConfComputeMemSizeInfo struct { + ProtectedMemSizeKib uint64 + UnprotectedMemSizeKib uint64 +} + +type ConfComputeGpuCertificate struct { + CertChainSize uint32 + AttestationCertChainSize uint32 + CertChain [4096]uint8 + AttestationCertChain [5120]uint8 +} + +type ConfComputeGpuAttestationReport struct { + IsCecAttestationReportPresent uint32 + AttestationReportSize uint32 + CecAttestationReportSize uint32 + Nonce [32]uint8 + AttestationReport [8192]uint8 + CecAttestationReport [4096]uint8 +} + +type ConfComputeSetKeyRotationThresholdInfo_v1 struct { + Version uint32 + MaxAttackerAdvantage uint64 +} + +type ConfComputeSetKeyRotationThresholdInfo struct { + Version uint32 + MaxAttackerAdvantage uint64 +} + +type ConfComputeGetKeyRotationThresholdInfo_v1 struct { + Version uint32 + AttackerAdvantage uint64 +} + +type ConfComputeGetKeyRotationThresholdInfo struct { + Version uint32 + AttackerAdvantage uint64 +} + type GpuFabricState byte type GpuFabricInfo struct { - ClusterUuid [16]int8 + ClusterUuid [16]uint8 Status uint32 - PartitionId uint32 + CliqueId uint32 State uint8 Pad_cgo_0 [3]byte } +type GpuFabricInfo_v2 struct { + Version uint32 + ClusterUuid [16]uint8 + Status uint32 + CliqueId uint32 + State uint8 + 
HealthMask uint32 +} + +type GpuFabricInfoV struct { + Version uint32 + ClusterUuid [16]uint8 + Status uint32 + CliqueId uint32 + State uint8 + HealthMask uint32 +} + +type SystemDriverBranchInfo_v1 struct { + Version uint32 + Branch [80]int8 +} + +type SystemDriverBranchInfo struct { + Version uint32 + Branch [80]int8 +} + type AffinityScope uint32 +type Temperature_v1 struct { + Version uint32 + SensorType uint32 + Temperature int32 +} + +type Temperature struct { + Version uint32 + SensorType uint32 + Temperature int32 +} + +type NvlinkSupportedBwModes_v1 struct { + Version uint32 + BwModes [23]uint8 + TotalBwModes uint8 +} + +type NvlinkSupportedBwModes struct { + Version uint32 + BwModes [23]uint8 + TotalBwModes uint8 +} + +type NvlinkGetBwMode_v1 struct { + Version uint32 + BIsBest uint32 + BwMode uint8 + Pad_cgo_0 [3]byte +} + +type NvlinkGetBwMode struct { + Version uint32 + BIsBest uint32 + BwMode uint8 + Pad_cgo_0 [3]byte +} + +type NvlinkSetBwMode_v1 struct { + Version uint32 + BSetBest uint32 + BwMode uint8 + Pad_cgo_0 [3]byte +} + +type NvlinkSetBwMode struct { + Version uint32 + BSetBest uint32 + BwMode uint8 + Pad_cgo_0 [3]byte +} + type VgpuVersion struct { MinVersion uint32 MaxVersion uint32 @@ -494,6 +1192,23 @@ type GpuInstanceProfileInfo_v2 struct { Name [96]int8 } +type GpuInstanceProfileInfo_v3 struct { + Version uint32 + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 + Name [96]int8 + Capabilities uint32 + Pad_cgo_0 [4]byte +} + type nvmlGpuInstanceInfo struct { Device nvmlDevice Id uint32 @@ -501,10 +1216,6 @@ type nvmlGpuInstanceInfo struct { Placement GpuInstancePlacement } -type nvmlGpuInstance struct { - Handle *_Ctype_struct_nvmlGpuInstance_st -} - type ComputeInstancePlacement struct { Start uint32 Size uint32 @@ -536,6 +1247,21 @@ type ComputeInstanceProfileInfo_v2 struct { 
Name [96]int8 } +type ComputeInstanceProfileInfo_v3 struct { + Version uint32 + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 + Name [96]int8 + Capabilities uint32 +} + type nvmlComputeInstanceInfo struct { Device nvmlDevice GpuInstance nvmlGpuInstance @@ -570,7 +1296,7 @@ type nvmlGpmMetricsGetType struct { NumMetrics uint32 Sample1 nvmlGpmSample Sample2 nvmlGpmSample - Metrics [98]GpmMetric + Metrics [210]GpmMetric } type GpmSupport struct { @@ -578,6 +1304,90 @@ type GpmSupport struct { IsSupportedDevice uint32 } -type NvLinkPowerThres struct { - LowPwrThreshold uint32 +type DeviceCapabilities_v1 struct { + Version uint32 + CapMask uint32 +} + +type DeviceCapabilities struct { + Version uint32 + CapMask uint32 +} + +type Mask255 struct { + Mask [8]uint32 +} + +type WorkloadPowerProfileInfo_v1 struct { + Version uint32 + ProfileId uint32 + Priority uint32 + ConflictingMask Mask255 +} + +type WorkloadPowerProfileInfo struct { + Version uint32 + ProfileId uint32 + Priority uint32 + ConflictingMask Mask255 +} + +type WorkloadPowerProfileProfilesInfo_v1 struct { + Version uint32 + PerfProfilesMask Mask255 + PerfProfile [255]WorkloadPowerProfileInfo +} + +type WorkloadPowerProfileProfilesInfo struct { + Version uint32 + PerfProfilesMask Mask255 + PerfProfile [255]WorkloadPowerProfileInfo +} + +type WorkloadPowerProfileCurrentProfiles_v1 struct { + Version uint32 + PerfProfilesMask Mask255 + RequestedProfilesMask Mask255 + EnforcedProfilesMask Mask255 +} + +type WorkloadPowerProfileCurrentProfiles struct { + Version uint32 + PerfProfilesMask Mask255 + RequestedProfilesMask Mask255 + EnforcedProfilesMask Mask255 +} + +type WorkloadPowerProfileRequestedProfiles_v1 struct { + Version uint32 + RequestedProfilesMask Mask255 +} + +type WorkloadPowerProfileRequestedProfiles struct { + Version uint32 + 
RequestedProfilesMask Mask255 +} + +type PowerSmoothingProfile_v1 struct { + Version uint32 + ProfileId uint32 + ParamId uint32 + Value float64 +} + +type PowerSmoothingProfile struct { + Version uint32 + ProfileId uint32 + ParamId uint32 + Value float64 +} + +type PowerSmoothingState_v1 struct { + Version uint32 + State uint32 +} + +type PowerSmoothingState struct { + Version uint32 + State uint32 } diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go index da49524..b1e0fa7 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go @@ -478,3 +478,32 @@ func (l *library) GetVgpuDriverCapabilities(capability VgpuDriverCapability) (bo ret := nvmlGetVgpuDriverCapabilities(capability, &capResult) return (capResult != 0), ret } + +// nvml.VgpuTypeGetBAR1Info() +func (l *library) VgpuTypeGetBAR1Info(vgpuTypeId VgpuTypeId) (VgpuTypeBar1Info, Return) { + return vgpuTypeId.GetBAR1Info() +} + +func (vgpuTypeId nvmlVgpuTypeId) GetBAR1Info() (VgpuTypeBar1Info, Return) { + var bar1Info VgpuTypeBar1Info + bar1Info.Version = STRUCT_VERSION(bar1Info, 1) + ret := nvmlVgpuTypeGetBAR1Info(vgpuTypeId, &bar1Info) + return bar1Info, ret +} + +// nvml.VgpuInstanceGetRuntimeStateSize() +func (l *library) VgpuInstanceGetRuntimeStateSize(vgpuInstance VgpuInstance) (VgpuRuntimeState, Return) { + return vgpuInstance.GetRuntimeStateSize() +} + +func (vgpuInstance nvmlVgpuInstance) GetRuntimeStateSize() (VgpuRuntimeState, Return) { + var pState VgpuRuntimeState + pState.Version = STRUCT_VERSION(pState, 1) + ret := nvmlVgpuInstanceGetRuntimeStateSize(vgpuInstance, &pState) + return pState, ret +} + +// nvml.VgpuTypeGetMaxInstancesPerGpuInstance() +func (l *library) VgpuTypeGetMaxInstancesPerGpuInstance(maxInstance *VgpuTypeMaxInstance) Return { + return nvmlVgpuTypeGetMaxInstancesPerGpuInstance(maxInstance) +} diff --git 
a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go index 9997a27..bfe4d07 100644 --- a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/zz_generated.api.go @@ -20,297 +20,372 @@ package nvml // The variables below represent package level methods from the library type. var ( - ComputeInstanceDestroy = libnvml.ComputeInstanceDestroy - ComputeInstanceGetInfo = libnvml.ComputeInstanceGetInfo - DeviceCcuGetStreamState = libnvml.DeviceCcuGetStreamState - DeviceCcuSetStreamState = libnvml.DeviceCcuSetStreamState - DeviceClearAccountingPids = libnvml.DeviceClearAccountingPids - DeviceClearCpuAffinity = libnvml.DeviceClearCpuAffinity - DeviceClearEccErrorCounts = libnvml.DeviceClearEccErrorCounts - DeviceClearFieldValues = libnvml.DeviceClearFieldValues - DeviceCreateGpuInstance = libnvml.DeviceCreateGpuInstance - DeviceCreateGpuInstanceWithPlacement = libnvml.DeviceCreateGpuInstanceWithPlacement - DeviceDiscoverGpus = libnvml.DeviceDiscoverGpus - DeviceFreezeNvLinkUtilizationCounter = libnvml.DeviceFreezeNvLinkUtilizationCounter - DeviceGetAPIRestriction = libnvml.DeviceGetAPIRestriction - DeviceGetAccountingBufferSize = libnvml.DeviceGetAccountingBufferSize - DeviceGetAccountingMode = libnvml.DeviceGetAccountingMode - DeviceGetAccountingPids = libnvml.DeviceGetAccountingPids - DeviceGetAccountingStats = libnvml.DeviceGetAccountingStats - DeviceGetActiveVgpus = libnvml.DeviceGetActiveVgpus - DeviceGetAdaptiveClockInfoStatus = libnvml.DeviceGetAdaptiveClockInfoStatus - DeviceGetApplicationsClock = libnvml.DeviceGetApplicationsClock - DeviceGetArchitecture = libnvml.DeviceGetArchitecture - DeviceGetAttributes = libnvml.DeviceGetAttributes - DeviceGetAutoBoostedClocksEnabled = libnvml.DeviceGetAutoBoostedClocksEnabled - DeviceGetBAR1MemoryInfo = libnvml.DeviceGetBAR1MemoryInfo - DeviceGetBoardId = libnvml.DeviceGetBoardId - 
DeviceGetBoardPartNumber = libnvml.DeviceGetBoardPartNumber - DeviceGetBrand = libnvml.DeviceGetBrand - DeviceGetBridgeChipInfo = libnvml.DeviceGetBridgeChipInfo - DeviceGetBusType = libnvml.DeviceGetBusType - DeviceGetClkMonStatus = libnvml.DeviceGetClkMonStatus - DeviceGetClock = libnvml.DeviceGetClock - DeviceGetClockInfo = libnvml.DeviceGetClockInfo - DeviceGetComputeInstanceId = libnvml.DeviceGetComputeInstanceId - DeviceGetComputeMode = libnvml.DeviceGetComputeMode - DeviceGetComputeRunningProcesses = libnvml.DeviceGetComputeRunningProcesses - DeviceGetCount = libnvml.DeviceGetCount - DeviceGetCpuAffinity = libnvml.DeviceGetCpuAffinity - DeviceGetCpuAffinityWithinScope = libnvml.DeviceGetCpuAffinityWithinScope - DeviceGetCreatableVgpus = libnvml.DeviceGetCreatableVgpus - DeviceGetCudaComputeCapability = libnvml.DeviceGetCudaComputeCapability - DeviceGetCurrPcieLinkGeneration = libnvml.DeviceGetCurrPcieLinkGeneration - DeviceGetCurrPcieLinkWidth = libnvml.DeviceGetCurrPcieLinkWidth - DeviceGetCurrentClocksThrottleReasons = libnvml.DeviceGetCurrentClocksThrottleReasons - DeviceGetDecoderUtilization = libnvml.DeviceGetDecoderUtilization - DeviceGetDefaultApplicationsClock = libnvml.DeviceGetDefaultApplicationsClock - DeviceGetDefaultEccMode = libnvml.DeviceGetDefaultEccMode - DeviceGetDetailedEccErrors = libnvml.DeviceGetDetailedEccErrors - DeviceGetDeviceHandleFromMigDeviceHandle = libnvml.DeviceGetDeviceHandleFromMigDeviceHandle - DeviceGetDisplayActive = libnvml.DeviceGetDisplayActive - DeviceGetDisplayMode = libnvml.DeviceGetDisplayMode - DeviceGetDriverModel = libnvml.DeviceGetDriverModel - DeviceGetDynamicPstatesInfo = libnvml.DeviceGetDynamicPstatesInfo - DeviceGetEccMode = libnvml.DeviceGetEccMode - DeviceGetEncoderCapacity = libnvml.DeviceGetEncoderCapacity - DeviceGetEncoderSessions = libnvml.DeviceGetEncoderSessions - DeviceGetEncoderStats = libnvml.DeviceGetEncoderStats - DeviceGetEncoderUtilization = libnvml.DeviceGetEncoderUtilization - 
DeviceGetEnforcedPowerLimit = libnvml.DeviceGetEnforcedPowerLimit - DeviceGetFBCSessions = libnvml.DeviceGetFBCSessions - DeviceGetFBCStats = libnvml.DeviceGetFBCStats - DeviceGetFanControlPolicy_v2 = libnvml.DeviceGetFanControlPolicy_v2 - DeviceGetFanSpeed = libnvml.DeviceGetFanSpeed - DeviceGetFanSpeed_v2 = libnvml.DeviceGetFanSpeed_v2 - DeviceGetFieldValues = libnvml.DeviceGetFieldValues - DeviceGetGpcClkMinMaxVfOffset = libnvml.DeviceGetGpcClkMinMaxVfOffset - DeviceGetGpcClkVfOffset = libnvml.DeviceGetGpcClkVfOffset - DeviceGetGpuFabricInfo = libnvml.DeviceGetGpuFabricInfo - DeviceGetGpuInstanceById = libnvml.DeviceGetGpuInstanceById - DeviceGetGpuInstanceId = libnvml.DeviceGetGpuInstanceId - DeviceGetGpuInstancePossiblePlacements = libnvml.DeviceGetGpuInstancePossiblePlacements - DeviceGetGpuInstanceProfileInfo = libnvml.DeviceGetGpuInstanceProfileInfo - DeviceGetGpuInstanceProfileInfoV = libnvml.DeviceGetGpuInstanceProfileInfoV - DeviceGetGpuInstanceRemainingCapacity = libnvml.DeviceGetGpuInstanceRemainingCapacity - DeviceGetGpuInstances = libnvml.DeviceGetGpuInstances - DeviceGetGpuMaxPcieLinkGeneration = libnvml.DeviceGetGpuMaxPcieLinkGeneration - DeviceGetGpuOperationMode = libnvml.DeviceGetGpuOperationMode - DeviceGetGraphicsRunningProcesses = libnvml.DeviceGetGraphicsRunningProcesses - DeviceGetGridLicensableFeatures = libnvml.DeviceGetGridLicensableFeatures - DeviceGetGspFirmwareMode = libnvml.DeviceGetGspFirmwareMode - DeviceGetGspFirmwareVersion = libnvml.DeviceGetGspFirmwareVersion - DeviceGetHandleByIndex = libnvml.DeviceGetHandleByIndex - DeviceGetHandleByPciBusId = libnvml.DeviceGetHandleByPciBusId - DeviceGetHandleBySerial = libnvml.DeviceGetHandleBySerial - DeviceGetHandleByUUID = libnvml.DeviceGetHandleByUUID - DeviceGetHostVgpuMode = libnvml.DeviceGetHostVgpuMode - DeviceGetIndex = libnvml.DeviceGetIndex - DeviceGetInforomConfigurationChecksum = libnvml.DeviceGetInforomConfigurationChecksum - DeviceGetInforomImageVersion = 
libnvml.DeviceGetInforomImageVersion - DeviceGetInforomVersion = libnvml.DeviceGetInforomVersion - DeviceGetIrqNum = libnvml.DeviceGetIrqNum - DeviceGetMPSComputeRunningProcesses = libnvml.DeviceGetMPSComputeRunningProcesses - DeviceGetMaxClockInfo = libnvml.DeviceGetMaxClockInfo - DeviceGetMaxCustomerBoostClock = libnvml.DeviceGetMaxCustomerBoostClock - DeviceGetMaxMigDeviceCount = libnvml.DeviceGetMaxMigDeviceCount - DeviceGetMaxPcieLinkGeneration = libnvml.DeviceGetMaxPcieLinkGeneration - DeviceGetMaxPcieLinkWidth = libnvml.DeviceGetMaxPcieLinkWidth - DeviceGetMemClkMinMaxVfOffset = libnvml.DeviceGetMemClkMinMaxVfOffset - DeviceGetMemClkVfOffset = libnvml.DeviceGetMemClkVfOffset - DeviceGetMemoryAffinity = libnvml.DeviceGetMemoryAffinity - DeviceGetMemoryBusWidth = libnvml.DeviceGetMemoryBusWidth - DeviceGetMemoryErrorCounter = libnvml.DeviceGetMemoryErrorCounter - DeviceGetMemoryInfo = libnvml.DeviceGetMemoryInfo - DeviceGetMemoryInfo_v2 = libnvml.DeviceGetMemoryInfo_v2 - DeviceGetMigDeviceHandleByIndex = libnvml.DeviceGetMigDeviceHandleByIndex - DeviceGetMigMode = libnvml.DeviceGetMigMode - DeviceGetMinMaxClockOfPState = libnvml.DeviceGetMinMaxClockOfPState - DeviceGetMinMaxFanSpeed = libnvml.DeviceGetMinMaxFanSpeed - DeviceGetMinorNumber = libnvml.DeviceGetMinorNumber - DeviceGetMultiGpuBoard = libnvml.DeviceGetMultiGpuBoard - DeviceGetName = libnvml.DeviceGetName - DeviceGetNumFans = libnvml.DeviceGetNumFans - DeviceGetNumGpuCores = libnvml.DeviceGetNumGpuCores - DeviceGetNvLinkCapability = libnvml.DeviceGetNvLinkCapability - DeviceGetNvLinkErrorCounter = libnvml.DeviceGetNvLinkErrorCounter - DeviceGetNvLinkRemoteDeviceType = libnvml.DeviceGetNvLinkRemoteDeviceType - DeviceGetNvLinkRemotePciInfo = libnvml.DeviceGetNvLinkRemotePciInfo - DeviceGetNvLinkState = libnvml.DeviceGetNvLinkState - DeviceGetNvLinkUtilizationControl = libnvml.DeviceGetNvLinkUtilizationControl - DeviceGetNvLinkUtilizationCounter = libnvml.DeviceGetNvLinkUtilizationCounter - 
DeviceGetNvLinkVersion = libnvml.DeviceGetNvLinkVersion - DeviceGetP2PStatus = libnvml.DeviceGetP2PStatus - DeviceGetPciInfo = libnvml.DeviceGetPciInfo - DeviceGetPcieLinkMaxSpeed = libnvml.DeviceGetPcieLinkMaxSpeed - DeviceGetPcieReplayCounter = libnvml.DeviceGetPcieReplayCounter - DeviceGetPcieSpeed = libnvml.DeviceGetPcieSpeed - DeviceGetPcieThroughput = libnvml.DeviceGetPcieThroughput - DeviceGetPerformanceState = libnvml.DeviceGetPerformanceState - DeviceGetPersistenceMode = libnvml.DeviceGetPersistenceMode - DeviceGetPgpuMetadataString = libnvml.DeviceGetPgpuMetadataString - DeviceGetPowerManagementDefaultLimit = libnvml.DeviceGetPowerManagementDefaultLimit - DeviceGetPowerManagementLimit = libnvml.DeviceGetPowerManagementLimit - DeviceGetPowerManagementLimitConstraints = libnvml.DeviceGetPowerManagementLimitConstraints - DeviceGetPowerManagementMode = libnvml.DeviceGetPowerManagementMode - DeviceGetPowerSource = libnvml.DeviceGetPowerSource - DeviceGetPowerState = libnvml.DeviceGetPowerState - DeviceGetPowerUsage = libnvml.DeviceGetPowerUsage - DeviceGetProcessUtilization = libnvml.DeviceGetProcessUtilization - DeviceGetRemappedRows = libnvml.DeviceGetRemappedRows - DeviceGetRetiredPages = libnvml.DeviceGetRetiredPages - DeviceGetRetiredPagesPendingStatus = libnvml.DeviceGetRetiredPagesPendingStatus - DeviceGetRetiredPages_v2 = libnvml.DeviceGetRetiredPages_v2 - DeviceGetRowRemapperHistogram = libnvml.DeviceGetRowRemapperHistogram - DeviceGetSamples = libnvml.DeviceGetSamples - DeviceGetSerial = libnvml.DeviceGetSerial - DeviceGetSupportedClocksThrottleReasons = libnvml.DeviceGetSupportedClocksThrottleReasons - DeviceGetSupportedEventTypes = libnvml.DeviceGetSupportedEventTypes - DeviceGetSupportedGraphicsClocks = libnvml.DeviceGetSupportedGraphicsClocks - DeviceGetSupportedMemoryClocks = libnvml.DeviceGetSupportedMemoryClocks - DeviceGetSupportedPerformanceStates = libnvml.DeviceGetSupportedPerformanceStates - DeviceGetSupportedVgpus = 
libnvml.DeviceGetSupportedVgpus - DeviceGetTargetFanSpeed = libnvml.DeviceGetTargetFanSpeed - DeviceGetTemperature = libnvml.DeviceGetTemperature - DeviceGetTemperatureThreshold = libnvml.DeviceGetTemperatureThreshold - DeviceGetThermalSettings = libnvml.DeviceGetThermalSettings - DeviceGetTopologyCommonAncestor = libnvml.DeviceGetTopologyCommonAncestor - DeviceGetTopologyNearestGpus = libnvml.DeviceGetTopologyNearestGpus - DeviceGetTotalEccErrors = libnvml.DeviceGetTotalEccErrors - DeviceGetTotalEnergyConsumption = libnvml.DeviceGetTotalEnergyConsumption - DeviceGetUUID = libnvml.DeviceGetUUID - DeviceGetUtilizationRates = libnvml.DeviceGetUtilizationRates - DeviceGetVbiosVersion = libnvml.DeviceGetVbiosVersion - DeviceGetVgpuCapabilities = libnvml.DeviceGetVgpuCapabilities - DeviceGetVgpuMetadata = libnvml.DeviceGetVgpuMetadata - DeviceGetVgpuProcessUtilization = libnvml.DeviceGetVgpuProcessUtilization - DeviceGetVgpuSchedulerCapabilities = libnvml.DeviceGetVgpuSchedulerCapabilities - DeviceGetVgpuSchedulerLog = libnvml.DeviceGetVgpuSchedulerLog - DeviceGetVgpuSchedulerState = libnvml.DeviceGetVgpuSchedulerState - DeviceGetVgpuUtilization = libnvml.DeviceGetVgpuUtilization - DeviceGetViolationStatus = libnvml.DeviceGetViolationStatus - DeviceGetVirtualizationMode = libnvml.DeviceGetVirtualizationMode - DeviceIsMigDeviceHandle = libnvml.DeviceIsMigDeviceHandle - DeviceModifyDrainState = libnvml.DeviceModifyDrainState - DeviceOnSameBoard = libnvml.DeviceOnSameBoard - DeviceQueryDrainState = libnvml.DeviceQueryDrainState - DeviceRegisterEvents = libnvml.DeviceRegisterEvents - DeviceRemoveGpu = libnvml.DeviceRemoveGpu - DeviceRemoveGpu_v2 = libnvml.DeviceRemoveGpu_v2 - DeviceResetApplicationsClocks = libnvml.DeviceResetApplicationsClocks - DeviceResetGpuLockedClocks = libnvml.DeviceResetGpuLockedClocks - DeviceResetMemoryLockedClocks = libnvml.DeviceResetMemoryLockedClocks - DeviceResetNvLinkErrorCounters = libnvml.DeviceResetNvLinkErrorCounters - 
DeviceResetNvLinkUtilizationCounter = libnvml.DeviceResetNvLinkUtilizationCounter - DeviceSetAPIRestriction = libnvml.DeviceSetAPIRestriction - DeviceSetAccountingMode = libnvml.DeviceSetAccountingMode - DeviceSetApplicationsClocks = libnvml.DeviceSetApplicationsClocks - DeviceSetAutoBoostedClocksEnabled = libnvml.DeviceSetAutoBoostedClocksEnabled - DeviceSetComputeMode = libnvml.DeviceSetComputeMode - DeviceSetCpuAffinity = libnvml.DeviceSetCpuAffinity - DeviceSetDefaultAutoBoostedClocksEnabled = libnvml.DeviceSetDefaultAutoBoostedClocksEnabled - DeviceSetDefaultFanSpeed_v2 = libnvml.DeviceSetDefaultFanSpeed_v2 - DeviceSetDriverModel = libnvml.DeviceSetDriverModel - DeviceSetEccMode = libnvml.DeviceSetEccMode - DeviceSetFanControlPolicy = libnvml.DeviceSetFanControlPolicy - DeviceSetFanSpeed_v2 = libnvml.DeviceSetFanSpeed_v2 - DeviceSetGpcClkVfOffset = libnvml.DeviceSetGpcClkVfOffset - DeviceSetGpuLockedClocks = libnvml.DeviceSetGpuLockedClocks - DeviceSetGpuOperationMode = libnvml.DeviceSetGpuOperationMode - DeviceSetMemClkVfOffset = libnvml.DeviceSetMemClkVfOffset - DeviceSetMemoryLockedClocks = libnvml.DeviceSetMemoryLockedClocks - DeviceSetMigMode = libnvml.DeviceSetMigMode - DeviceSetNvLinkDeviceLowPowerThreshold = libnvml.DeviceSetNvLinkDeviceLowPowerThreshold - DeviceSetNvLinkUtilizationControl = libnvml.DeviceSetNvLinkUtilizationControl - DeviceSetPersistenceMode = libnvml.DeviceSetPersistenceMode - DeviceSetPowerManagementLimit = libnvml.DeviceSetPowerManagementLimit - DeviceSetTemperatureThreshold = libnvml.DeviceSetTemperatureThreshold - DeviceSetVgpuSchedulerState = libnvml.DeviceSetVgpuSchedulerState - DeviceSetVirtualizationMode = libnvml.DeviceSetVirtualizationMode - DeviceValidateInforom = libnvml.DeviceValidateInforom - ErrorString = libnvml.ErrorString - EventSetCreate = libnvml.EventSetCreate - EventSetFree = libnvml.EventSetFree - EventSetWait = libnvml.EventSetWait - Extensions = libnvml.Extensions - GetExcludedDeviceCount = 
libnvml.GetExcludedDeviceCount - GetExcludedDeviceInfoByIndex = libnvml.GetExcludedDeviceInfoByIndex - GetVgpuCompatibility = libnvml.GetVgpuCompatibility - GetVgpuDriverCapabilities = libnvml.GetVgpuDriverCapabilities - GetVgpuVersion = libnvml.GetVgpuVersion - GpmMetricsGet = libnvml.GpmMetricsGet - GpmMetricsGetV = libnvml.GpmMetricsGetV - GpmMigSampleGet = libnvml.GpmMigSampleGet - GpmQueryDeviceSupport = libnvml.GpmQueryDeviceSupport - GpmQueryDeviceSupportV = libnvml.GpmQueryDeviceSupportV - GpmSampleAlloc = libnvml.GpmSampleAlloc - GpmSampleFree = libnvml.GpmSampleFree - GpmSampleGet = libnvml.GpmSampleGet - GpuInstanceCreateComputeInstance = libnvml.GpuInstanceCreateComputeInstance - GpuInstanceCreateComputeInstanceWithPlacement = libnvml.GpuInstanceCreateComputeInstanceWithPlacement - GpuInstanceDestroy = libnvml.GpuInstanceDestroy - GpuInstanceGetComputeInstanceById = libnvml.GpuInstanceGetComputeInstanceById - GpuInstanceGetComputeInstancePossiblePlacements = libnvml.GpuInstanceGetComputeInstancePossiblePlacements - GpuInstanceGetComputeInstanceProfileInfo = libnvml.GpuInstanceGetComputeInstanceProfileInfo - GpuInstanceGetComputeInstanceProfileInfoV = libnvml.GpuInstanceGetComputeInstanceProfileInfoV - GpuInstanceGetComputeInstanceRemainingCapacity = libnvml.GpuInstanceGetComputeInstanceRemainingCapacity - GpuInstanceGetComputeInstances = libnvml.GpuInstanceGetComputeInstances - GpuInstanceGetInfo = libnvml.GpuInstanceGetInfo - Init = libnvml.Init - InitWithFlags = libnvml.InitWithFlags - SetVgpuVersion = libnvml.SetVgpuVersion - Shutdown = libnvml.Shutdown - SystemGetCudaDriverVersion = libnvml.SystemGetCudaDriverVersion - SystemGetCudaDriverVersion_v2 = libnvml.SystemGetCudaDriverVersion_v2 - SystemGetDriverVersion = libnvml.SystemGetDriverVersion - SystemGetHicVersion = libnvml.SystemGetHicVersion - SystemGetNVMLVersion = libnvml.SystemGetNVMLVersion - SystemGetProcessName = libnvml.SystemGetProcessName - SystemGetTopologyGpuSet = 
libnvml.SystemGetTopologyGpuSet - UnitGetCount = libnvml.UnitGetCount - UnitGetDevices = libnvml.UnitGetDevices - UnitGetFanSpeedInfo = libnvml.UnitGetFanSpeedInfo - UnitGetHandleByIndex = libnvml.UnitGetHandleByIndex - UnitGetLedState = libnvml.UnitGetLedState - UnitGetPsuInfo = libnvml.UnitGetPsuInfo - UnitGetTemperature = libnvml.UnitGetTemperature - UnitGetUnitInfo = libnvml.UnitGetUnitInfo - UnitSetLedState = libnvml.UnitSetLedState - VgpuInstanceClearAccountingPids = libnvml.VgpuInstanceClearAccountingPids - VgpuInstanceGetAccountingMode = libnvml.VgpuInstanceGetAccountingMode - VgpuInstanceGetAccountingPids = libnvml.VgpuInstanceGetAccountingPids - VgpuInstanceGetAccountingStats = libnvml.VgpuInstanceGetAccountingStats - VgpuInstanceGetEccMode = libnvml.VgpuInstanceGetEccMode - VgpuInstanceGetEncoderCapacity = libnvml.VgpuInstanceGetEncoderCapacity - VgpuInstanceGetEncoderSessions = libnvml.VgpuInstanceGetEncoderSessions - VgpuInstanceGetEncoderStats = libnvml.VgpuInstanceGetEncoderStats - VgpuInstanceGetFBCSessions = libnvml.VgpuInstanceGetFBCSessions - VgpuInstanceGetFBCStats = libnvml.VgpuInstanceGetFBCStats - VgpuInstanceGetFbUsage = libnvml.VgpuInstanceGetFbUsage - VgpuInstanceGetFrameRateLimit = libnvml.VgpuInstanceGetFrameRateLimit - VgpuInstanceGetGpuInstanceId = libnvml.VgpuInstanceGetGpuInstanceId - VgpuInstanceGetGpuPciId = libnvml.VgpuInstanceGetGpuPciId - VgpuInstanceGetLicenseInfo = libnvml.VgpuInstanceGetLicenseInfo - VgpuInstanceGetLicenseStatus = libnvml.VgpuInstanceGetLicenseStatus - VgpuInstanceGetMdevUUID = libnvml.VgpuInstanceGetMdevUUID - VgpuInstanceGetMetadata = libnvml.VgpuInstanceGetMetadata - VgpuInstanceGetType = libnvml.VgpuInstanceGetType - VgpuInstanceGetUUID = libnvml.VgpuInstanceGetUUID - VgpuInstanceGetVmDriverVersion = libnvml.VgpuInstanceGetVmDriverVersion - VgpuInstanceGetVmID = libnvml.VgpuInstanceGetVmID - VgpuInstanceSetEncoderCapacity = libnvml.VgpuInstanceSetEncoderCapacity - VgpuTypeGetCapabilities = 
libnvml.VgpuTypeGetCapabilities - VgpuTypeGetClass = libnvml.VgpuTypeGetClass - VgpuTypeGetDeviceID = libnvml.VgpuTypeGetDeviceID - VgpuTypeGetFrameRateLimit = libnvml.VgpuTypeGetFrameRateLimit - VgpuTypeGetFramebufferSize = libnvml.VgpuTypeGetFramebufferSize - VgpuTypeGetGpuInstanceProfileId = libnvml.VgpuTypeGetGpuInstanceProfileId - VgpuTypeGetLicense = libnvml.VgpuTypeGetLicense - VgpuTypeGetMaxInstances = libnvml.VgpuTypeGetMaxInstances - VgpuTypeGetMaxInstancesPerVm = libnvml.VgpuTypeGetMaxInstancesPerVm - VgpuTypeGetName = libnvml.VgpuTypeGetName - VgpuTypeGetNumDisplayHeads = libnvml.VgpuTypeGetNumDisplayHeads - VgpuTypeGetResolution = libnvml.VgpuTypeGetResolution + ComputeInstanceDestroy = libnvml.ComputeInstanceDestroy + ComputeInstanceGetInfo = libnvml.ComputeInstanceGetInfo + DeviceClearAccountingPids = libnvml.DeviceClearAccountingPids + DeviceClearCpuAffinity = libnvml.DeviceClearCpuAffinity + DeviceClearEccErrorCounts = libnvml.DeviceClearEccErrorCounts + DeviceClearFieldValues = libnvml.DeviceClearFieldValues + DeviceCreateGpuInstance = libnvml.DeviceCreateGpuInstance + DeviceCreateGpuInstanceWithPlacement = libnvml.DeviceCreateGpuInstanceWithPlacement + DeviceDiscoverGpus = libnvml.DeviceDiscoverGpus + DeviceFreezeNvLinkUtilizationCounter = libnvml.DeviceFreezeNvLinkUtilizationCounter + DeviceGetAPIRestriction = libnvml.DeviceGetAPIRestriction + DeviceGetAccountingBufferSize = libnvml.DeviceGetAccountingBufferSize + DeviceGetAccountingMode = libnvml.DeviceGetAccountingMode + DeviceGetAccountingPids = libnvml.DeviceGetAccountingPids + DeviceGetAccountingStats = libnvml.DeviceGetAccountingStats + DeviceGetActiveVgpus = libnvml.DeviceGetActiveVgpus + DeviceGetAdaptiveClockInfoStatus = libnvml.DeviceGetAdaptiveClockInfoStatus + DeviceGetApplicationsClock = libnvml.DeviceGetApplicationsClock + DeviceGetArchitecture = libnvml.DeviceGetArchitecture + DeviceGetAttributes = libnvml.DeviceGetAttributes + DeviceGetAutoBoostedClocksEnabled = 
libnvml.DeviceGetAutoBoostedClocksEnabled + DeviceGetBAR1MemoryInfo = libnvml.DeviceGetBAR1MemoryInfo + DeviceGetBoardId = libnvml.DeviceGetBoardId + DeviceGetBoardPartNumber = libnvml.DeviceGetBoardPartNumber + DeviceGetBrand = libnvml.DeviceGetBrand + DeviceGetBridgeChipInfo = libnvml.DeviceGetBridgeChipInfo + DeviceGetBusType = libnvml.DeviceGetBusType + DeviceGetC2cModeInfoV = libnvml.DeviceGetC2cModeInfoV + DeviceGetCapabilities = libnvml.DeviceGetCapabilities + DeviceGetClkMonStatus = libnvml.DeviceGetClkMonStatus + DeviceGetClock = libnvml.DeviceGetClock + DeviceGetClockInfo = libnvml.DeviceGetClockInfo + DeviceGetClockOffsets = libnvml.DeviceGetClockOffsets + DeviceGetComputeInstanceId = libnvml.DeviceGetComputeInstanceId + DeviceGetComputeMode = libnvml.DeviceGetComputeMode + DeviceGetComputeRunningProcesses = libnvml.DeviceGetComputeRunningProcesses + DeviceGetConfComputeGpuAttestationReport = libnvml.DeviceGetConfComputeGpuAttestationReport + DeviceGetConfComputeGpuCertificate = libnvml.DeviceGetConfComputeGpuCertificate + DeviceGetConfComputeMemSizeInfo = libnvml.DeviceGetConfComputeMemSizeInfo + DeviceGetConfComputeProtectedMemoryUsage = libnvml.DeviceGetConfComputeProtectedMemoryUsage + DeviceGetCoolerInfo = libnvml.DeviceGetCoolerInfo + DeviceGetCount = libnvml.DeviceGetCount + DeviceGetCpuAffinity = libnvml.DeviceGetCpuAffinity + DeviceGetCpuAffinityWithinScope = libnvml.DeviceGetCpuAffinityWithinScope + DeviceGetCreatableVgpus = libnvml.DeviceGetCreatableVgpus + DeviceGetCudaComputeCapability = libnvml.DeviceGetCudaComputeCapability + DeviceGetCurrPcieLinkGeneration = libnvml.DeviceGetCurrPcieLinkGeneration + DeviceGetCurrPcieLinkWidth = libnvml.DeviceGetCurrPcieLinkWidth + DeviceGetCurrentClockFreqs = libnvml.DeviceGetCurrentClockFreqs + DeviceGetCurrentClocksEventReasons = libnvml.DeviceGetCurrentClocksEventReasons + DeviceGetCurrentClocksThrottleReasons = libnvml.DeviceGetCurrentClocksThrottleReasons + DeviceGetDecoderUtilization = 
libnvml.DeviceGetDecoderUtilization + DeviceGetDefaultApplicationsClock = libnvml.DeviceGetDefaultApplicationsClock + DeviceGetDefaultEccMode = libnvml.DeviceGetDefaultEccMode + DeviceGetDetailedEccErrors = libnvml.DeviceGetDetailedEccErrors + DeviceGetDeviceHandleFromMigDeviceHandle = libnvml.DeviceGetDeviceHandleFromMigDeviceHandle + DeviceGetDisplayActive = libnvml.DeviceGetDisplayActive + DeviceGetDisplayMode = libnvml.DeviceGetDisplayMode + DeviceGetDramEncryptionMode = libnvml.DeviceGetDramEncryptionMode + DeviceGetDriverModel = libnvml.DeviceGetDriverModel + DeviceGetDriverModel_v2 = libnvml.DeviceGetDriverModel_v2 + DeviceGetDynamicPstatesInfo = libnvml.DeviceGetDynamicPstatesInfo + DeviceGetEccMode = libnvml.DeviceGetEccMode + DeviceGetEncoderCapacity = libnvml.DeviceGetEncoderCapacity + DeviceGetEncoderSessions = libnvml.DeviceGetEncoderSessions + DeviceGetEncoderStats = libnvml.DeviceGetEncoderStats + DeviceGetEncoderUtilization = libnvml.DeviceGetEncoderUtilization + DeviceGetEnforcedPowerLimit = libnvml.DeviceGetEnforcedPowerLimit + DeviceGetFBCSessions = libnvml.DeviceGetFBCSessions + DeviceGetFBCStats = libnvml.DeviceGetFBCStats + DeviceGetFanControlPolicy_v2 = libnvml.DeviceGetFanControlPolicy_v2 + DeviceGetFanSpeed = libnvml.DeviceGetFanSpeed + DeviceGetFanSpeedRPM = libnvml.DeviceGetFanSpeedRPM + DeviceGetFanSpeed_v2 = libnvml.DeviceGetFanSpeed_v2 + DeviceGetFieldValues = libnvml.DeviceGetFieldValues + DeviceGetGpcClkMinMaxVfOffset = libnvml.DeviceGetGpcClkMinMaxVfOffset + DeviceGetGpcClkVfOffset = libnvml.DeviceGetGpcClkVfOffset + DeviceGetGpuFabricInfo = libnvml.DeviceGetGpuFabricInfo + DeviceGetGpuFabricInfoV = libnvml.DeviceGetGpuFabricInfoV + DeviceGetGpuInstanceById = libnvml.DeviceGetGpuInstanceById + DeviceGetGpuInstanceId = libnvml.DeviceGetGpuInstanceId + DeviceGetGpuInstancePossiblePlacements = libnvml.DeviceGetGpuInstancePossiblePlacements + DeviceGetGpuInstanceProfileInfo = libnvml.DeviceGetGpuInstanceProfileInfo + 
DeviceGetGpuInstanceProfileInfoV = libnvml.DeviceGetGpuInstanceProfileInfoV + DeviceGetGpuInstanceRemainingCapacity = libnvml.DeviceGetGpuInstanceRemainingCapacity + DeviceGetGpuInstances = libnvml.DeviceGetGpuInstances + DeviceGetGpuMaxPcieLinkGeneration = libnvml.DeviceGetGpuMaxPcieLinkGeneration + DeviceGetGpuOperationMode = libnvml.DeviceGetGpuOperationMode + DeviceGetGraphicsRunningProcesses = libnvml.DeviceGetGraphicsRunningProcesses + DeviceGetGridLicensableFeatures = libnvml.DeviceGetGridLicensableFeatures + DeviceGetGspFirmwareMode = libnvml.DeviceGetGspFirmwareMode + DeviceGetGspFirmwareVersion = libnvml.DeviceGetGspFirmwareVersion + DeviceGetHandleByIndex = libnvml.DeviceGetHandleByIndex + DeviceGetHandleByPciBusId = libnvml.DeviceGetHandleByPciBusId + DeviceGetHandleBySerial = libnvml.DeviceGetHandleBySerial + DeviceGetHandleByUUID = libnvml.DeviceGetHandleByUUID + DeviceGetHandleByUUIDV = libnvml.DeviceGetHandleByUUIDV + DeviceGetHostVgpuMode = libnvml.DeviceGetHostVgpuMode + DeviceGetIndex = libnvml.DeviceGetIndex + DeviceGetInforomConfigurationChecksum = libnvml.DeviceGetInforomConfigurationChecksum + DeviceGetInforomImageVersion = libnvml.DeviceGetInforomImageVersion + DeviceGetInforomVersion = libnvml.DeviceGetInforomVersion + DeviceGetIrqNum = libnvml.DeviceGetIrqNum + DeviceGetJpgUtilization = libnvml.DeviceGetJpgUtilization + DeviceGetLastBBXFlushTime = libnvml.DeviceGetLastBBXFlushTime + DeviceGetMPSComputeRunningProcesses = libnvml.DeviceGetMPSComputeRunningProcesses + DeviceGetMarginTemperature = libnvml.DeviceGetMarginTemperature + DeviceGetMaxClockInfo = libnvml.DeviceGetMaxClockInfo + DeviceGetMaxCustomerBoostClock = libnvml.DeviceGetMaxCustomerBoostClock + DeviceGetMaxMigDeviceCount = libnvml.DeviceGetMaxMigDeviceCount + DeviceGetMaxPcieLinkGeneration = libnvml.DeviceGetMaxPcieLinkGeneration + DeviceGetMaxPcieLinkWidth = libnvml.DeviceGetMaxPcieLinkWidth + DeviceGetMemClkMinMaxVfOffset = libnvml.DeviceGetMemClkMinMaxVfOffset + 
DeviceGetMemClkVfOffset = libnvml.DeviceGetMemClkVfOffset + DeviceGetMemoryAffinity = libnvml.DeviceGetMemoryAffinity + DeviceGetMemoryBusWidth = libnvml.DeviceGetMemoryBusWidth + DeviceGetMemoryErrorCounter = libnvml.DeviceGetMemoryErrorCounter + DeviceGetMemoryInfo = libnvml.DeviceGetMemoryInfo + DeviceGetMemoryInfo_v2 = libnvml.DeviceGetMemoryInfo_v2 + DeviceGetMigDeviceHandleByIndex = libnvml.DeviceGetMigDeviceHandleByIndex + DeviceGetMigMode = libnvml.DeviceGetMigMode + DeviceGetMinMaxClockOfPState = libnvml.DeviceGetMinMaxClockOfPState + DeviceGetMinMaxFanSpeed = libnvml.DeviceGetMinMaxFanSpeed + DeviceGetMinorNumber = libnvml.DeviceGetMinorNumber + DeviceGetModuleId = libnvml.DeviceGetModuleId + DeviceGetMultiGpuBoard = libnvml.DeviceGetMultiGpuBoard + DeviceGetName = libnvml.DeviceGetName + DeviceGetNumFans = libnvml.DeviceGetNumFans + DeviceGetNumGpuCores = libnvml.DeviceGetNumGpuCores + DeviceGetNumaNodeId = libnvml.DeviceGetNumaNodeId + DeviceGetNvLinkCapability = libnvml.DeviceGetNvLinkCapability + DeviceGetNvLinkErrorCounter = libnvml.DeviceGetNvLinkErrorCounter + DeviceGetNvLinkRemoteDeviceType = libnvml.DeviceGetNvLinkRemoteDeviceType + DeviceGetNvLinkRemotePciInfo = libnvml.DeviceGetNvLinkRemotePciInfo + DeviceGetNvLinkState = libnvml.DeviceGetNvLinkState + DeviceGetNvLinkUtilizationControl = libnvml.DeviceGetNvLinkUtilizationControl + DeviceGetNvLinkUtilizationCounter = libnvml.DeviceGetNvLinkUtilizationCounter + DeviceGetNvLinkVersion = libnvml.DeviceGetNvLinkVersion + DeviceGetNvlinkBwMode = libnvml.DeviceGetNvlinkBwMode + DeviceGetNvlinkSupportedBwModes = libnvml.DeviceGetNvlinkSupportedBwModes + DeviceGetOfaUtilization = libnvml.DeviceGetOfaUtilization + DeviceGetP2PStatus = libnvml.DeviceGetP2PStatus + DeviceGetPciInfo = libnvml.DeviceGetPciInfo + DeviceGetPciInfoExt = libnvml.DeviceGetPciInfoExt + DeviceGetPcieLinkMaxSpeed = libnvml.DeviceGetPcieLinkMaxSpeed + DeviceGetPcieReplayCounter = libnvml.DeviceGetPcieReplayCounter + 
DeviceGetPcieSpeed = libnvml.DeviceGetPcieSpeed + DeviceGetPcieThroughput = libnvml.DeviceGetPcieThroughput + DeviceGetPerformanceModes = libnvml.DeviceGetPerformanceModes + DeviceGetPerformanceState = libnvml.DeviceGetPerformanceState + DeviceGetPersistenceMode = libnvml.DeviceGetPersistenceMode + DeviceGetPgpuMetadataString = libnvml.DeviceGetPgpuMetadataString + DeviceGetPlatformInfo = libnvml.DeviceGetPlatformInfo + DeviceGetPowerManagementDefaultLimit = libnvml.DeviceGetPowerManagementDefaultLimit + DeviceGetPowerManagementLimit = libnvml.DeviceGetPowerManagementLimit + DeviceGetPowerManagementLimitConstraints = libnvml.DeviceGetPowerManagementLimitConstraints + DeviceGetPowerManagementMode = libnvml.DeviceGetPowerManagementMode + DeviceGetPowerSource = libnvml.DeviceGetPowerSource + DeviceGetPowerState = libnvml.DeviceGetPowerState + DeviceGetPowerUsage = libnvml.DeviceGetPowerUsage + DeviceGetProcessUtilization = libnvml.DeviceGetProcessUtilization + DeviceGetProcessesUtilizationInfo = libnvml.DeviceGetProcessesUtilizationInfo + DeviceGetRemappedRows = libnvml.DeviceGetRemappedRows + DeviceGetRetiredPages = libnvml.DeviceGetRetiredPages + DeviceGetRetiredPagesPendingStatus = libnvml.DeviceGetRetiredPagesPendingStatus + DeviceGetRetiredPages_v2 = libnvml.DeviceGetRetiredPages_v2 + DeviceGetRowRemapperHistogram = libnvml.DeviceGetRowRemapperHistogram + DeviceGetRunningProcessDetailList = libnvml.DeviceGetRunningProcessDetailList + DeviceGetSamples = libnvml.DeviceGetSamples + DeviceGetSerial = libnvml.DeviceGetSerial + DeviceGetSramEccErrorStatus = libnvml.DeviceGetSramEccErrorStatus + DeviceGetSupportedClocksEventReasons = libnvml.DeviceGetSupportedClocksEventReasons + DeviceGetSupportedClocksThrottleReasons = libnvml.DeviceGetSupportedClocksThrottleReasons + DeviceGetSupportedEventTypes = libnvml.DeviceGetSupportedEventTypes + DeviceGetSupportedGraphicsClocks = libnvml.DeviceGetSupportedGraphicsClocks + DeviceGetSupportedMemoryClocks = 
libnvml.DeviceGetSupportedMemoryClocks + DeviceGetSupportedPerformanceStates = libnvml.DeviceGetSupportedPerformanceStates + DeviceGetSupportedVgpus = libnvml.DeviceGetSupportedVgpus + DeviceGetTargetFanSpeed = libnvml.DeviceGetTargetFanSpeed + DeviceGetTemperature = libnvml.DeviceGetTemperature + DeviceGetTemperatureThreshold = libnvml.DeviceGetTemperatureThreshold + DeviceGetTemperatureV = libnvml.DeviceGetTemperatureV + DeviceGetThermalSettings = libnvml.DeviceGetThermalSettings + DeviceGetTopologyCommonAncestor = libnvml.DeviceGetTopologyCommonAncestor + DeviceGetTopologyNearestGpus = libnvml.DeviceGetTopologyNearestGpus + DeviceGetTotalEccErrors = libnvml.DeviceGetTotalEccErrors + DeviceGetTotalEnergyConsumption = libnvml.DeviceGetTotalEnergyConsumption + DeviceGetUUID = libnvml.DeviceGetUUID + DeviceGetUtilizationRates = libnvml.DeviceGetUtilizationRates + DeviceGetVbiosVersion = libnvml.DeviceGetVbiosVersion + DeviceGetVgpuCapabilities = libnvml.DeviceGetVgpuCapabilities + DeviceGetVgpuHeterogeneousMode = libnvml.DeviceGetVgpuHeterogeneousMode + DeviceGetVgpuInstancesUtilizationInfo = libnvml.DeviceGetVgpuInstancesUtilizationInfo + DeviceGetVgpuMetadata = libnvml.DeviceGetVgpuMetadata + DeviceGetVgpuProcessUtilization = libnvml.DeviceGetVgpuProcessUtilization + DeviceGetVgpuProcessesUtilizationInfo = libnvml.DeviceGetVgpuProcessesUtilizationInfo + DeviceGetVgpuSchedulerCapabilities = libnvml.DeviceGetVgpuSchedulerCapabilities + DeviceGetVgpuSchedulerLog = libnvml.DeviceGetVgpuSchedulerLog + DeviceGetVgpuSchedulerState = libnvml.DeviceGetVgpuSchedulerState + DeviceGetVgpuTypeCreatablePlacements = libnvml.DeviceGetVgpuTypeCreatablePlacements + DeviceGetVgpuTypeSupportedPlacements = libnvml.DeviceGetVgpuTypeSupportedPlacements + DeviceGetVgpuUtilization = libnvml.DeviceGetVgpuUtilization + DeviceGetViolationStatus = libnvml.DeviceGetViolationStatus + DeviceGetVirtualizationMode = libnvml.DeviceGetVirtualizationMode + DeviceIsMigDeviceHandle = 
libnvml.DeviceIsMigDeviceHandle + DeviceModifyDrainState = libnvml.DeviceModifyDrainState + DeviceOnSameBoard = libnvml.DeviceOnSameBoard + DevicePowerSmoothingActivatePresetProfile = libnvml.DevicePowerSmoothingActivatePresetProfile + DevicePowerSmoothingSetState = libnvml.DevicePowerSmoothingSetState + DevicePowerSmoothingUpdatePresetProfileParam = libnvml.DevicePowerSmoothingUpdatePresetProfileParam + DeviceQueryDrainState = libnvml.DeviceQueryDrainState + DeviceRegisterEvents = libnvml.DeviceRegisterEvents + DeviceRemoveGpu = libnvml.DeviceRemoveGpu + DeviceRemoveGpu_v2 = libnvml.DeviceRemoveGpu_v2 + DeviceResetApplicationsClocks = libnvml.DeviceResetApplicationsClocks + DeviceResetGpuLockedClocks = libnvml.DeviceResetGpuLockedClocks + DeviceResetMemoryLockedClocks = libnvml.DeviceResetMemoryLockedClocks + DeviceResetNvLinkErrorCounters = libnvml.DeviceResetNvLinkErrorCounters + DeviceResetNvLinkUtilizationCounter = libnvml.DeviceResetNvLinkUtilizationCounter + DeviceSetAPIRestriction = libnvml.DeviceSetAPIRestriction + DeviceSetAccountingMode = libnvml.DeviceSetAccountingMode + DeviceSetApplicationsClocks = libnvml.DeviceSetApplicationsClocks + DeviceSetAutoBoostedClocksEnabled = libnvml.DeviceSetAutoBoostedClocksEnabled + DeviceSetClockOffsets = libnvml.DeviceSetClockOffsets + DeviceSetComputeMode = libnvml.DeviceSetComputeMode + DeviceSetConfComputeUnprotectedMemSize = libnvml.DeviceSetConfComputeUnprotectedMemSize + DeviceSetCpuAffinity = libnvml.DeviceSetCpuAffinity + DeviceSetDefaultAutoBoostedClocksEnabled = libnvml.DeviceSetDefaultAutoBoostedClocksEnabled + DeviceSetDefaultFanSpeed_v2 = libnvml.DeviceSetDefaultFanSpeed_v2 + DeviceSetDramEncryptionMode = libnvml.DeviceSetDramEncryptionMode + DeviceSetDriverModel = libnvml.DeviceSetDriverModel + DeviceSetEccMode = libnvml.DeviceSetEccMode + DeviceSetFanControlPolicy = libnvml.DeviceSetFanControlPolicy + DeviceSetFanSpeed_v2 = libnvml.DeviceSetFanSpeed_v2 + DeviceSetGpcClkVfOffset = 
libnvml.DeviceSetGpcClkVfOffset + DeviceSetGpuLockedClocks = libnvml.DeviceSetGpuLockedClocks + DeviceSetGpuOperationMode = libnvml.DeviceSetGpuOperationMode + DeviceSetMemClkVfOffset = libnvml.DeviceSetMemClkVfOffset + DeviceSetMemoryLockedClocks = libnvml.DeviceSetMemoryLockedClocks + DeviceSetMigMode = libnvml.DeviceSetMigMode + DeviceSetNvLinkDeviceLowPowerThreshold = libnvml.DeviceSetNvLinkDeviceLowPowerThreshold + DeviceSetNvLinkUtilizationControl = libnvml.DeviceSetNvLinkUtilizationControl + DeviceSetNvlinkBwMode = libnvml.DeviceSetNvlinkBwMode + DeviceSetPersistenceMode = libnvml.DeviceSetPersistenceMode + DeviceSetPowerManagementLimit = libnvml.DeviceSetPowerManagementLimit + DeviceSetPowerManagementLimit_v2 = libnvml.DeviceSetPowerManagementLimit_v2 + DeviceSetTemperatureThreshold = libnvml.DeviceSetTemperatureThreshold + DeviceSetVgpuCapabilities = libnvml.DeviceSetVgpuCapabilities + DeviceSetVgpuHeterogeneousMode = libnvml.DeviceSetVgpuHeterogeneousMode + DeviceSetVgpuSchedulerState = libnvml.DeviceSetVgpuSchedulerState + DeviceSetVirtualizationMode = libnvml.DeviceSetVirtualizationMode + DeviceValidateInforom = libnvml.DeviceValidateInforom + DeviceWorkloadPowerProfileClearRequestedProfiles = libnvml.DeviceWorkloadPowerProfileClearRequestedProfiles + DeviceWorkloadPowerProfileGetCurrentProfiles = libnvml.DeviceWorkloadPowerProfileGetCurrentProfiles + DeviceWorkloadPowerProfileGetProfilesInfo = libnvml.DeviceWorkloadPowerProfileGetProfilesInfo + DeviceWorkloadPowerProfileSetRequestedProfiles = libnvml.DeviceWorkloadPowerProfileSetRequestedProfiles + ErrorString = libnvml.ErrorString + EventSetCreate = libnvml.EventSetCreate + EventSetFree = libnvml.EventSetFree + EventSetWait = libnvml.EventSetWait + Extensions = libnvml.Extensions + GetExcludedDeviceCount = libnvml.GetExcludedDeviceCount + GetExcludedDeviceInfoByIndex = libnvml.GetExcludedDeviceInfoByIndex + GetVgpuCompatibility = libnvml.GetVgpuCompatibility + GetVgpuDriverCapabilities = 
libnvml.GetVgpuDriverCapabilities + GetVgpuVersion = libnvml.GetVgpuVersion + GpmMetricsGet = libnvml.GpmMetricsGet + GpmMetricsGetV = libnvml.GpmMetricsGetV + GpmMigSampleGet = libnvml.GpmMigSampleGet + GpmQueryDeviceSupport = libnvml.GpmQueryDeviceSupport + GpmQueryDeviceSupportV = libnvml.GpmQueryDeviceSupportV + GpmQueryIfStreamingEnabled = libnvml.GpmQueryIfStreamingEnabled + GpmSampleAlloc = libnvml.GpmSampleAlloc + GpmSampleFree = libnvml.GpmSampleFree + GpmSampleGet = libnvml.GpmSampleGet + GpmSetStreamingEnabled = libnvml.GpmSetStreamingEnabled + GpuInstanceCreateComputeInstance = libnvml.GpuInstanceCreateComputeInstance + GpuInstanceCreateComputeInstanceWithPlacement = libnvml.GpuInstanceCreateComputeInstanceWithPlacement + GpuInstanceDestroy = libnvml.GpuInstanceDestroy + GpuInstanceGetActiveVgpus = libnvml.GpuInstanceGetActiveVgpus + GpuInstanceGetComputeInstanceById = libnvml.GpuInstanceGetComputeInstanceById + GpuInstanceGetComputeInstancePossiblePlacements = libnvml.GpuInstanceGetComputeInstancePossiblePlacements + GpuInstanceGetComputeInstanceProfileInfo = libnvml.GpuInstanceGetComputeInstanceProfileInfo + GpuInstanceGetComputeInstanceProfileInfoV = libnvml.GpuInstanceGetComputeInstanceProfileInfoV + GpuInstanceGetComputeInstanceRemainingCapacity = libnvml.GpuInstanceGetComputeInstanceRemainingCapacity + GpuInstanceGetComputeInstances = libnvml.GpuInstanceGetComputeInstances + GpuInstanceGetCreatableVgpus = libnvml.GpuInstanceGetCreatableVgpus + GpuInstanceGetInfo = libnvml.GpuInstanceGetInfo + GpuInstanceGetVgpuHeterogeneousMode = libnvml.GpuInstanceGetVgpuHeterogeneousMode + GpuInstanceGetVgpuSchedulerLog = libnvml.GpuInstanceGetVgpuSchedulerLog + GpuInstanceGetVgpuSchedulerState = libnvml.GpuInstanceGetVgpuSchedulerState + GpuInstanceGetVgpuTypeCreatablePlacements = libnvml.GpuInstanceGetVgpuTypeCreatablePlacements + GpuInstanceSetVgpuHeterogeneousMode = libnvml.GpuInstanceSetVgpuHeterogeneousMode + GpuInstanceSetVgpuSchedulerState = 
libnvml.GpuInstanceSetVgpuSchedulerState + Init = libnvml.Init + InitWithFlags = libnvml.InitWithFlags + SetVgpuVersion = libnvml.SetVgpuVersion + Shutdown = libnvml.Shutdown + SystemEventSetCreate = libnvml.SystemEventSetCreate + SystemEventSetFree = libnvml.SystemEventSetFree + SystemEventSetWait = libnvml.SystemEventSetWait + SystemGetConfComputeCapabilities = libnvml.SystemGetConfComputeCapabilities + SystemGetConfComputeGpusReadyState = libnvml.SystemGetConfComputeGpusReadyState + SystemGetConfComputeKeyRotationThresholdInfo = libnvml.SystemGetConfComputeKeyRotationThresholdInfo + SystemGetConfComputeSettings = libnvml.SystemGetConfComputeSettings + SystemGetConfComputeState = libnvml.SystemGetConfComputeState + SystemGetCudaDriverVersion = libnvml.SystemGetCudaDriverVersion + SystemGetCudaDriverVersion_v2 = libnvml.SystemGetCudaDriverVersion_v2 + SystemGetDriverBranch = libnvml.SystemGetDriverBranch + SystemGetDriverVersion = libnvml.SystemGetDriverVersion + SystemGetHicVersion = libnvml.SystemGetHicVersion + SystemGetNVMLVersion = libnvml.SystemGetNVMLVersion + SystemGetNvlinkBwMode = libnvml.SystemGetNvlinkBwMode + SystemGetProcessName = libnvml.SystemGetProcessName + SystemGetTopologyGpuSet = libnvml.SystemGetTopologyGpuSet + SystemRegisterEvents = libnvml.SystemRegisterEvents + SystemSetConfComputeGpusReadyState = libnvml.SystemSetConfComputeGpusReadyState + SystemSetConfComputeKeyRotationThresholdInfo = libnvml.SystemSetConfComputeKeyRotationThresholdInfo + SystemSetNvlinkBwMode = libnvml.SystemSetNvlinkBwMode + UnitGetCount = libnvml.UnitGetCount + UnitGetDevices = libnvml.UnitGetDevices + UnitGetFanSpeedInfo = libnvml.UnitGetFanSpeedInfo + UnitGetHandleByIndex = libnvml.UnitGetHandleByIndex + UnitGetLedState = libnvml.UnitGetLedState + UnitGetPsuInfo = libnvml.UnitGetPsuInfo + UnitGetTemperature = libnvml.UnitGetTemperature + UnitGetUnitInfo = libnvml.UnitGetUnitInfo + UnitSetLedState = libnvml.UnitSetLedState + VgpuInstanceClearAccountingPids = 
libnvml.VgpuInstanceClearAccountingPids + VgpuInstanceGetAccountingMode = libnvml.VgpuInstanceGetAccountingMode + VgpuInstanceGetAccountingPids = libnvml.VgpuInstanceGetAccountingPids + VgpuInstanceGetAccountingStats = libnvml.VgpuInstanceGetAccountingStats + VgpuInstanceGetEccMode = libnvml.VgpuInstanceGetEccMode + VgpuInstanceGetEncoderCapacity = libnvml.VgpuInstanceGetEncoderCapacity + VgpuInstanceGetEncoderSessions = libnvml.VgpuInstanceGetEncoderSessions + VgpuInstanceGetEncoderStats = libnvml.VgpuInstanceGetEncoderStats + VgpuInstanceGetFBCSessions = libnvml.VgpuInstanceGetFBCSessions + VgpuInstanceGetFBCStats = libnvml.VgpuInstanceGetFBCStats + VgpuInstanceGetFbUsage = libnvml.VgpuInstanceGetFbUsage + VgpuInstanceGetFrameRateLimit = libnvml.VgpuInstanceGetFrameRateLimit + VgpuInstanceGetGpuInstanceId = libnvml.VgpuInstanceGetGpuInstanceId + VgpuInstanceGetGpuPciId = libnvml.VgpuInstanceGetGpuPciId + VgpuInstanceGetLicenseInfo = libnvml.VgpuInstanceGetLicenseInfo + VgpuInstanceGetLicenseStatus = libnvml.VgpuInstanceGetLicenseStatus + VgpuInstanceGetMdevUUID = libnvml.VgpuInstanceGetMdevUUID + VgpuInstanceGetMetadata = libnvml.VgpuInstanceGetMetadata + VgpuInstanceGetRuntimeStateSize = libnvml.VgpuInstanceGetRuntimeStateSize + VgpuInstanceGetType = libnvml.VgpuInstanceGetType + VgpuInstanceGetUUID = libnvml.VgpuInstanceGetUUID + VgpuInstanceGetVmDriverVersion = libnvml.VgpuInstanceGetVmDriverVersion + VgpuInstanceGetVmID = libnvml.VgpuInstanceGetVmID + VgpuInstanceSetEncoderCapacity = libnvml.VgpuInstanceSetEncoderCapacity + VgpuTypeGetBAR1Info = libnvml.VgpuTypeGetBAR1Info + VgpuTypeGetCapabilities = libnvml.VgpuTypeGetCapabilities + VgpuTypeGetClass = libnvml.VgpuTypeGetClass + VgpuTypeGetDeviceID = libnvml.VgpuTypeGetDeviceID + VgpuTypeGetFrameRateLimit = libnvml.VgpuTypeGetFrameRateLimit + VgpuTypeGetFramebufferSize = libnvml.VgpuTypeGetFramebufferSize + VgpuTypeGetGpuInstanceProfileId = libnvml.VgpuTypeGetGpuInstanceProfileId + VgpuTypeGetLicense = 
libnvml.VgpuTypeGetLicense + VgpuTypeGetMaxInstances = libnvml.VgpuTypeGetMaxInstances + VgpuTypeGetMaxInstancesPerGpuInstance = libnvml.VgpuTypeGetMaxInstancesPerGpuInstance + VgpuTypeGetMaxInstancesPerVm = libnvml.VgpuTypeGetMaxInstancesPerVm + VgpuTypeGetName = libnvml.VgpuTypeGetName + VgpuTypeGetNumDisplayHeads = libnvml.VgpuTypeGetNumDisplayHeads + VgpuTypeGetResolution = libnvml.VgpuTypeGetResolution ) // Interface represents the interface for the library type. @@ -319,8 +394,6 @@ var ( type Interface interface { ComputeInstanceDestroy(ComputeInstance) Return ComputeInstanceGetInfo(ComputeInstance) (ComputeInstanceInfo, Return) - DeviceCcuGetStreamState(Device) (int, Return) - DeviceCcuSetStreamState(Device, int) Return DeviceClearAccountingPids(Device) Return DeviceClearCpuAffinity(Device) Return DeviceClearEccErrorCounts(Device, EccCounterType) Return @@ -346,12 +419,20 @@ type Interface interface { DeviceGetBrand(Device) (BrandType, Return) DeviceGetBridgeChipInfo(Device) (BridgeChipHierarchy, Return) DeviceGetBusType(Device) (BusType, Return) + DeviceGetC2cModeInfoV(Device) C2cModeInfoHandler + DeviceGetCapabilities(Device) (DeviceCapabilities, Return) DeviceGetClkMonStatus(Device) (ClkMonStatus, Return) DeviceGetClock(Device, ClockType, ClockId) (uint32, Return) DeviceGetClockInfo(Device, ClockType) (uint32, Return) + DeviceGetClockOffsets(Device) (ClockOffset, Return) DeviceGetComputeInstanceId(Device) (int, Return) DeviceGetComputeMode(Device) (ComputeMode, Return) DeviceGetComputeRunningProcesses(Device) ([]ProcessInfo, Return) + DeviceGetConfComputeGpuAttestationReport(Device) (ConfComputeGpuAttestationReport, Return) + DeviceGetConfComputeGpuCertificate(Device) (ConfComputeGpuCertificate, Return) + DeviceGetConfComputeMemSizeInfo(Device) (ConfComputeMemSizeInfo, Return) + DeviceGetConfComputeProtectedMemoryUsage(Device) (Memory, Return) + DeviceGetCoolerInfo(Device) (CoolerInfo, Return) DeviceGetCount() (int, Return) DeviceGetCpuAffinity(Device, 
int) ([]uint, Return) DeviceGetCpuAffinityWithinScope(Device, int, AffinityScope) ([]uint, Return) @@ -359,6 +440,8 @@ type Interface interface { DeviceGetCudaComputeCapability(Device) (int, int, Return) DeviceGetCurrPcieLinkGeneration(Device) (int, Return) DeviceGetCurrPcieLinkWidth(Device) (int, Return) + DeviceGetCurrentClockFreqs(Device) (DeviceCurrentClockFreqs, Return) + DeviceGetCurrentClocksEventReasons(Device) (uint64, Return) DeviceGetCurrentClocksThrottleReasons(Device) (uint64, Return) DeviceGetDecoderUtilization(Device) (uint32, uint32, Return) DeviceGetDefaultApplicationsClock(Device, ClockType) (uint32, Return) @@ -367,7 +450,9 @@ type Interface interface { DeviceGetDeviceHandleFromMigDeviceHandle(Device) (Device, Return) DeviceGetDisplayActive(Device) (EnableState, Return) DeviceGetDisplayMode(Device) (EnableState, Return) + DeviceGetDramEncryptionMode(Device) (DramEncryptionInfo, DramEncryptionInfo, Return) DeviceGetDriverModel(Device) (DriverModel, DriverModel, Return) + DeviceGetDriverModel_v2(Device) (DriverModel, DriverModel, Return) DeviceGetDynamicPstatesInfo(Device) (GpuDynamicPstatesInfo, Return) DeviceGetEccMode(Device) (EnableState, EnableState, Return) DeviceGetEncoderCapacity(Device, EncoderType) (int, Return) @@ -379,16 +464,18 @@ type Interface interface { DeviceGetFBCStats(Device) (FBCStats, Return) DeviceGetFanControlPolicy_v2(Device, int) (FanControlPolicy, Return) DeviceGetFanSpeed(Device) (uint32, Return) + DeviceGetFanSpeedRPM(Device) (FanSpeedInfo, Return) DeviceGetFanSpeed_v2(Device, int) (uint32, Return) DeviceGetFieldValues(Device, []FieldValue) Return DeviceGetGpcClkMinMaxVfOffset(Device) (int, int, Return) DeviceGetGpcClkVfOffset(Device) (int, Return) DeviceGetGpuFabricInfo(Device) (GpuFabricInfo, Return) + DeviceGetGpuFabricInfoV(Device) GpuFabricInfoHandler DeviceGetGpuInstanceById(Device, int) (GpuInstance, Return) DeviceGetGpuInstanceId(Device) (int, Return) DeviceGetGpuInstancePossiblePlacements(Device, 
*GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) DeviceGetGpuInstanceProfileInfo(Device, int) (GpuInstanceProfileInfo, Return) - DeviceGetGpuInstanceProfileInfoV(Device, int) GpuInstanceProfileInfoV + DeviceGetGpuInstanceProfileInfoV(Device, int) GpuInstanceProfileInfoHandler DeviceGetGpuInstanceRemainingCapacity(Device, *GpuInstanceProfileInfo) (int, Return) DeviceGetGpuInstances(Device, *GpuInstanceProfileInfo) ([]GpuInstance, Return) DeviceGetGpuMaxPcieLinkGeneration(Device) (int, Return) @@ -401,13 +488,17 @@ type Interface interface { DeviceGetHandleByPciBusId(string) (Device, Return) DeviceGetHandleBySerial(string) (Device, Return) DeviceGetHandleByUUID(string) (Device, Return) + DeviceGetHandleByUUIDV(*UUID) (Device, Return) DeviceGetHostVgpuMode(Device) (HostVgpuMode, Return) DeviceGetIndex(Device) (int, Return) DeviceGetInforomConfigurationChecksum(Device) (uint32, Return) DeviceGetInforomImageVersion(Device) (string, Return) DeviceGetInforomVersion(Device, InforomObject) (string, Return) DeviceGetIrqNum(Device) (int, Return) + DeviceGetJpgUtilization(Device) (uint32, uint32, Return) + DeviceGetLastBBXFlushTime(Device) (uint64, uint, Return) DeviceGetMPSComputeRunningProcesses(Device) ([]ProcessInfo, Return) + DeviceGetMarginTemperature(Device) (MarginTemperature, Return) DeviceGetMaxClockInfo(Device, ClockType) (uint32, Return) DeviceGetMaxCustomerBoostClock(Device, ClockType) (uint32, Return) DeviceGetMaxMigDeviceCount(Device) (int, Return) @@ -425,10 +516,12 @@ type Interface interface { DeviceGetMinMaxClockOfPState(Device, ClockType, Pstates) (uint32, uint32, Return) DeviceGetMinMaxFanSpeed(Device) (int, int, Return) DeviceGetMinorNumber(Device) (int, Return) + DeviceGetModuleId(Device) (int, Return) DeviceGetMultiGpuBoard(Device) (int, Return) DeviceGetName(Device) (string, Return) DeviceGetNumFans(Device) (int, Return) DeviceGetNumGpuCores(Device) (int, Return) + DeviceGetNumaNodeId(Device) (int, Return) DeviceGetNvLinkCapability(Device, 
int, NvLinkCapability) (uint32, Return) DeviceGetNvLinkErrorCounter(Device, int, NvLinkErrorCounter) (uint64, Return) DeviceGetNvLinkRemoteDeviceType(Device, int) (IntNvLinkDeviceType, Return) @@ -437,15 +530,21 @@ type Interface interface { DeviceGetNvLinkUtilizationControl(Device, int, int) (NvLinkUtilizationControl, Return) DeviceGetNvLinkUtilizationCounter(Device, int, int) (uint64, uint64, Return) DeviceGetNvLinkVersion(Device, int) (uint32, Return) + DeviceGetNvlinkBwMode(Device) (NvlinkGetBwMode, Return) + DeviceGetNvlinkSupportedBwModes(Device) (NvlinkSupportedBwModes, Return) + DeviceGetOfaUtilization(Device) (uint32, uint32, Return) DeviceGetP2PStatus(Device, Device, GpuP2PCapsIndex) (GpuP2PStatus, Return) DeviceGetPciInfo(Device) (PciInfo, Return) + DeviceGetPciInfoExt(Device) (PciInfoExt, Return) DeviceGetPcieLinkMaxSpeed(Device) (uint32, Return) DeviceGetPcieReplayCounter(Device) (int, Return) DeviceGetPcieSpeed(Device) (int, Return) DeviceGetPcieThroughput(Device, PcieUtilCounter) (uint32, Return) + DeviceGetPerformanceModes(Device) (DevicePerfModes, Return) DeviceGetPerformanceState(Device) (Pstates, Return) DeviceGetPersistenceMode(Device) (EnableState, Return) DeviceGetPgpuMetadataString(Device) (string, Return) + DeviceGetPlatformInfo(Device) (PlatformInfo, Return) DeviceGetPowerManagementDefaultLimit(Device) (uint32, Return) DeviceGetPowerManagementLimit(Device) (uint32, Return) DeviceGetPowerManagementLimitConstraints(Device) (uint32, uint32, Return) @@ -454,13 +553,17 @@ type Interface interface { DeviceGetPowerState(Device) (Pstates, Return) DeviceGetPowerUsage(Device) (uint32, Return) DeviceGetProcessUtilization(Device, uint64) ([]ProcessUtilizationSample, Return) + DeviceGetProcessesUtilizationInfo(Device) (ProcessesUtilizationInfo, Return) DeviceGetRemappedRows(Device) (int, int, bool, bool, Return) DeviceGetRetiredPages(Device, PageRetirementCause) ([]uint64, Return) DeviceGetRetiredPagesPendingStatus(Device) (EnableState, Return) 
DeviceGetRetiredPages_v2(Device, PageRetirementCause) ([]uint64, []uint64, Return) DeviceGetRowRemapperHistogram(Device) (RowRemapperHistogramValues, Return) + DeviceGetRunningProcessDetailList(Device) (ProcessDetailList, Return) DeviceGetSamples(Device, SamplingType, uint64) (ValueType, []Sample, Return) DeviceGetSerial(Device) (string, Return) + DeviceGetSramEccErrorStatus(Device) (EccSramErrorStatus, Return) + DeviceGetSupportedClocksEventReasons(Device) (uint64, Return) DeviceGetSupportedClocksThrottleReasons(Device) (uint64, Return) DeviceGetSupportedEventTypes(Device) (uint64, Return) DeviceGetSupportedGraphicsClocks(Device, int) (int, uint32, Return) @@ -470,6 +573,7 @@ type Interface interface { DeviceGetTargetFanSpeed(Device, int) (int, Return) DeviceGetTemperature(Device, TemperatureSensors) (uint32, Return) DeviceGetTemperatureThreshold(Device, TemperatureThresholds) (uint32, Return) + DeviceGetTemperatureV(Device) TemperatureHandler DeviceGetThermalSettings(Device, uint32) (GpuThermalSettings, Return) DeviceGetTopologyCommonAncestor(Device, Device) (GpuTopologyLevel, Return) DeviceGetTopologyNearestGpus(Device, GpuTopologyLevel) ([]Device, Return) @@ -479,17 +583,25 @@ type Interface interface { DeviceGetUtilizationRates(Device) (Utilization, Return) DeviceGetVbiosVersion(Device) (string, Return) DeviceGetVgpuCapabilities(Device, DeviceVgpuCapability) (bool, Return) + DeviceGetVgpuHeterogeneousMode(Device) (VgpuHeterogeneousMode, Return) + DeviceGetVgpuInstancesUtilizationInfo(Device) (VgpuInstancesUtilizationInfo, Return) DeviceGetVgpuMetadata(Device) (VgpuPgpuMetadata, Return) DeviceGetVgpuProcessUtilization(Device, uint64) ([]VgpuProcessUtilizationSample, Return) + DeviceGetVgpuProcessesUtilizationInfo(Device) (VgpuProcessesUtilizationInfo, Return) DeviceGetVgpuSchedulerCapabilities(Device) (VgpuSchedulerCapabilities, Return) DeviceGetVgpuSchedulerLog(Device) (VgpuSchedulerLog, Return) DeviceGetVgpuSchedulerState(Device) (VgpuSchedulerGetState, 
Return) + DeviceGetVgpuTypeCreatablePlacements(Device, VgpuTypeId) (VgpuPlacementList, Return) + DeviceGetVgpuTypeSupportedPlacements(Device, VgpuTypeId) (VgpuPlacementList, Return) DeviceGetVgpuUtilization(Device, uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) DeviceGetViolationStatus(Device, PerfPolicyType) (ViolationTime, Return) DeviceGetVirtualizationMode(Device) (GpuVirtualizationMode, Return) DeviceIsMigDeviceHandle(Device) (bool, Return) DeviceModifyDrainState(*PciInfo, EnableState) Return DeviceOnSameBoard(Device, Device) (int, Return) + DevicePowerSmoothingActivatePresetProfile(Device, *PowerSmoothingProfile) Return + DevicePowerSmoothingSetState(Device, *PowerSmoothingState) Return + DevicePowerSmoothingUpdatePresetProfileParam(Device, *PowerSmoothingProfile) Return DeviceQueryDrainState(*PciInfo) (EnableState, Return) DeviceRegisterEvents(Device, uint64, EventSet) Return DeviceRemoveGpu(*PciInfo) Return @@ -503,10 +615,13 @@ type Interface interface { DeviceSetAccountingMode(Device, EnableState) Return DeviceSetApplicationsClocks(Device, uint32, uint32) Return DeviceSetAutoBoostedClocksEnabled(Device, EnableState) Return + DeviceSetClockOffsets(Device, ClockOffset) Return DeviceSetComputeMode(Device, ComputeMode) Return + DeviceSetConfComputeUnprotectedMemSize(Device, uint64) Return DeviceSetCpuAffinity(Device) Return DeviceSetDefaultAutoBoostedClocksEnabled(Device, EnableState, uint32) Return DeviceSetDefaultFanSpeed_v2(Device, int) Return + DeviceSetDramEncryptionMode(Device, *DramEncryptionInfo) Return DeviceSetDriverModel(Device, DriverModel, uint32) Return DeviceSetEccMode(Device, EnableState) Return DeviceSetFanControlPolicy(Device, int, FanControlPolicy) Return @@ -519,12 +634,20 @@ type Interface interface { DeviceSetMigMode(Device, int) (Return, Return) DeviceSetNvLinkDeviceLowPowerThreshold(Device, *NvLinkPowerThres) Return DeviceSetNvLinkUtilizationControl(Device, int, int, *NvLinkUtilizationControl, bool) Return + 
DeviceSetNvlinkBwMode(Device, *NvlinkSetBwMode) Return DeviceSetPersistenceMode(Device, EnableState) Return DeviceSetPowerManagementLimit(Device, uint32) Return + DeviceSetPowerManagementLimit_v2(Device, *PowerValue_v2) Return DeviceSetTemperatureThreshold(Device, TemperatureThresholds, int) Return + DeviceSetVgpuCapabilities(Device, DeviceVgpuCapability, EnableState) Return + DeviceSetVgpuHeterogeneousMode(Device, VgpuHeterogeneousMode) Return DeviceSetVgpuSchedulerState(Device, *VgpuSchedulerSetState) Return DeviceSetVirtualizationMode(Device, GpuVirtualizationMode) Return DeviceValidateInforom(Device) Return + DeviceWorkloadPowerProfileClearRequestedProfiles(Device, *WorkloadPowerProfileRequestedProfiles) Return + DeviceWorkloadPowerProfileGetCurrentProfiles(Device) (WorkloadPowerProfileCurrentProfiles, Return) + DeviceWorkloadPowerProfileGetProfilesInfo(Device) (WorkloadPowerProfileProfilesInfo, Return) + DeviceWorkloadPowerProfileSetRequestedProfiles(Device, *WorkloadPowerProfileRequestedProfiles) Return ErrorString(Return) string EventSetCreate() (EventSet, Return) EventSetFree(EventSet) Return @@ -540,30 +663,54 @@ type Interface interface { GpmMigSampleGet(Device, int, GpmSample) Return GpmQueryDeviceSupport(Device) (GpmSupport, Return) GpmQueryDeviceSupportV(Device) GpmSupportV + GpmQueryIfStreamingEnabled(Device) (uint32, Return) GpmSampleAlloc() (GpmSample, Return) GpmSampleFree(GpmSample) Return GpmSampleGet(Device, GpmSample) Return + GpmSetStreamingEnabled(Device, uint32) Return GpuInstanceCreateComputeInstance(GpuInstance, *ComputeInstanceProfileInfo) (ComputeInstance, Return) GpuInstanceCreateComputeInstanceWithPlacement(GpuInstance, *ComputeInstanceProfileInfo, *ComputeInstancePlacement) (ComputeInstance, Return) GpuInstanceDestroy(GpuInstance) Return + GpuInstanceGetActiveVgpus(GpuInstance) (ActiveVgpuInstanceInfo, Return) GpuInstanceGetComputeInstanceById(GpuInstance, int) (ComputeInstance, Return) 
GpuInstanceGetComputeInstancePossiblePlacements(GpuInstance, *ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) GpuInstanceGetComputeInstanceProfileInfo(GpuInstance, int, int) (ComputeInstanceProfileInfo, Return) - GpuInstanceGetComputeInstanceProfileInfoV(GpuInstance, int, int) ComputeInstanceProfileInfoV + GpuInstanceGetComputeInstanceProfileInfoV(GpuInstance, int, int) ComputeInstanceProfileInfoHandler GpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance, *ComputeInstanceProfileInfo) (int, Return) GpuInstanceGetComputeInstances(GpuInstance, *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) + GpuInstanceGetCreatableVgpus(GpuInstance) (VgpuTypeIdInfo, Return) GpuInstanceGetInfo(GpuInstance) (GpuInstanceInfo, Return) + GpuInstanceGetVgpuHeterogeneousMode(GpuInstance) (VgpuHeterogeneousMode, Return) + GpuInstanceGetVgpuSchedulerLog(GpuInstance) (VgpuSchedulerLogInfo, Return) + GpuInstanceGetVgpuSchedulerState(GpuInstance) (VgpuSchedulerStateInfo, Return) + GpuInstanceGetVgpuTypeCreatablePlacements(GpuInstance) (VgpuCreatablePlacementInfo, Return) + GpuInstanceSetVgpuHeterogeneousMode(GpuInstance, *VgpuHeterogeneousMode) Return + GpuInstanceSetVgpuSchedulerState(GpuInstance, *VgpuSchedulerState) Return Init() Return InitWithFlags(uint32) Return SetVgpuVersion(*VgpuVersion) Return Shutdown() Return + SystemEventSetCreate(*SystemEventSetCreateRequest) Return + SystemEventSetFree(*SystemEventSetFreeRequest) Return + SystemEventSetWait(*SystemEventSetWaitRequest) Return + SystemGetConfComputeCapabilities() (ConfComputeSystemCaps, Return) + SystemGetConfComputeGpusReadyState() (uint32, Return) + SystemGetConfComputeKeyRotationThresholdInfo() (ConfComputeGetKeyRotationThresholdInfo, Return) + SystemGetConfComputeSettings() (SystemConfComputeSettings, Return) + SystemGetConfComputeState() (ConfComputeSystemState, Return) SystemGetCudaDriverVersion() (int, Return) SystemGetCudaDriverVersion_v2() (int, Return) + SystemGetDriverBranch() 
(SystemDriverBranchInfo, Return) SystemGetDriverVersion() (string, Return) SystemGetHicVersion() ([]HwbcEntry, Return) SystemGetNVMLVersion() (string, Return) + SystemGetNvlinkBwMode() (uint32, Return) SystemGetProcessName(int) (string, Return) SystemGetTopologyGpuSet(int) ([]Device, Return) + SystemRegisterEvents(*SystemRegisterEventRequest) Return + SystemSetConfComputeGpusReadyState(uint32) Return + SystemSetConfComputeKeyRotationThresholdInfo(ConfComputeSetKeyRotationThresholdInfo) Return + SystemSetNvlinkBwMode(uint32) Return UnitGetCount() (int, Return) UnitGetDevices(Unit) ([]Device, Return) UnitGetFanSpeedInfo(Unit) (UnitFanSpeeds, Return) @@ -591,11 +738,13 @@ type Interface interface { VgpuInstanceGetLicenseStatus(VgpuInstance) (int, Return) VgpuInstanceGetMdevUUID(VgpuInstance) (string, Return) VgpuInstanceGetMetadata(VgpuInstance) (VgpuMetadata, Return) + VgpuInstanceGetRuntimeStateSize(VgpuInstance) (VgpuRuntimeState, Return) VgpuInstanceGetType(VgpuInstance) (VgpuTypeId, Return) VgpuInstanceGetUUID(VgpuInstance) (string, Return) VgpuInstanceGetVmDriverVersion(VgpuInstance) (string, Return) VgpuInstanceGetVmID(VgpuInstance) (string, VgpuVmIdType, Return) VgpuInstanceSetEncoderCapacity(VgpuInstance, int) Return + VgpuTypeGetBAR1Info(VgpuTypeId) (VgpuTypeBar1Info, Return) VgpuTypeGetCapabilities(VgpuTypeId, VgpuCapability) (bool, Return) VgpuTypeGetClass(VgpuTypeId) (string, Return) VgpuTypeGetDeviceID(VgpuTypeId) (uint64, uint64, Return) @@ -604,6 +753,7 @@ type Interface interface { VgpuTypeGetGpuInstanceProfileId(VgpuTypeId) (uint32, Return) VgpuTypeGetLicense(VgpuTypeId) (string, Return) VgpuTypeGetMaxInstances(Device, VgpuTypeId) (int, Return) + VgpuTypeGetMaxInstancesPerGpuInstance(*VgpuTypeMaxInstance) Return VgpuTypeGetMaxInstancesPerVm(VgpuTypeId) (int, Return) VgpuTypeGetName(VgpuTypeId) (string, Return) VgpuTypeGetNumDisplayHeads(VgpuTypeId) (int, Return) @@ -614,8 +764,6 @@ type Interface interface { // //go:generate moq -out mock/device.go 
-pkg mock . Device:Device type Device interface { - CcuGetStreamState() (int, Return) - CcuSetStreamState(int) Return ClearAccountingPids() Return ClearCpuAffinity() Return ClearEccErrorCounts(EccCounterType) Return @@ -640,18 +788,28 @@ type Device interface { GetBrand() (BrandType, Return) GetBridgeChipInfo() (BridgeChipHierarchy, Return) GetBusType() (BusType, Return) + GetC2cModeInfoV() C2cModeInfoHandler + GetCapabilities() (DeviceCapabilities, Return) GetClkMonStatus() (ClkMonStatus, Return) GetClock(ClockType, ClockId) (uint32, Return) GetClockInfo(ClockType) (uint32, Return) + GetClockOffsets() (ClockOffset, Return) GetComputeInstanceId() (int, Return) GetComputeMode() (ComputeMode, Return) GetComputeRunningProcesses() ([]ProcessInfo, Return) + GetConfComputeGpuAttestationReport() (ConfComputeGpuAttestationReport, Return) + GetConfComputeGpuCertificate() (ConfComputeGpuCertificate, Return) + GetConfComputeMemSizeInfo() (ConfComputeMemSizeInfo, Return) + GetConfComputeProtectedMemoryUsage() (Memory, Return) + GetCoolerInfo() (CoolerInfo, Return) GetCpuAffinity(int) ([]uint, Return) GetCpuAffinityWithinScope(int, AffinityScope) ([]uint, Return) GetCreatableVgpus() ([]VgpuTypeId, Return) GetCudaComputeCapability() (int, int, Return) GetCurrPcieLinkGeneration() (int, Return) GetCurrPcieLinkWidth() (int, Return) + GetCurrentClockFreqs() (DeviceCurrentClockFreqs, Return) + GetCurrentClocksEventReasons() (uint64, Return) GetCurrentClocksThrottleReasons() (uint64, Return) GetDecoderUtilization() (uint32, uint32, Return) GetDefaultApplicationsClock(ClockType) (uint32, Return) @@ -660,7 +818,9 @@ type Device interface { GetDeviceHandleFromMigDeviceHandle() (Device, Return) GetDisplayActive() (EnableState, Return) GetDisplayMode() (EnableState, Return) + GetDramEncryptionMode() (DramEncryptionInfo, DramEncryptionInfo, Return) GetDriverModel() (DriverModel, DriverModel, Return) + GetDriverModel_v2() (DriverModel, DriverModel, Return) GetDynamicPstatesInfo() 
(GpuDynamicPstatesInfo, Return) GetEccMode() (EnableState, EnableState, Return) GetEncoderCapacity(EncoderType) (int, Return) @@ -672,16 +832,18 @@ type Device interface { GetFBCStats() (FBCStats, Return) GetFanControlPolicy_v2(int) (FanControlPolicy, Return) GetFanSpeed() (uint32, Return) + GetFanSpeedRPM() (FanSpeedInfo, Return) GetFanSpeed_v2(int) (uint32, Return) GetFieldValues([]FieldValue) Return GetGpcClkMinMaxVfOffset() (int, int, Return) GetGpcClkVfOffset() (int, Return) GetGpuFabricInfo() (GpuFabricInfo, Return) + GetGpuFabricInfoV() GpuFabricInfoHandler GetGpuInstanceById(int) (GpuInstance, Return) GetGpuInstanceId() (int, Return) GetGpuInstancePossiblePlacements(*GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) GetGpuInstanceProfileInfo(int) (GpuInstanceProfileInfo, Return) - GetGpuInstanceProfileInfoV(int) GpuInstanceProfileInfoV + GetGpuInstanceProfileInfoV(int) GpuInstanceProfileInfoHandler GetGpuInstanceRemainingCapacity(*GpuInstanceProfileInfo) (int, Return) GetGpuInstances(*GpuInstanceProfileInfo) ([]GpuInstance, Return) GetGpuMaxPcieLinkGeneration() (int, Return) @@ -696,7 +858,10 @@ type Device interface { GetInforomImageVersion() (string, Return) GetInforomVersion(InforomObject) (string, Return) GetIrqNum() (int, Return) + GetJpgUtilization() (uint32, uint32, Return) + GetLastBBXFlushTime() (uint64, uint, Return) GetMPSComputeRunningProcesses() ([]ProcessInfo, Return) + GetMarginTemperature() (MarginTemperature, Return) GetMaxClockInfo(ClockType) (uint32, Return) GetMaxCustomerBoostClock(ClockType) (uint32, Return) GetMaxMigDeviceCount() (int, Return) @@ -714,10 +879,12 @@ type Device interface { GetMinMaxClockOfPState(ClockType, Pstates) (uint32, uint32, Return) GetMinMaxFanSpeed() (int, int, Return) GetMinorNumber() (int, Return) + GetModuleId() (int, Return) GetMultiGpuBoard() (int, Return) GetName() (string, Return) GetNumFans() (int, Return) GetNumGpuCores() (int, Return) + GetNumaNodeId() (int, Return) GetNvLinkCapability(int, 
NvLinkCapability) (uint32, Return) GetNvLinkErrorCounter(int, NvLinkErrorCounter) (uint64, Return) GetNvLinkRemoteDeviceType(int) (IntNvLinkDeviceType, Return) @@ -726,15 +893,21 @@ type Device interface { GetNvLinkUtilizationControl(int, int) (NvLinkUtilizationControl, Return) GetNvLinkUtilizationCounter(int, int) (uint64, uint64, Return) GetNvLinkVersion(int) (uint32, Return) + GetNvlinkBwMode() (NvlinkGetBwMode, Return) + GetNvlinkSupportedBwModes() (NvlinkSupportedBwModes, Return) + GetOfaUtilization() (uint32, uint32, Return) GetP2PStatus(Device, GpuP2PCapsIndex) (GpuP2PStatus, Return) GetPciInfo() (PciInfo, Return) + GetPciInfoExt() (PciInfoExt, Return) GetPcieLinkMaxSpeed() (uint32, Return) GetPcieReplayCounter() (int, Return) GetPcieSpeed() (int, Return) GetPcieThroughput(PcieUtilCounter) (uint32, Return) + GetPerformanceModes() (DevicePerfModes, Return) GetPerformanceState() (Pstates, Return) GetPersistenceMode() (EnableState, Return) GetPgpuMetadataString() (string, Return) + GetPlatformInfo() (PlatformInfo, Return) GetPowerManagementDefaultLimit() (uint32, Return) GetPowerManagementLimit() (uint32, Return) GetPowerManagementLimitConstraints() (uint32, uint32, Return) @@ -743,13 +916,17 @@ type Device interface { GetPowerState() (Pstates, Return) GetPowerUsage() (uint32, Return) GetProcessUtilization(uint64) ([]ProcessUtilizationSample, Return) + GetProcessesUtilizationInfo() (ProcessesUtilizationInfo, Return) GetRemappedRows() (int, int, bool, bool, Return) GetRetiredPages(PageRetirementCause) ([]uint64, Return) GetRetiredPagesPendingStatus() (EnableState, Return) GetRetiredPages_v2(PageRetirementCause) ([]uint64, []uint64, Return) GetRowRemapperHistogram() (RowRemapperHistogramValues, Return) + GetRunningProcessDetailList() (ProcessDetailList, Return) GetSamples(SamplingType, uint64) (ValueType, []Sample, Return) GetSerial() (string, Return) + GetSramEccErrorStatus() (EccSramErrorStatus, Return) + GetSupportedClocksEventReasons() (uint64, Return) 
GetSupportedClocksThrottleReasons() (uint64, Return) GetSupportedEventTypes() (uint64, Return) GetSupportedGraphicsClocks(int) (int, uint32, Return) @@ -759,6 +936,7 @@ type Device interface { GetTargetFanSpeed(int) (int, Return) GetTemperature(TemperatureSensors) (uint32, Return) GetTemperatureThreshold(TemperatureThresholds) (uint32, Return) + GetTemperatureV() TemperatureHandler GetThermalSettings(uint32) (GpuThermalSettings, Return) GetTopologyCommonAncestor(Device) (GpuTopologyLevel, Return) GetTopologyNearestGpus(GpuTopologyLevel) ([]Device, Return) @@ -768,20 +946,30 @@ type Device interface { GetUtilizationRates() (Utilization, Return) GetVbiosVersion() (string, Return) GetVgpuCapabilities(DeviceVgpuCapability) (bool, Return) + GetVgpuHeterogeneousMode() (VgpuHeterogeneousMode, Return) + GetVgpuInstancesUtilizationInfo() (VgpuInstancesUtilizationInfo, Return) GetVgpuMetadata() (VgpuPgpuMetadata, Return) GetVgpuProcessUtilization(uint64) ([]VgpuProcessUtilizationSample, Return) + GetVgpuProcessesUtilizationInfo() (VgpuProcessesUtilizationInfo, Return) GetVgpuSchedulerCapabilities() (VgpuSchedulerCapabilities, Return) GetVgpuSchedulerLog() (VgpuSchedulerLog, Return) GetVgpuSchedulerState() (VgpuSchedulerGetState, Return) + GetVgpuTypeCreatablePlacements(VgpuTypeId) (VgpuPlacementList, Return) + GetVgpuTypeSupportedPlacements(VgpuTypeId) (VgpuPlacementList, Return) GetVgpuUtilization(uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) GetViolationStatus(PerfPolicyType) (ViolationTime, Return) GetVirtualizationMode() (GpuVirtualizationMode, Return) GpmMigSampleGet(int, GpmSample) Return GpmQueryDeviceSupport() (GpmSupport, Return) GpmQueryDeviceSupportV() GpmSupportV + GpmQueryIfStreamingEnabled() (uint32, Return) GpmSampleGet(GpmSample) Return + GpmSetStreamingEnabled(uint32) Return IsMigDeviceHandle() (bool, Return) OnSameBoard(Device) (int, Return) + PowerSmoothingActivatePresetProfile(*PowerSmoothingProfile) Return + 
PowerSmoothingSetState(*PowerSmoothingState) Return + PowerSmoothingUpdatePresetProfileParam(*PowerSmoothingProfile) Return RegisterEvents(uint64, EventSet) Return ResetApplicationsClocks() Return ResetGpuLockedClocks() Return @@ -792,10 +980,13 @@ type Device interface { SetAccountingMode(EnableState) Return SetApplicationsClocks(uint32, uint32) Return SetAutoBoostedClocksEnabled(EnableState) Return + SetClockOffsets(ClockOffset) Return SetComputeMode(ComputeMode) Return + SetConfComputeUnprotectedMemSize(uint64) Return SetCpuAffinity() Return SetDefaultAutoBoostedClocksEnabled(EnableState, uint32) Return SetDefaultFanSpeed_v2(int) Return + SetDramEncryptionMode(*DramEncryptionInfo) Return SetDriverModel(DriverModel, uint32) Return SetEccMode(EnableState) Return SetFanControlPolicy(int, FanControlPolicy) Return @@ -808,13 +999,21 @@ type Device interface { SetMigMode(int) (Return, Return) SetNvLinkDeviceLowPowerThreshold(*NvLinkPowerThres) Return SetNvLinkUtilizationControl(int, int, *NvLinkUtilizationControl, bool) Return + SetNvlinkBwMode(*NvlinkSetBwMode) Return SetPersistenceMode(EnableState) Return SetPowerManagementLimit(uint32) Return + SetPowerManagementLimit_v2(*PowerValue_v2) Return SetTemperatureThreshold(TemperatureThresholds, int) Return + SetVgpuCapabilities(DeviceVgpuCapability, EnableState) Return + SetVgpuHeterogeneousMode(VgpuHeterogeneousMode) Return SetVgpuSchedulerState(*VgpuSchedulerSetState) Return SetVirtualizationMode(GpuVirtualizationMode) Return ValidateInforom() Return VgpuTypeGetMaxInstances(VgpuTypeId) (int, Return) + WorkloadPowerProfileClearRequestedProfiles(*WorkloadPowerProfileRequestedProfiles) Return + WorkloadPowerProfileGetCurrentProfiles() (WorkloadPowerProfileCurrentProfiles, Return) + WorkloadPowerProfileGetProfilesInfo() (WorkloadPowerProfileProfilesInfo, Return) + WorkloadPowerProfileSetRequestedProfiles(*WorkloadPowerProfileRequestedProfiles) Return } // GpuInstance represents the interface for the nvmlGpuInstance type. 
@@ -824,13 +1023,21 @@ type GpuInstance interface { CreateComputeInstance(*ComputeInstanceProfileInfo) (ComputeInstance, Return) CreateComputeInstanceWithPlacement(*ComputeInstanceProfileInfo, *ComputeInstancePlacement) (ComputeInstance, Return) Destroy() Return + GetActiveVgpus() (ActiveVgpuInstanceInfo, Return) GetComputeInstanceById(int) (ComputeInstance, Return) GetComputeInstancePossiblePlacements(*ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) GetComputeInstanceProfileInfo(int, int) (ComputeInstanceProfileInfo, Return) - GetComputeInstanceProfileInfoV(int, int) ComputeInstanceProfileInfoV + GetComputeInstanceProfileInfoV(int, int) ComputeInstanceProfileInfoHandler GetComputeInstanceRemainingCapacity(*ComputeInstanceProfileInfo) (int, Return) GetComputeInstances(*ComputeInstanceProfileInfo) ([]ComputeInstance, Return) + GetCreatableVgpus() (VgpuTypeIdInfo, Return) GetInfo() (GpuInstanceInfo, Return) + GetVgpuHeterogeneousMode() (VgpuHeterogeneousMode, Return) + GetVgpuSchedulerLog() (VgpuSchedulerLogInfo, Return) + GetVgpuSchedulerState() (VgpuSchedulerStateInfo, Return) + GetVgpuTypeCreatablePlacements() (VgpuCreatablePlacementInfo, Return) + SetVgpuHeterogeneousMode(*VgpuHeterogeneousMode) Return + SetVgpuSchedulerState(*VgpuSchedulerState) Return } // ComputeInstance represents the interface for the nvmlComputeInstance type. @@ -893,6 +1100,7 @@ type VgpuInstance interface { GetLicenseStatus() (int, Return) GetMdevUUID() (string, Return) GetMetadata() (VgpuMetadata, Return) + GetRuntimeStateSize() (VgpuRuntimeState, Return) GetType() (VgpuTypeId, Return) GetUUID() (string, Return) GetVmDriverVersion() (string, Return) @@ -904,8 +1112,10 @@ type VgpuInstance interface { // //go:generate moq -out mock/vgputypeid.go -pkg mock . 
VgpuTypeId:VgpuTypeId type VgpuTypeId interface { + GetBAR1Info() (VgpuTypeBar1Info, Return) GetCapabilities(VgpuCapability) (bool, Return) GetClass() (string, Return) + GetCreatablePlacements(Device) (VgpuPlacementList, Return) GetDeviceID() (uint64, uint64, Return) GetFrameRateLimit() (uint32, Return) GetFramebufferSize() (uint64, Return) @@ -916,4 +1126,5 @@ type VgpuTypeId interface { GetName() (string, Return) GetNumDisplayHeads() (int, Return) GetResolution(int) (uint32, uint32, Return) + GetSupportedPlacements(Device) (VgpuPlacementList, Return) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aa..7e19eba 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. 
+type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) 
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab10..1906341 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) 
+} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09b..2162908 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) 
} +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. 
+// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a..1d2f718 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. 
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) 
+ return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f..4e91332 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. 
+type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) @@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. 
+func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) 
- } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) + } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) 
@@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. 
+ } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. 
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 0000000..baa0cc7 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 0000000..b83c6cf --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. 
+func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 0000000..e78f7df --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f..d892195 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. 
// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. 
If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// require.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// require.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. 
// // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +232,10 
@@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
// // actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) +// if require.Error(t, err) { +// require.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) +// if require.Errorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") 
func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. 
// -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message 
%s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. 
// -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +734,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +749,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. 
// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +764,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +779,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +794,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. 
// -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +809,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -824,7 +824,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. 
// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// require.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// require.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// require.IsDecreasing(t, []int{2, 1, 0}) +// require.IsDecreasing(t, []float{2, 1}) +// require.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -994,9 +994,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1009,9 +1009,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// require.IsIncreasing(t, []int{1, 2, 3}) +// require.IsIncreasing(t, 
[]float{1, 2}) +// require.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1039,9 +1039,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// require.IsNonDecreasing(t, []int{1, 1, 2}) +// require.IsNonDecreasing(t, []float{1, 2}) +// require.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1054,9 +1054,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message 
%s", "formatted") +// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1069,9 +1069,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// require.IsNonIncreasing(t, []int{2, 1, 1}) +// require.IsNonIncreasing(t, []float{2, 1}) +// require.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. 
// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1148,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// assert.Len(t, mySlice, 3) +// require.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. 
// -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// require.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// require.Less(t, 1, 2) +// require.Less(t, float64(1), float64(2)) +// require.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// require.LessOrEqual(t, 1, 2) +// require.LessOrEqual(t, 2, 2) +// require.LessOrEqual(t, "a", "b") +// require.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, "a", "b", "error message 
%s", "formatted") +// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . // Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// require.Lessf(t, 1, 2, "error message %s", "formatted") +// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// require.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// require.Negative(t, -1) +// require.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// require.Negativef(t, -1, "error message %s", "formatted") +// require.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't 
satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// require.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. // -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). 
// // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. 
// -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
// NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// require.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. 
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) t.FailNow() } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotImplements asserts that an object does not implement the specified interface. // -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +// require.NotImplements(t, (*MyInterface)(nil), new(MyObject)) func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, // NotImplementsf asserts that an object does not implement the specified interface. 
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. 
// -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. 
@@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. 
// -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. 
// -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. 
// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). 
// -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42dd..8b32836 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310..1bd8730 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) 
} +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. 
+// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. 
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772df..6b7ce92 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/modules.txt b/vendor/modules.txt index be6089a..8e97acc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,7 +1,7 @@ # github.com/NVIDIA/go-nvlib v0.5.0 ## explicit; go 1.20 github.com/NVIDIA/go-nvlib/pkg/nvlib/device -# github.com/NVIDIA/go-nvml v0.12.0-6 +# github.com/NVIDIA/go-nvml v0.12.9-0 ## explicit; go 1.20 github.com/NVIDIA/go-nvml/pkg/dl github.com/NVIDIA/go-nvml/pkg/nvml @@ -15,9 +15,10 @@ github.com/google/uuid # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/stretchr/testify v1.9.0 +# github.com/stretchr/testify v1.10.0 ## explicit; go 1.17 github.com/stretchr/testify/assert +github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require # gopkg.in/yaml.v3 v3.0.1 ## explicit