From 338d5b14fdd8ca0e3e15ce81a11316ab675b3d3d Mon Sep 17 00:00:00 2001 From: Michael Droettboom Date: Thu, 22 Jan 2026 12:38:21 -0500 Subject: [PATCH 1/6] cuda.bindings.nvml: Big wave of handwritten API --- .../cuda/bindings/_internal/_nvml.pxd | 9 +- .../cuda/bindings/_internal/_nvml_linux.pyx | 149 +- .../cuda/bindings/_internal/_nvml_windows.pyx | 121 +- cuda_bindings/cuda/bindings/_nvml.pxd | 52 +- cuda_bindings/cuda/bindings/_nvml.pyx | 6088 ++++++----------- cuda_bindings/cuda/bindings/cy_nvml.pxd | 9 +- cuda_bindings/cuda/bindings/cy_nvml.pyx | 30 +- cuda_bindings/tests/nvml/test_device.py | 159 + cuda_bindings/tests/nvml/test_gpu.py | 47 +- cuda_bindings/tests/nvml/test_pci.py | 33 + cuda_core/tests/system/test_system_device.py | 2 +- 11 files changed, 2472 insertions(+), 4227 deletions(-) create mode 100644 cuda_bindings/tests/nvml/test_device.py create mode 100644 cuda_bindings/tests/nvml/test_pci.py diff --git a/cuda_bindings/cuda/bindings/_internal/_nvml.pxd b/cuda_bindings/cuda/bindings/_internal/_nvml.pxd index 298a1a72f9..ec16c69ed3 100644 --- a/cuda_bindings/cuda/bindings/_internal/_nvml.pxd +++ b/cuda_bindings/cuda/bindings/_internal/_nvml.pxd @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE # @@ -198,7 +198,6 @@ cdef nvmlReturn_t _nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t dev cdef nvmlReturn_t _nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, nvmlFanControlPolicy_t policy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int* temp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t _nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil @@ -337,13 +336,7 @@ cdef nvmlReturn_t _nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned cdef nvmlReturn_t _nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, nvmlDevice_t* migDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t _nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t _nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil 
-cdef nvmlReturn_t _nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t _nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t _nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t _nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDevicePowerSmoothingUpdatePresetProfileParam(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t _nvmlDevicePowerSmoothingSetState(nvmlDevice_t device, nvmlPowerSmoothingState_t* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil diff --git a/cuda_bindings/cuda/bindings/_internal/_nvml_linux.pyx b/cuda_bindings/cuda/bindings/_internal/_nvml_linux.pyx index 9031f6f7fe..4a0a644d4a 100644 --- a/cuda_bindings/cuda/bindings/_internal/_nvml_linux.pyx +++ b/cuda_bindings/cuda/bindings/_internal/_nvml_linux.pyx @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE # @@ -247,7 +247,6 @@ cdef void* __nvmlDeviceSetDefaultAutoBoostedClocksEnabled = NULL cdef void* __nvmlDeviceSetDefaultFanSpeed_v2 = NULL cdef void* __nvmlDeviceSetFanControlPolicy = NULL cdef void* __nvmlDeviceSetTemperatureThreshold = NULL -cdef void* __nvmlDeviceSetPowerManagementLimit = NULL cdef void* __nvmlDeviceSetGpuOperationMode = NULL cdef void* __nvmlDeviceSetAPIRestriction = NULL cdef void* __nvmlDeviceSetFanSpeed_v2 = NULL @@ -386,13 +385,7 @@ cdef void* __nvmlDeviceGetComputeInstanceId = NULL cdef void* __nvmlDeviceGetMaxMigDeviceCount = NULL cdef void* __nvmlDeviceGetMigDeviceHandleByIndex = NULL cdef void* __nvmlDeviceGetDeviceHandleFromMigDeviceHandle = NULL -cdef void* __nvmlGpmSampleGet = NULL -cdef void* __nvmlGpmMigSampleGet = NULL -cdef void* __nvmlGpmQueryDeviceSupport = NULL -cdef void* __nvmlGpmQueryIfStreamingEnabled = NULL -cdef void* __nvmlGpmSetStreamingEnabled = NULL cdef void* __nvmlDeviceGetCapabilities = NULL -cdef void* __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles = NULL cdef void* __nvmlDevicePowerSmoothingActivatePresetProfile = NULL cdef void* __nvmlDevicePowerSmoothingUpdatePresetProfileParam = NULL cdef void* __nvmlDevicePowerSmoothingSetState = NULL @@ -1735,13 +1728,6 @@ cdef int _init_nvml() except -1 nogil: handle = load_library() __nvmlDeviceSetTemperatureThreshold = dlsym(handle, 'nvmlDeviceSetTemperatureThreshold') - global __nvmlDeviceSetPowerManagementLimit - __nvmlDeviceSetPowerManagementLimit = dlsym(RTLD_DEFAULT, 'nvmlDeviceSetPowerManagementLimit') - if __nvmlDeviceSetPowerManagementLimit == NULL: - if handle == NULL: - handle = load_library() - __nvmlDeviceSetPowerManagementLimit = dlsym(handle, 'nvmlDeviceSetPowerManagementLimit') - global __nvmlDeviceSetGpuOperationMode __nvmlDeviceSetGpuOperationMode = dlsym(RTLD_DEFAULT, 'nvmlDeviceSetGpuOperationMode') if __nvmlDeviceSetGpuOperationMode == NULL: @@ -2708,41 +2694,6 @@ cdef int _init_nvml() except -1 nogil: handle = load_library() __nvmlDeviceGetDeviceHandleFromMigDeviceHandle = dlsym(handle, 'nvmlDeviceGetDeviceHandleFromMigDeviceHandle') - global __nvmlGpmSampleGet - __nvmlGpmSampleGet = dlsym(RTLD_DEFAULT, 'nvmlGpmSampleGet') - if __nvmlGpmSampleGet == NULL: - if handle == NULL: - handle = load_library() - __nvmlGpmSampleGet = dlsym(handle, 'nvmlGpmSampleGet') - - global __nvmlGpmMigSampleGet - __nvmlGpmMigSampleGet = dlsym(RTLD_DEFAULT, 'nvmlGpmMigSampleGet') - if __nvmlGpmMigSampleGet == NULL: - if handle == NULL: - handle = load_library() - __nvmlGpmMigSampleGet = dlsym(handle, 'nvmlGpmMigSampleGet') - - global __nvmlGpmQueryDeviceSupport - __nvmlGpmQueryDeviceSupport = dlsym(RTLD_DEFAULT, 'nvmlGpmQueryDeviceSupport') - if __nvmlGpmQueryDeviceSupport == NULL: - if handle == NULL: - handle = load_library() - __nvmlGpmQueryDeviceSupport = dlsym(handle, 'nvmlGpmQueryDeviceSupport') - - global __nvmlGpmQueryIfStreamingEnabled - __nvmlGpmQueryIfStreamingEnabled = dlsym(RTLD_DEFAULT, 'nvmlGpmQueryIfStreamingEnabled') - if __nvmlGpmQueryIfStreamingEnabled == NULL: - if handle == NULL: - handle = load_library() - __nvmlGpmQueryIfStreamingEnabled = dlsym(handle, 'nvmlGpmQueryIfStreamingEnabled') - - global __nvmlGpmSetStreamingEnabled - __nvmlGpmSetStreamingEnabled = dlsym(RTLD_DEFAULT, 'nvmlGpmSetStreamingEnabled') - if __nvmlGpmSetStreamingEnabled == NULL: - if handle == NULL: - handle = load_library() - __nvmlGpmSetStreamingEnabled = dlsym(handle, 'nvmlGpmSetStreamingEnabled') - global 
__nvmlDeviceGetCapabilities __nvmlDeviceGetCapabilities = dlsym(RTLD_DEFAULT, 'nvmlDeviceGetCapabilities') if __nvmlDeviceGetCapabilities == NULL: @@ -2750,13 +2701,6 @@ cdef int _init_nvml() except -1 nogil: handle = load_library() __nvmlDeviceGetCapabilities = dlsym(handle, 'nvmlDeviceGetCapabilities') - global __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles = dlsym(RTLD_DEFAULT, 'nvmlDeviceWorkloadPowerProfileClearRequestedProfiles') - if __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles == NULL: - if handle == NULL: - handle = load_library() - __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles = dlsym(handle, 'nvmlDeviceWorkloadPowerProfileClearRequestedProfiles') - global __nvmlDevicePowerSmoothingActivatePresetProfile __nvmlDevicePowerSmoothingActivatePresetProfile = dlsym(RTLD_DEFAULT, 'nvmlDevicePowerSmoothingActivatePresetProfile') if __nvmlDevicePowerSmoothingActivatePresetProfile == NULL: @@ -3459,9 +3403,6 @@ cpdef dict _inspect_function_pointers(): global __nvmlDeviceSetTemperatureThreshold data["__nvmlDeviceSetTemperatureThreshold"] = __nvmlDeviceSetTemperatureThreshold - global __nvmlDeviceSetPowerManagementLimit - data["__nvmlDeviceSetPowerManagementLimit"] = __nvmlDeviceSetPowerManagementLimit - global __nvmlDeviceSetGpuOperationMode data["__nvmlDeviceSetGpuOperationMode"] = __nvmlDeviceSetGpuOperationMode @@ -3876,27 +3817,9 @@ cpdef dict _inspect_function_pointers(): global __nvmlDeviceGetDeviceHandleFromMigDeviceHandle data["__nvmlDeviceGetDeviceHandleFromMigDeviceHandle"] = __nvmlDeviceGetDeviceHandleFromMigDeviceHandle - global __nvmlGpmSampleGet - data["__nvmlGpmSampleGet"] = __nvmlGpmSampleGet - - global __nvmlGpmMigSampleGet - data["__nvmlGpmMigSampleGet"] = __nvmlGpmMigSampleGet - - global __nvmlGpmQueryDeviceSupport - data["__nvmlGpmQueryDeviceSupport"] = __nvmlGpmQueryDeviceSupport - - global __nvmlGpmQueryIfStreamingEnabled - data["__nvmlGpmQueryIfStreamingEnabled"] = __nvmlGpmQueryIfStreamingEnabled - - global __nvmlGpmSetStreamingEnabled - data["__nvmlGpmSetStreamingEnabled"] = __nvmlGpmSetStreamingEnabled - global __nvmlDeviceGetCapabilities data["__nvmlDeviceGetCapabilities"] = __nvmlDeviceGetCapabilities - global __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - data["__nvmlDeviceWorkloadPowerProfileClearRequestedProfiles"] = __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - global __nvmlDevicePowerSmoothingActivatePresetProfile data["__nvmlDevicePowerSmoothingActivatePresetProfile"] = __nvmlDevicePowerSmoothingActivatePresetProfile @@ -5833,16 +5756,6 @@ cdef nvmlReturn_t _nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTe device, thresholdType, temp) -cdef nvmlReturn_t _nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlDeviceSetPowerManagementLimit - _check_or_init_nvml() - if __nvmlDeviceSetPowerManagementLimit == NULL: - with gil: - raise FunctionNotFoundError("function nvmlDeviceSetPowerManagementLimit is not found") - return (__nvmlDeviceSetPowerManagementLimit)( - device, limit) - - cdef nvmlReturn_t _nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: global __nvmlDeviceSetGpuOperationMode _check_or_init_nvml() @@ -7223,56 +7136,6 @@ cdef nvmlReturn_t _nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t mig migDevice, device) -cdef nvmlReturn_t _nvmlGpmSampleGet(nvmlDevice_t 
device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmSampleGet - _check_or_init_nvml() - if __nvmlGpmSampleGet == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmSampleGet is not found") - return (__nvmlGpmSampleGet)( - device, gpmSample) - - -cdef nvmlReturn_t _nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmMigSampleGet - _check_or_init_nvml() - if __nvmlGpmMigSampleGet == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmMigSampleGet is not found") - return (__nvmlGpmMigSampleGet)( - device, gpuInstanceId, gpmSample) - - -cdef nvmlReturn_t _nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmQueryDeviceSupport - _check_or_init_nvml() - if __nvmlGpmQueryDeviceSupport == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmQueryDeviceSupport is not found") - return (__nvmlGpmQueryDeviceSupport)( - device, gpmSupport) - - -cdef nvmlReturn_t _nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmQueryIfStreamingEnabled - _check_or_init_nvml() - if __nvmlGpmQueryIfStreamingEnabled == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmQueryIfStreamingEnabled is not found") - return (__nvmlGpmQueryIfStreamingEnabled)( - device, state) - - -cdef nvmlReturn_t _nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmSetStreamingEnabled - _check_or_init_nvml() - if __nvmlGpmSetStreamingEnabled == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmSetStreamingEnabled is not found") - return (__nvmlGpmSetStreamingEnabled)( - device, state) - - cdef nvmlReturn_t _nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: global __nvmlDeviceGetCapabilities _check_or_init_nvml() @@ -7283,16 +7146,6 @@ cdef nvmlReturn_t _nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapa device, caps) -cdef nvmlReturn_t _nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - _check_or_init_nvml() - if __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles == NULL: - with gil: - raise FunctionNotFoundError("function nvmlDeviceWorkloadPowerProfileClearRequestedProfiles is not found") - return (__nvmlDeviceWorkloadPowerProfileClearRequestedProfiles)( - device, requestedProfiles) - - cdef nvmlReturn_t _nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: global __nvmlDevicePowerSmoothingActivatePresetProfile _check_or_init_nvml() diff --git a/cuda_bindings/cuda/bindings/_internal/_nvml_windows.pyx b/cuda_bindings/cuda/bindings/_internal/_nvml_windows.pyx index d8a9be4c48..00ec950f15 100644 --- a/cuda_bindings/cuda/bindings/_internal/_nvml_windows.pyx +++ b/cuda_bindings/cuda/bindings/_internal/_nvml_windows.pyx @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE # @@ -264,7 +264,6 @@ cdef void* __nvmlDeviceSetDefaultAutoBoostedClocksEnabled = NULL cdef void* __nvmlDeviceSetDefaultFanSpeed_v2 = NULL cdef void* __nvmlDeviceSetFanControlPolicy = NULL cdef void* __nvmlDeviceSetTemperatureThreshold = NULL -cdef void* __nvmlDeviceSetPowerManagementLimit = NULL cdef void* __nvmlDeviceSetGpuOperationMode = NULL cdef void* __nvmlDeviceSetAPIRestriction = NULL cdef void* __nvmlDeviceSetFanSpeed_v2 = NULL @@ -403,13 +402,7 @@ cdef void* __nvmlDeviceGetComputeInstanceId = NULL cdef void* __nvmlDeviceGetMaxMigDeviceCount = NULL cdef void* __nvmlDeviceGetMigDeviceHandleByIndex = NULL cdef void* __nvmlDeviceGetDeviceHandleFromMigDeviceHandle = NULL -cdef void* __nvmlGpmSampleGet = NULL -cdef void* __nvmlGpmMigSampleGet = NULL -cdef void* __nvmlGpmQueryDeviceSupport = NULL -cdef void* __nvmlGpmQueryIfStreamingEnabled = NULL -cdef void* __nvmlGpmSetStreamingEnabled = NULL cdef void* __nvmlDeviceGetCapabilities = NULL -cdef void* __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles = NULL cdef void* __nvmlDevicePowerSmoothingActivatePresetProfile = NULL cdef void* __nvmlDevicePowerSmoothingUpdatePresetProfileParam = NULL cdef void* __nvmlDevicePowerSmoothingSetState = NULL @@ -1029,9 +1022,6 @@ cdef int _init_nvml() except -1 nogil: global __nvmlDeviceSetTemperatureThreshold __nvmlDeviceSetTemperatureThreshold = GetProcAddress(handle, 'nvmlDeviceSetTemperatureThreshold') - global __nvmlDeviceSetPowerManagementLimit - __nvmlDeviceSetPowerManagementLimit = GetProcAddress(handle, 'nvmlDeviceSetPowerManagementLimit') - global __nvmlDeviceSetGpuOperationMode __nvmlDeviceSetGpuOperationMode = GetProcAddress(handle, 'nvmlDeviceSetGpuOperationMode') @@ -1446,27 +1436,9 @@ cdef int _init_nvml() except -1 nogil: global __nvmlDeviceGetDeviceHandleFromMigDeviceHandle __nvmlDeviceGetDeviceHandleFromMigDeviceHandle = GetProcAddress(handle, 'nvmlDeviceGetDeviceHandleFromMigDeviceHandle') - global __nvmlGpmSampleGet - __nvmlGpmSampleGet = GetProcAddress(handle, 'nvmlGpmSampleGet') - - global __nvmlGpmMigSampleGet - __nvmlGpmMigSampleGet = GetProcAddress(handle, 'nvmlGpmMigSampleGet') - - global __nvmlGpmQueryDeviceSupport - __nvmlGpmQueryDeviceSupport = GetProcAddress(handle, 'nvmlGpmQueryDeviceSupport') - - global __nvmlGpmQueryIfStreamingEnabled - __nvmlGpmQueryIfStreamingEnabled = GetProcAddress(handle, 'nvmlGpmQueryIfStreamingEnabled') - - global __nvmlGpmSetStreamingEnabled - __nvmlGpmSetStreamingEnabled = GetProcAddress(handle, 'nvmlGpmSetStreamingEnabled') - global __nvmlDeviceGetCapabilities __nvmlDeviceGetCapabilities = GetProcAddress(handle, 'nvmlDeviceGetCapabilities') - global __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles = GetProcAddress(handle, 'nvmlDeviceWorkloadPowerProfileClearRequestedProfiles') - global __nvmlDevicePowerSmoothingActivatePresetProfile __nvmlDevicePowerSmoothingActivatePresetProfile = GetProcAddress(handle, 'nvmlDevicePowerSmoothingActivatePresetProfile') @@ -2101,9 +2073,6 @@ cpdef dict _inspect_function_pointers(): global __nvmlDeviceSetTemperatureThreshold data["__nvmlDeviceSetTemperatureThreshold"] = __nvmlDeviceSetTemperatureThreshold - global __nvmlDeviceSetPowerManagementLimit - data["__nvmlDeviceSetPowerManagementLimit"] = __nvmlDeviceSetPowerManagementLimit - global __nvmlDeviceSetGpuOperationMode 
data["__nvmlDeviceSetGpuOperationMode"] = __nvmlDeviceSetGpuOperationMode @@ -2518,27 +2487,9 @@ cpdef dict _inspect_function_pointers(): global __nvmlDeviceGetDeviceHandleFromMigDeviceHandle data["__nvmlDeviceGetDeviceHandleFromMigDeviceHandle"] = __nvmlDeviceGetDeviceHandleFromMigDeviceHandle - global __nvmlGpmSampleGet - data["__nvmlGpmSampleGet"] = __nvmlGpmSampleGet - - global __nvmlGpmMigSampleGet - data["__nvmlGpmMigSampleGet"] = __nvmlGpmMigSampleGet - - global __nvmlGpmQueryDeviceSupport - data["__nvmlGpmQueryDeviceSupport"] = __nvmlGpmQueryDeviceSupport - - global __nvmlGpmQueryIfStreamingEnabled - data["__nvmlGpmQueryIfStreamingEnabled"] = __nvmlGpmQueryIfStreamingEnabled - - global __nvmlGpmSetStreamingEnabled - data["__nvmlGpmSetStreamingEnabled"] = __nvmlGpmSetStreamingEnabled - global __nvmlDeviceGetCapabilities data["__nvmlDeviceGetCapabilities"] = __nvmlDeviceGetCapabilities - global __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - data["__nvmlDeviceWorkloadPowerProfileClearRequestedProfiles"] = __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - global __nvmlDevicePowerSmoothingActivatePresetProfile data["__nvmlDevicePowerSmoothingActivatePresetProfile"] = __nvmlDevicePowerSmoothingActivatePresetProfile @@ -4475,16 +4426,6 @@ cdef nvmlReturn_t _nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTe device, thresholdType, temp) -cdef nvmlReturn_t _nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlDeviceSetPowerManagementLimit - _check_or_init_nvml() - if __nvmlDeviceSetPowerManagementLimit == NULL: - with gil: - raise FunctionNotFoundError("function nvmlDeviceSetPowerManagementLimit is not found") - return (__nvmlDeviceSetPowerManagementLimit)( - device, limit) - - cdef nvmlReturn_t _nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: global __nvmlDeviceSetGpuOperationMode _check_or_init_nvml() @@ -5865,56 +5806,6 @@ cdef nvmlReturn_t _nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t mig migDevice, device) -cdef nvmlReturn_t _nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmSampleGet - _check_or_init_nvml() - if __nvmlGpmSampleGet == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmSampleGet is not found") - return (__nvmlGpmSampleGet)( - device, gpmSample) - - -cdef nvmlReturn_t _nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmMigSampleGet - _check_or_init_nvml() - if __nvmlGpmMigSampleGet == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmMigSampleGet is not found") - return (__nvmlGpmMigSampleGet)( - device, gpuInstanceId, gpmSample) - - -cdef nvmlReturn_t _nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmQueryDeviceSupport - _check_or_init_nvml() - if __nvmlGpmQueryDeviceSupport == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmQueryDeviceSupport is not found") - return (__nvmlGpmQueryDeviceSupport)( - device, gpmSupport) - - -cdef nvmlReturn_t _nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmQueryIfStreamingEnabled - 
_check_or_init_nvml() - if __nvmlGpmQueryIfStreamingEnabled == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmQueryIfStreamingEnabled is not found") - return (__nvmlGpmQueryIfStreamingEnabled)( - device, state) - - -cdef nvmlReturn_t _nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlGpmSetStreamingEnabled - _check_or_init_nvml() - if __nvmlGpmSetStreamingEnabled == NULL: - with gil: - raise FunctionNotFoundError("function nvmlGpmSetStreamingEnabled is not found") - return (__nvmlGpmSetStreamingEnabled)( - device, state) - - cdef nvmlReturn_t _nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: global __nvmlDeviceGetCapabilities _check_or_init_nvml() @@ -5925,16 +5816,6 @@ cdef nvmlReturn_t _nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapa device, caps) -cdef nvmlReturn_t _nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - global __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles - _check_or_init_nvml() - if __nvmlDeviceWorkloadPowerProfileClearRequestedProfiles == NULL: - with gil: - raise FunctionNotFoundError("function nvmlDeviceWorkloadPowerProfileClearRequestedProfiles is not found") - return (__nvmlDeviceWorkloadPowerProfileClearRequestedProfiles)( - device, requestedProfiles) - - cdef nvmlReturn_t _nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: global __nvmlDevicePowerSmoothingActivatePresetProfile _check_or_init_nvml() diff --git a/cuda_bindings/cuda/bindings/_nvml.pxd b/cuda_bindings/cuda/bindings/_nvml.pxd index 4dd1c728a2..c407f13536 100644 --- a/cuda_bindings/cuda/bindings/_nvml.pxd +++ b/cuda_bindings/cuda/bindings/_nvml.pxd @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE # @@ -16,11 +16,18 @@ from .cy_nvml cimport * ctypedef nvmlDramEncryptionInfo_v1_t DramEncryptionInfo_v1 ctypedef nvmlMarginTemperature_v1_t MarginTemperature_v1 ctypedef nvmlFanSpeedInfo_v1_t FanSpeedInfo_v1 +ctypedef nvmlDevicePerfModes_v1_t DevicePerfModes_v1 +ctypedef nvmlVgpuHeterogeneousMode_v1_t VgpuHeterogeneousMode_v1 +ctypedef nvmlVgpuPlacementId_v1_t VgpuPlacementId_v1 +ctypedef nvmlVgpuRuntimeState_v1_t VgpuRuntimeState_v1 ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t ConfComputeSetKeyRotationThresholdInfo_v1 +ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t ConfComputeGetKeyRotationThresholdInfo_v1 ctypedef nvmlSystemDriverBranchInfo_v1_t SystemDriverBranchInfo_v1 ctypedef nvmlTemperature_v1_t Temperature_v1 +ctypedef nvmlDeviceCapabilities_v1_t DeviceCapabilities_v1 ctypedef nvmlPowerSmoothingProfile_v1_t PowerSmoothingProfile_v1 ctypedef nvmlPowerSmoothingState_v1_t PowerSmoothingState_v1 +ctypedef nvmlPdi_v1_t Pdi_v1 ctypedef nvmlDevice_t Device ctypedef nvmlGpuInstance_t GpuInstance ctypedef nvmlUnit_t Unit @@ -37,16 +44,20 @@ ctypedef nvmlUUIDValue_t UUIDValue ctypedef nvmlVgpuPlacementList_v1_t VgpuPlacementList_v1 ctypedef nvmlNvLinkPowerThres_t NvLinkPowerThres ctypedef nvmlGpuInstanceProfileInfo_t GpuInstanceProfileInfo +ctypedef nvmlGpuInstanceProfileInfo_v2_t GpuInstanceProfileInfo_v2 ctypedef nvmlComputeInstanceProfileInfo_t ComputeInstanceProfileInfo +ctypedef nvmlGpmSupport_t GpmSupport ctypedef nvmlMask255_t Mask255 ctypedef nvmlHostname_v1_t Hostname_v1 +ctypedef nvmlUnrepairableMemoryStatus_v1_t UnrepairableMemoryStatus_v1 +ctypedef nvmlRusdSettings_v1_t RusdSettings_v1 ctypedef nvmlPowerValue_v2_t PowerValue_v2 +ctypedef nvmlVgpuTypeMaxInstance_v1_t VgpuTypeMaxInstance_v1 ctypedef nvmlVgpuProcessUtilizationSample_t VgpuProcessUtilizationSample ctypedef nvmlGpuFabricInfo_t GpuFabricInfo ctypedef nvmlSystemEventSetCreateRequest_v1_t SystemEventSetCreateRequest_v1 ctypedef nvmlSystemEventSetFreeRequest_v1_t SystemEventSetFreeRequest_v1 ctypedef nvmlSystemRegisterEventRequest_v1_t SystemRegisterEventRequest_v1 -ctypedef nvmlVgpuInstanceUtilizationSample_t VgpuInstanceUtilizationSample ctypedef nvmlUUID_v1_t UUID_v1 ctypedef nvmlSystemEventSetWaitRequest_v1_t SystemEventSetWaitRequest_v1 ctypedef nvmlGpmMetric_t GpmMetric @@ -57,6 +68,7 @@ ctypedef nvmlWorkloadPowerProfileUpdateProfiles_v1_t WorkloadPowerProfileUpdateP ctypedef nvmlPRMTLV_v1_t PRMTLV_v1 ctypedef nvmlVgpuSchedulerSetState_t VgpuSchedulerSetState ctypedef nvmlGpmMetricsGet_t GpmMetricsGet +ctypedef nvmlPRMCounterList_v1_t PRMCounterList_v1 ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t WorkloadPowerProfileProfilesInfo_v1 @@ -152,7 +164,6 @@ cpdef object device_get_attributes_v2(intptr_t device) cpdef intptr_t device_get_handle_by_index_v2(unsigned int ind_ex) except? 0 cpdef intptr_t device_get_handle_by_serial(serial) except? 0 cpdef intptr_t device_get_handle_by_uuid(uuid) except? 0 -cpdef intptr_t device_get_handle_by_uuidv(intptr_t uuid) except? 0 cpdef intptr_t device_get_handle_by_pci_bus_id_v2(pci_bus_id) except? 0 cpdef str device_get_name(intptr_t device) cpdef int device_get_brand(intptr_t device) except? 
-1 @@ -215,7 +226,6 @@ cpdef tuple device_get_min_max_clock_of_p_state(intptr_t device, int type, int p cpdef tuple device_get_gpc_clk_min_max_vf_offset(intptr_t device) cpdef tuple device_get_mem_clk_min_max_vf_offset(intptr_t device) cpdef device_set_clock_offsets(intptr_t device, intptr_t info) -cpdef object device_get_performance_modes(intptr_t device) cpdef object device_get_current_clock_freqs(intptr_t device) cpdef unsigned int device_get_power_management_limit(intptr_t device) except? 0 cpdef tuple device_get_power_management_limit_constraints(intptr_t device) @@ -227,8 +237,6 @@ cpdef tuple device_get_gpu_operation_mode(intptr_t device) cpdef object device_get_memory_info_v2(intptr_t device) cpdef int device_get_compute_mode(intptr_t device) except? -1 cpdef tuple device_get_cuda_compute_capability(intptr_t device) -cpdef tuple device_get_dram_encryption_mode(intptr_t device) -cpdef device_set_dram_encryption_mode(intptr_t device, intptr_t dram_encryption) cpdef tuple device_get_ecc_mode(intptr_t device) cpdef int device_get_default_ecc_mode(intptr_t device) except? -1 cpdef unsigned int device_get_board_id(intptr_t device) except? 0 @@ -267,11 +275,8 @@ cpdef object device_get_conf_compute_mem_size_info(intptr_t device) cpdef unsigned int system_get_conf_compute_gpus_ready_state() except? 0 cpdef object device_get_conf_compute_protected_memory_usage(intptr_t device) cpdef object device_get_conf_compute_gpu_certificate(intptr_t device) -cpdef object device_get_conf_compute_gpu_attestation_report(intptr_t device) -cpdef object system_get_conf_compute_key_rotation_threshold_info() cpdef device_set_conf_compute_unprotected_mem_size(intptr_t device, unsigned long long size_ki_b) cpdef system_set_conf_compute_gpus_ready_state(unsigned int is_accepting_work) -cpdef system_set_conf_compute_key_rotation_threshold_info(intptr_t p_key_rotation_thr_info) cpdef object system_get_conf_compute_settings() cpdef char device_get_gsp_firmware_version(intptr_t device) except? 0 cpdef tuple device_get_gsp_firmware_mode(intptr_t device) @@ -301,14 +306,11 @@ cpdef device_set_auto_boosted_clocks_enabled(intptr_t device, int enabled) cpdef device_set_default_auto_boosted_clocks_enabled(intptr_t device, int enabled, unsigned int flags) cpdef device_set_default_fan_speed_v2(intptr_t device, unsigned int fan) cpdef device_set_fan_control_policy(intptr_t device, unsigned int fan, unsigned int policy) -cpdef device_set_temperature_threshold(intptr_t device, int threshold_type, intptr_t temp) -cpdef device_set_power_management_limit(intptr_t device, unsigned int limit) cpdef device_set_gpu_operation_mode(intptr_t device, int mode) cpdef device_set_api_restriction(intptr_t device, int api_type, int is_restricted) cpdef device_set_fan_speed_v2(intptr_t device, unsigned int fan, unsigned int speed) cpdef device_set_accounting_mode(intptr_t device, int mode) cpdef device_clear_accounting_pids(intptr_t device) -cpdef device_set_power_management_limit_v2(intptr_t device, intptr_t power_value) cpdef int device_get_nvlink_state(intptr_t device, unsigned int link) except? -1 cpdef unsigned int device_get_nvlink_version(intptr_t device, unsigned int link) except? 0 cpdef unsigned int device_get_nvlink_capability(intptr_t device, unsigned int link, int capability) except? 0 @@ -316,7 +318,6 @@ cpdef object device_get_nvlink_remote_pci_info_v2(intptr_t device, unsigned int cpdef unsigned long long device_get_nvlink_error_counter(intptr_t device, unsigned int link, int counter) except? 
0 cpdef device_reset_nvlink_error_counters(intptr_t device, unsigned int link) cpdef int device_get_nvlink_remote_device_type(intptr_t device, unsigned int link) except? -1 -cpdef device_set_nvlink_device_low_power_threshold(intptr_t device, intptr_t info) cpdef system_set_nvlink_bw_mode(unsigned int nvlink_bw_mode) cpdef unsigned int system_get_nvlink_bw_mode() except? 0 cpdef object device_get_nvlink_supported_bw_modes(intptr_t device) @@ -334,13 +335,8 @@ cpdef device_discover_gpus(intptr_t pci_info) cpdef int device_get_virtualization_mode(intptr_t device) except? -1 cpdef int device_get_host_vgpu_mode(intptr_t device) except? -1 cpdef device_set_virtualization_mode(intptr_t device, int virtual_mode) -cpdef object device_get_vgpu_heterogeneous_mode(intptr_t device) -cpdef device_set_vgpu_heterogeneous_mode(intptr_t device, intptr_t p_heterogeneous_mode) -cpdef object vgpu_instance_get_placement_id(unsigned int vgpu_instance) -cpdef object device_get_vgpu_type_supported_placements(intptr_t device, unsigned int vgpu_type_id) cpdef unsigned long long vgpu_type_get_gsp_heap_size(unsigned int vgpu_type_id) except? 0 cpdef unsigned long long vgpu_type_get_fb_reservation(unsigned int vgpu_type_id) except? 0 -cpdef object vgpu_instance_get_runtime_state_size(unsigned int vgpu_instance) cpdef device_set_vgpu_capabilities(intptr_t device, int capability, int state) cpdef object device_get_grid_licensable_features_v4(intptr_t device) cpdef unsigned int get_vgpu_driver_capabilities(int capability) except? 0 @@ -352,7 +348,7 @@ cpdef tuple vgpu_type_get_device_id(unsigned int vgpu_type_id) cpdef unsigned long long vgpu_type_get_framebuffer_size(unsigned int vgpu_type_id) except? 0 cpdef unsigned int vgpu_type_get_num_display_heads(unsigned int vgpu_type_id) except? 0 cpdef tuple vgpu_type_get_resolution(unsigned int vgpu_type_id, unsigned int display_ind_ex) -cpdef vgpu_type_get_license(unsigned int vgpu_type_id, intptr_t vgpu_type_license_string, unsigned int size) +cpdef str vgpu_type_get_license(unsigned int vgpu_type_id) cpdef unsigned int vgpu_type_get_frame_rate_limit(unsigned int vgpu_type_id) except? 0 cpdef unsigned int vgpu_type_get_max_instances(intptr_t device, unsigned int vgpu_type_id) except? 0 cpdef unsigned int vgpu_type_get_max_instances_per_vm(unsigned int vgpu_type_id) except? 0 @@ -374,19 +370,15 @@ cpdef unsigned int vgpu_instance_get_gpu_instance_id(unsigned int vgpu_instance) cpdef str vgpu_instance_get_gpu_pci_id(unsigned int vgpu_instance) cpdef unsigned int vgpu_type_get_capabilities(unsigned int vgpu_type_id, int capability) except? 
0 cpdef str vgpu_instance_get_mdev_uuid(unsigned int vgpu_instance) -cpdef object vgpu_type_get_max_instances_per_gpu_instance() cpdef gpu_instance_set_vgpu_scheduler_state(intptr_t gpu_instance, intptr_t p_scheduler) cpdef object gpu_instance_get_vgpu_scheduler_state(intptr_t gpu_instance) cpdef object gpu_instance_get_vgpu_scheduler_log(intptr_t gpu_instance) -cpdef object gpu_instance_get_vgpu_heterogeneous_mode(intptr_t gpu_instance) -cpdef gpu_instance_set_vgpu_heterogeneous_mode(intptr_t gpu_instance, intptr_t p_heterogeneous_mode) cpdef str device_get_pgpu_metadata_string(intptr_t device) cpdef object device_get_vgpu_scheduler_log(intptr_t device) cpdef object device_get_vgpu_scheduler_state(intptr_t device) cpdef object device_get_vgpu_scheduler_capabilities(intptr_t device) cpdef device_set_vgpu_scheduler_state(intptr_t device, intptr_t p_scheduler_state) cpdef set_vgpu_version(intptr_t vgpu_version) -cpdef tuple device_get_vgpu_utilization(intptr_t device, unsigned long long last_seen_time_stamp) cpdef tuple device_get_vgpu_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp) cpdef int vgpu_instance_get_accounting_mode(unsigned int vgpu_instance) except? -1 cpdef object vgpu_instance_get_accounting_pids(unsigned int vgpu_instance) @@ -397,7 +389,6 @@ cpdef unsigned int get_excluded_device_count() except? 0 cpdef object get_excluded_device_info_by_index(unsigned int ind_ex) cpdef int device_set_mig_mode(intptr_t device, unsigned int mode) except? -1 cpdef tuple device_get_mig_mode(intptr_t device) -cpdef object device_get_gpu_instance_profile_info_v(intptr_t device, unsigned int profile) cpdef object device_get_gpu_instance_possible_placements_v2(intptr_t device, unsigned int profile_id) cpdef unsigned int device_get_gpu_instance_remaining_capacity(intptr_t device, unsigned int profile_id) except? 0 cpdef intptr_t device_create_gpu_instance(intptr_t device, unsigned int profile_id) except? 0 @@ -419,13 +410,6 @@ cpdef unsigned int device_get_compute_instance_id(intptr_t device) except? 0 cpdef unsigned int device_get_max_mig_device_count(intptr_t device) except? 0 cpdef intptr_t device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) except? 0 cpdef intptr_t device_get_device_handle_from_mig_device_handle(intptr_t mig_device) except? 0 -cpdef gpm_sample_get(intptr_t device, intptr_t gpm_sample) -cpdef gpm_mig_sample_get(intptr_t device, unsigned int gpu_instance_id, intptr_t gpm_sample) -cpdef object gpm_query_device_support(intptr_t device) -cpdef unsigned int gpm_query_if_streaming_enabled(intptr_t device) except? 
0 -cpdef gpm_set_streaming_enabled(intptr_t device, unsigned int state) -cpdef object device_get_capabilities(intptr_t device) -cpdef device_workload_power_profile_clear_requested_profiles(intptr_t device, intptr_t requested_profiles) cpdef device_power_smoothing_activate_preset_profile(intptr_t device, intptr_t profile) cpdef device_power_smoothing_update_preset_profile_param(intptr_t device, intptr_t profile) cpdef device_power_smoothing_set_state(intptr_t device, intptr_t state) @@ -433,9 +417,3 @@ cpdef object device_get_addressing_mode(intptr_t device) cpdef object device_get_repair_status(intptr_t device) cpdef object device_get_power_mizer_mode_v1(intptr_t device) cpdef device_set_power_mizer_mode_v1(intptr_t device, intptr_t power_mizer_mode) -cpdef object device_get_pdi(intptr_t device) -cpdef device_read_write_prm_v1(intptr_t device, intptr_t buffer) -cpdef object device_get_gpu_instance_profile_info_by_id_v(intptr_t device, unsigned int profile_id) -cpdef object device_get_unrepairable_memory_flag_v1(intptr_t device) -cpdef device_read_prm_counters_v1(intptr_t device, intptr_t counter_list) -cpdef device_set_rusd_settings_v1(intptr_t device, intptr_t settings) diff --git a/cuda_bindings/cuda/bindings/_nvml.pyx b/cuda_bindings/cuda/bindings/_nvml.pyx index b7fa757d58..455ee9aee3 100644 --- a/cuda_bindings/cuda/bindings/_nvml.pyx +++ b/cuda_bindings/cuda/bindings/_nvml.pyx @@ -1,12 +1,10 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE # # This code was automatically generated across versions from 12.9.1 to 13.1.0. Do not modify it directly. cimport cython # NOQA -from cython cimport view -cimport cpython from ._internal.utils cimport (get_buffer_pointer, get_nested_resource_ptr, nested_resource) @@ -20,6 +18,7 @@ from libc.stdlib cimport calloc, free, malloc from cython cimport view cimport cpython.buffer cimport cpython.memoryview +cimport cpython from libc.string cimport memcmp, memcpy import numpy as _numpy @@ -37,6 +36,7 @@ cdef __from_data(data, dtype_name, expected_dtype, lowpp_type): return lowpp_type.from_ptr(data.ctypes.data, not data.flags.writeable, data) + ############################################################################### # Enum ############################################################################### @@ -1557,6 +1557,11 @@ class PowerSmoothingProfileParam(_IntEnum): PRIMARY_FLOOR_ACT_OFFSET = 7 +class VgpuPgpu(_IntEnum): + HETEROGENEOUS_MODE = 0 # Heterogeneous vGPU mode. + HOMOGENEOUS_MODE = 1 # Homogeneous vGPU mode. + + ############################################################################### # Error handling ############################################################################### @@ -4600,142 +4605,6 @@ cdef class ClockOffset_v1: return obj -cdef _get_device_perf_modes_v1_dtype_offsets(): - cdef nvmlDevicePerfModes_v1_t pod = nvmlDevicePerfModes_v1_t() - return _numpy.dtype({ - 'names': ['version', 'str'], - 'formats': [_numpy.uint32, (_numpy.int8, 2048)], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.str)) - (&pod), - ], - 'itemsize': sizeof(nvmlDevicePerfModes_v1_t), - }) - -device_perf_modes_v1_dtype = _get_device_perf_modes_v1_dtype_offsets() - -cdef class DevicePerfModes_v1: - """Empty-initialize an instance of `nvmlDevicePerfModes_v1_t`. - - - .. 
seealso:: `nvmlDevicePerfModes_v1_t` - """ - cdef: - nvmlDevicePerfModes_v1_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlDevicePerfModes_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating DevicePerfModes_v1") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlDevicePerfModes_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.DevicePerfModes_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef DevicePerfModes_v1 other_ - if not isinstance(other, DevicePerfModes_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlDevicePerfModes_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlDevicePerfModes_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating DevicePerfModes_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlDevicePerfModes_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: the API version number""" - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This DevicePerfModes_v1 instance is read-only") - self._ptr[0].version = val - - @property - def str(self): - """~_numpy.int8: (array of length 2048).OUT: the performance modes string.""" - return cpython.PyUnicode_FromString(self._ptr[0].str) - - @str.setter - def str(self, val): - if self._readonly: - raise ValueError("This DevicePerfModes_v1 instance is read-only") - cdef bytes buf = val.encode() - if len(buf) >= 2048: - raise ValueError("String too long for field str, max length is 2047") - cdef char *ptr = buf - memcpy((self._ptr[0].str), ptr, 2048) - - @staticmethod - def from_data(data): - """Create an DevicePerfModes_v1 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `device_perf_modes_v1_dtype` holding the data. - """ - return __from_data(data, "device_perf_modes_v1_dtype", device_perf_modes_v1_dtype, DevicePerfModes_v1) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an DevicePerfModes_v1 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. 
- """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef DevicePerfModes_v1 obj = DevicePerfModes_v1.__new__(DevicePerfModes_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlDevicePerfModes_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating DevicePerfModes_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlDevicePerfModes_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - cdef _get_device_current_clock_freqs_v1_dtype_offsets(): cdef nvmlDeviceCurrentClockFreqs_v1_t pod = nvmlDeviceCurrentClockFreqs_v1_t() return _numpy.dtype({ @@ -5642,7 +5511,10 @@ cdef class PlatformInfo_v1: @property def ib_guid(self): """~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)""" - cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if 16 == 0: + return _numpy.array([]) + arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].ibGuid)) return _numpy.asarray(arr) @@ -5650,6 +5522,8 @@ cdef class PlatformInfo_v1: def ib_guid(self, val): if self._readonly: raise ValueError("This PlatformInfo_v1 instance is read-only") + if 16 == 0: + return cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].ibGuid)), (arr.data), sizeof(unsigned char) * len(val)) @@ -5657,7 +5531,10 @@ cdef class PlatformInfo_v1: @property def rack_guid(self): """~_numpy.uint8: (array of length 16).GUID of the rack containing this GPU (for Blackwell rackGuid is 13 bytes so indices 13-15 are zero)""" - cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if 16 == 0: + return _numpy.array([]) + arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].rackGuid)) return _numpy.asarray(arr) @@ -5665,6 +5542,8 @@ cdef class PlatformInfo_v1: def rack_guid(self, val): if self._readonly: raise ValueError("This PlatformInfo_v1 instance is read-only") + if 16 == 0: + return cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].rackGuid)), (arr.data), sizeof(unsigned char) * len(val)) @@ -5854,7 +5733,10 @@ cdef class PlatformInfo_v2: @property def ib_guid(self): """~_numpy.uint8: (array of length 16).Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero)""" - cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if 16 == 0: + return _numpy.array([]) + arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].ibGuid)) return _numpy.asarray(arr) @@ -5862,6 +5744,8 @@ cdef class PlatformInfo_v2: def ib_guid(self, val): if self._readonly: raise ValueError("This PlatformInfo_v2 instance is read-only") + if 16 == 0: + return cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, 
dtype=_numpy.uint8) memcpy((&(self._ptr[0].ibGuid)), (arr.data), sizeof(unsigned char) * len(val)) @@ -5869,7 +5753,10 @@ cdef class PlatformInfo_v2: @property def chassis_serial_number(self): """~_numpy.uint8: (array of length 16).Serial number of the chassis containing this GPU (for Blackwell it is 13 bytes so indices 13-15 are zero)""" - cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if 16 == 0: + return _numpy.array([]) + arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].chassisSerialNumber)) return _numpy.asarray(arr) @@ -5877,6 +5764,8 @@ cdef class PlatformInfo_v2: def chassis_serial_number(self, val): if self._readonly: raise ValueError("This PlatformInfo_v2 instance is read-only") + if 16 == 0: + return cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].chassisSerialNumber)), (arr.data), sizeof(unsigned char) * len(val)) @@ -6128,49 +6017,54 @@ cdef class _py_anon_pod1: return obj -cdef _get_vgpu_heterogeneous_mode_v1_dtype_offsets(): - cdef nvmlVgpuHeterogeneousMode_v1_t pod = nvmlVgpuHeterogeneousMode_v1_t() +cdef _get_vgpu_placement_list_v2_dtype_offsets(): + cdef nvmlVgpuPlacementList_v2_t pod = nvmlVgpuPlacementList_v2_t() return _numpy.dtype({ - 'names': ['version', 'mode'], - 'formats': [_numpy.uint32, _numpy.uint32], + 'names': ['version', 'placement_size', 'count', 'placement_ids', 'mode'], + 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32], 'offsets': [ (&(pod.version)) - (&pod), + (&(pod.placementSize)) - (&pod), + (&(pod.count)) - (&pod), + (&(pod.placementIds)) - (&pod), (&(pod.mode)) - (&pod), ], - 'itemsize': sizeof(nvmlVgpuHeterogeneousMode_v1_t), + 'itemsize': sizeof(nvmlVgpuPlacementList_v2_t), }) -vgpu_heterogeneous_mode_v1_dtype = _get_vgpu_heterogeneous_mode_v1_dtype_offsets() +vgpu_placement_list_v2_dtype = _get_vgpu_placement_list_v2_dtype_offsets() -cdef class VgpuHeterogeneousMode_v1: - """Empty-initialize an instance of `nvmlVgpuHeterogeneousMode_v1_t`. +cdef class VgpuPlacementList_v2: + """Empty-initialize an instance of `nvmlVgpuPlacementList_v2_t`. - .. seealso:: `nvmlVgpuHeterogeneousMode_v1_t` + .. 
seealso:: `nvmlVgpuPlacementList_v2_t` """ cdef: - nvmlVgpuHeterogeneousMode_v1_t *_ptr + nvmlVgpuPlacementList_v2_t *_ptr object _owner bint _owned bint _readonly + dict _refs def __init__(self): - self._ptr = calloc(1, sizeof(nvmlVgpuHeterogeneousMode_v1_t)) + self._ptr = calloc(1, sizeof(nvmlVgpuPlacementList_v2_t)) if self._ptr == NULL: - raise MemoryError("Error allocating VgpuHeterogeneousMode_v1") + raise MemoryError("Error allocating VgpuPlacementList_v2") self._owner = None self._owned = True self._readonly = False + self._refs = {} def __dealloc__(self): - cdef nvmlVgpuHeterogeneousMode_v1_t *ptr + cdef nvmlVgpuPlacementList_v2_t *ptr if self._owned and self._ptr != NULL: ptr = self._ptr self._ptr = NULL free(ptr) def __repr__(self): - return f"<{__name__}.VgpuHeterogeneousMode_v1 object at {hex(id(self))}>" + return f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>" @property def ptr(self): @@ -6184,18 +6078,18 @@ cdef class VgpuHeterogeneousMode_v1: return (self._ptr) def __eq__(self, other): - cdef VgpuHeterogeneousMode_v1 other_ - if not isinstance(other, VgpuHeterogeneousMode_v1): + cdef VgpuPlacementList_v2 other_ + if not isinstance(other, VgpuPlacementList_v2): return False other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuHeterogeneousMode_v1_t)) == 0) + return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuPlacementList_v2_t)) == 0) def __setitem__(self, key, val): if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t)) + self._ptr = malloc(sizeof(nvmlVgpuPlacementList_v2_t)) if self._ptr == NULL: - raise MemoryError("Error allocating VgpuHeterogeneousMode_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuHeterogeneousMode_v1_t)) + raise MemoryError("Error allocating VgpuPlacementList_v2") + memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuPlacementList_v2_t)) self._owner = None self._owned = True self._readonly = not val.flags.writeable @@ -6204,38 +6098,68 @@ cdef class VgpuHeterogeneousMode_v1: @property def version(self): - """int: The version number of this struct.""" + """int: IN: The version number of this struct.""" return self._ptr[0].version @version.setter def version(self, val): if self._readonly: - raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only") + raise ValueError("This VgpuPlacementList_v2 instance is read-only") self._ptr[0].version = val + @property + def placement_size(self): + """int: OUT: The number of slots occupied by the vGPU type.""" + return self._ptr[0].placementSize + + @placement_size.setter + def placement_size(self, val): + if self._readonly: + raise ValueError("This VgpuPlacementList_v2 instance is read-only") + self._ptr[0].placementSize = val + + @property + def placement_ids(self): + """int: IN/OUT: Placement IDs for the vGPU type.""" + if self._ptr[0].placementIds == NULL: + return [] + cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False) + arr.data = (self._ptr[0].placementIds) + return _numpy.asarray(arr) + + @placement_ids.setter + def placement_ids(self, val): + if self._readonly: + raise ValueError("This VgpuPlacementList_v2 instance is read-only") + cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c") + arr[:] = _numpy.asarray(val, dtype=_numpy.uint32) + self._ptr[0].placementIds = (arr.data) + self._ptr[0].count = len(val) + self._refs["placement_ids"] = arr + @property 
def mode(self): - """int: The vGPU heterogeneous mode.""" + """int: IN: The vGPU mode. Either NVML_VGPU_PGPU_HETEROGENEOUS_MODE or NVML_VGPU_PGPU_HOMOGENEOUS_MODE.""" return self._ptr[0].mode @mode.setter def mode(self, val): if self._readonly: - raise ValueError("This VgpuHeterogeneousMode_v1 instance is read-only") + raise ValueError("This VgpuPlacementList_v2 instance is read-only") self._ptr[0].mode = val @staticmethod def from_data(data): - """Create an VgpuHeterogeneousMode_v1 instance wrapping the given NumPy array. + """Create an VgpuPlacementList_v2 instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `vgpu_heterogeneous_mode_v1_dtype` holding the data. + data (_numpy.ndarray): a single-element array of dtype `vgpu_placement_list_v2_dtype` holding the data. """ - return __from_data(data, "vgpu_heterogeneous_mode_v1_dtype", vgpu_heterogeneous_mode_v1_dtype, VgpuHeterogeneousMode_v1) + return __from_data(data, "vgpu_placement_list_v2_dtype", vgpu_placement_list_v2_dtype, VgpuPlacementList_v2) @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an VgpuHeterogeneousMode_v1 instance wrapping the given pointer. + """Create an VgpuPlacementList_v2 instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. @@ -6244,65 +6168,66 @@ cdef class VgpuHeterogeneousMode_v1: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef VgpuHeterogeneousMode_v1 obj = VgpuHeterogeneousMode_v1.__new__(VgpuHeterogeneousMode_v1) + cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2) if owner is None: - obj._ptr = malloc(sizeof(nvmlVgpuHeterogeneousMode_v1_t)) + obj._ptr = malloc(sizeof(nvmlVgpuPlacementList_v2_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating VgpuHeterogeneousMode_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlVgpuHeterogeneousMode_v1_t)) + raise MemoryError("Error allocating VgpuPlacementList_v2") + memcpy((obj._ptr), ptr, sizeof(nvmlVgpuPlacementList_v2_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly + obj._refs = {} return obj -cdef _get_vgpu_placement_id_v1_dtype_offsets(): - cdef nvmlVgpuPlacementId_v1_t pod = nvmlVgpuPlacementId_v1_t() +cdef _get_vgpu_type_bar1info_v1_dtype_offsets(): + cdef nvmlVgpuTypeBar1Info_v1_t pod = nvmlVgpuTypeBar1Info_v1_t() return _numpy.dtype({ - 'names': ['version', 'placement_id'], - 'formats': [_numpy.uint32, _numpy.uint32], + 'names': ['version', 'bar1size'], + 'formats': [_numpy.uint32, _numpy.uint64], 'offsets': [ (&(pod.version)) - (&pod), - (&(pod.placementId)) - (&pod), + (&(pod.bar1Size)) - (&pod), ], - 'itemsize': sizeof(nvmlVgpuPlacementId_v1_t), + 'itemsize': sizeof(nvmlVgpuTypeBar1Info_v1_t), }) -vgpu_placement_id_v1_dtype = _get_vgpu_placement_id_v1_dtype_offsets() +vgpu_type_bar1info_v1_dtype = _get_vgpu_type_bar1info_v1_dtype_offsets() -cdef class VgpuPlacementId_v1: - """Empty-initialize an instance of `nvmlVgpuPlacementId_v1_t`. +cdef class VgpuTypeBar1Info_v1: + """Empty-initialize an instance of `nvmlVgpuTypeBar1Info_v1_t`. - .. seealso:: `nvmlVgpuPlacementId_v1_t` + .. 
seealso:: `nvmlVgpuTypeBar1Info_v1_t` """ cdef: - nvmlVgpuPlacementId_v1_t *_ptr + nvmlVgpuTypeBar1Info_v1_t *_ptr object _owner bint _owned bint _readonly def __init__(self): - self._ptr = calloc(1, sizeof(nvmlVgpuPlacementId_v1_t)) + self._ptr = calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t)) if self._ptr == NULL: - raise MemoryError("Error allocating VgpuPlacementId_v1") + raise MemoryError("Error allocating VgpuTypeBar1Info_v1") self._owner = None self._owned = True self._readonly = False def __dealloc__(self): - cdef nvmlVgpuPlacementId_v1_t *ptr + cdef nvmlVgpuTypeBar1Info_v1_t *ptr if self._owned and self._ptr != NULL: ptr = self._ptr self._ptr = NULL free(ptr) def __repr__(self): - return f"<{__name__}.VgpuPlacementId_v1 object at {hex(id(self))}>" + return f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>" @property def ptr(self): @@ -6316,18 +6241,18 @@ cdef class VgpuPlacementId_v1: return (self._ptr) def __eq__(self, other): - cdef VgpuPlacementId_v1 other_ - if not isinstance(other, VgpuPlacementId_v1): + cdef VgpuTypeBar1Info_v1 other_ + if not isinstance(other, VgpuTypeBar1Info_v1): return False other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuPlacementId_v1_t)) == 0) + return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuTypeBar1Info_v1_t)) == 0) def __setitem__(self, key, val): if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlVgpuPlacementId_v1_t)) + self._ptr = malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t)) if self._ptr == NULL: - raise MemoryError("Error allocating VgpuPlacementId_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuPlacementId_v1_t)) + raise MemoryError("Error allocating VgpuTypeBar1Info_v1") + memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t)) self._owner = None self._owned = True self._readonly = not val.flags.writeable @@ -6342,32 +6267,32 @@ cdef class VgpuPlacementId_v1: @version.setter def version(self, val): if self._readonly: - raise ValueError("This VgpuPlacementId_v1 instance is read-only") + raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only") self._ptr[0].version = val @property - def placement_id(self): - """int: Placement ID of the active vGPU instance.""" - return self._ptr[0].placementId + def bar1size(self): + """int: BAR1 size in megabytes.""" + return self._ptr[0].bar1Size - @placement_id.setter - def placement_id(self, val): + @bar1size.setter + def bar1size(self, val): if self._readonly: - raise ValueError("This VgpuPlacementId_v1 instance is read-only") - self._ptr[0].placementId = val + raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only") + self._ptr[0].bar1Size = val @staticmethod def from_data(data): - """Create an VgpuPlacementId_v1 instance wrapping the given NumPy array. + """Create an VgpuTypeBar1Info_v1 instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `vgpu_placement_id_v1_dtype` holding the data. + data (_numpy.ndarray): a single-element array of dtype `vgpu_type_bar1info_v1_dtype` holding the data. """ - return __from_data(data, "vgpu_placement_id_v1_dtype", vgpu_placement_id_v1_dtype, VgpuPlacementId_v1) + return __from_data(data, "vgpu_type_bar1info_v1_dtype", vgpu_type_bar1info_v1_dtype, VgpuTypeBar1Info_v1) @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an VgpuPlacementId_v1 instance wrapping the given pointer. + """Create an VgpuTypeBar1Info_v1 instance wrapping the given pointer. 
Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. @@ -6376,349 +6301,49 @@ cdef class VgpuPlacementId_v1: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef VgpuPlacementId_v1 obj = VgpuPlacementId_v1.__new__(VgpuPlacementId_v1) + cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1) if owner is None: - obj._ptr = malloc(sizeof(nvmlVgpuPlacementId_v1_t)) + obj._ptr = malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating VgpuPlacementId_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlVgpuPlacementId_v1_t)) + raise MemoryError("Error allocating VgpuTypeBar1Info_v1") + memcpy((obj._ptr), ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly return obj -cdef _get_vgpu_placement_list_v2_dtype_offsets(): - cdef nvmlVgpuPlacementList_v2_t pod = nvmlVgpuPlacementList_v2_t() +cdef _get_vgpu_process_utilization_info_v1_dtype_offsets(): + cdef nvmlVgpuProcessUtilizationInfo_v1_t pod = nvmlVgpuProcessUtilizationInfo_v1_t() return _numpy.dtype({ - 'names': ['version', 'placement_size', 'count', 'placement_ids', 'mode'], - 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.intp, _numpy.uint32], + 'names': ['process_name', 'time_stamp', 'vgpu_instance', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'], + 'formats': [(_numpy.int8, 64), _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32], 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.placementSize)) - (&pod), - (&(pod.count)) - (&pod), - (&(pod.placementIds)) - (&pod), - (&(pod.mode)) - (&pod), + (&(pod.processName)) - (&pod), + (&(pod.timeStamp)) - (&pod), + (&(pod.vgpuInstance)) - (&pod), + (&(pod.pid)) - (&pod), + (&(pod.smUtil)) - (&pod), + (&(pod.memUtil)) - (&pod), + (&(pod.encUtil)) - (&pod), + (&(pod.decUtil)) - (&pod), + (&(pod.jpgUtil)) - (&pod), + (&(pod.ofaUtil)) - (&pod), ], - 'itemsize': sizeof(nvmlVgpuPlacementList_v2_t), + 'itemsize': sizeof(nvmlVgpuProcessUtilizationInfo_v1_t), }) -vgpu_placement_list_v2_dtype = _get_vgpu_placement_list_v2_dtype_offsets() +vgpu_process_utilization_info_v1_dtype = _get_vgpu_process_utilization_info_v1_dtype_offsets() -cdef class VgpuPlacementList_v2: - """Empty-initialize an instance of `nvmlVgpuPlacementList_v2_t`. +cdef class VgpuProcessUtilizationInfo_v1: + """Empty-initialize an array of `nvmlVgpuProcessUtilizationInfo_v1_t`. - - .. 
seealso:: `nvmlVgpuPlacementList_v2_t` - """ - cdef: - nvmlVgpuPlacementList_v2_t *_ptr - object _owner - bint _owned - bint _readonly - dict _refs - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlVgpuPlacementList_v2_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuPlacementList_v2") - self._owner = None - self._owned = True - self._readonly = False - self._refs = {} - - def __dealloc__(self): - cdef nvmlVgpuPlacementList_v2_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.VgpuPlacementList_v2 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef VgpuPlacementList_v2 other_ - if not isinstance(other, VgpuPlacementList_v2): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuPlacementList_v2_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlVgpuPlacementList_v2_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuPlacementList_v2") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuPlacementList_v2_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: IN: The version number of this struct.""" - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This VgpuPlacementList_v2 instance is read-only") - self._ptr[0].version = val - - @property - def placement_size(self): - """int: OUT: The number of slots occupied by the vGPU type.""" - return self._ptr[0].placementSize - - @placement_size.setter - def placement_size(self, val): - if self._readonly: - raise ValueError("This VgpuPlacementList_v2 instance is read-only") - self._ptr[0].placementSize = val - - @property - def placement_ids(self): - """int: IN/OUT: Placement IDs for the vGPU type.""" - if self._ptr[0].placementIds == NULL: - return [] - cdef view.array arr = view.array(shape=(self._ptr[0].count,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False) - arr.data = (self._ptr[0].placementIds) - return _numpy.asarray(arr) - - @placement_ids.setter - def placement_ids(self, val): - if self._readonly: - raise ValueError("This VgpuPlacementList_v2 instance is read-only") - cdef view.array arr = view.array(shape=(len(val),), itemsize=sizeof(unsigned int), format="I", mode="c") - arr[:] = _numpy.asarray(val, dtype=_numpy.uint32) - self._ptr[0].placementIds = (arr.data) - self._ptr[0].count = len(val) - self._refs["placement_ids"] = arr - - @property - def mode(self): - """int: IN: The vGPU mode. Either NVML_VGPU_PGPU_HETEROGENEOUS_MODE or NVML_VGPU_PGPU_HOMOGENEOUS_MODE.""" - return self._ptr[0].mode - - @mode.setter - def mode(self, val): - if self._readonly: - raise ValueError("This VgpuPlacementList_v2 instance is read-only") - self._ptr[0].mode = val - - @staticmethod - def from_data(data): - """Create an VgpuPlacementList_v2 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `vgpu_placement_list_v2_dtype` holding the data. 
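A minimal usage sketch of the VgpuPlacementList_v2 wrapper shown above, assuming the class is reachable as cuda.bindings._nvml and with placeholder version/mode values (all assumptions). It illustrates what the generated code does: the placement_ids setter copies the values into an owned C buffer, records the count, and keeps the buffer referenced, while the getter returns a zero-copy uint32 view over it.

    from cuda.bindings import _nvml as nvml  # assumed import path

    placements = nvml.VgpuPlacementList_v2()
    placements.version = 2                     # placeholder; use the NVML-provided struct version
    placements.mode = 0                        # placeholder heterogeneous/homogeneous mode value
    placements.placement_ids = [0, 4, 8, 12]   # copies into a C buffer and sets count = 4
    ids = placements.placement_ids             # zero-copy uint32 view over that buffer
    print(ids.dtype, ids.shape)                # uint32 (4,)
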
- """ - return __from_data(data, "vgpu_placement_list_v2_dtype", vgpu_placement_list_v2_dtype, VgpuPlacementList_v2) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an VgpuPlacementList_v2 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef VgpuPlacementList_v2 obj = VgpuPlacementList_v2.__new__(VgpuPlacementList_v2) - if owner is None: - obj._ptr = malloc(sizeof(nvmlVgpuPlacementList_v2_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating VgpuPlacementList_v2") - memcpy((obj._ptr), ptr, sizeof(nvmlVgpuPlacementList_v2_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - obj._refs = {} - return obj - - -cdef _get_vgpu_type_bar1info_v1_dtype_offsets(): - cdef nvmlVgpuTypeBar1Info_v1_t pod = nvmlVgpuTypeBar1Info_v1_t() - return _numpy.dtype({ - 'names': ['version', 'bar1size'], - 'formats': [_numpy.uint32, _numpy.uint64], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.bar1Size)) - (&pod), - ], - 'itemsize': sizeof(nvmlVgpuTypeBar1Info_v1_t), - }) - -vgpu_type_bar1info_v1_dtype = _get_vgpu_type_bar1info_v1_dtype_offsets() - -cdef class VgpuTypeBar1Info_v1: - """Empty-initialize an instance of `nvmlVgpuTypeBar1Info_v1_t`. - - - .. seealso:: `nvmlVgpuTypeBar1Info_v1_t` - """ - cdef: - nvmlVgpuTypeBar1Info_v1_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlVgpuTypeBar1Info_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuTypeBar1Info_v1") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlVgpuTypeBar1Info_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.VgpuTypeBar1Info_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef VgpuTypeBar1Info_v1 other_ - if not isinstance(other, VgpuTypeBar1Info_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuTypeBar1Info_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuTypeBar1Info_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuTypeBar1Info_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: The version number of this struct.""" - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only") - self._ptr[0].version = val - - @property - def bar1size(self): - """int: BAR1 size in megabytes.""" - return self._ptr[0].bar1Size - - @bar1size.setter - def bar1size(self, val): - if 
self._readonly: - raise ValueError("This VgpuTypeBar1Info_v1 instance is read-only") - self._ptr[0].bar1Size = val - - @staticmethod - def from_data(data): - """Create an VgpuTypeBar1Info_v1 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `vgpu_type_bar1info_v1_dtype` holding the data. - """ - return __from_data(data, "vgpu_type_bar1info_v1_dtype", vgpu_type_bar1info_v1_dtype, VgpuTypeBar1Info_v1) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an VgpuTypeBar1Info_v1 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef VgpuTypeBar1Info_v1 obj = VgpuTypeBar1Info_v1.__new__(VgpuTypeBar1Info_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlVgpuTypeBar1Info_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating VgpuTypeBar1Info_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlVgpuTypeBar1Info_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - -cdef _get_vgpu_process_utilization_info_v1_dtype_offsets(): - cdef nvmlVgpuProcessUtilizationInfo_v1_t pod = nvmlVgpuProcessUtilizationInfo_v1_t() - return _numpy.dtype({ - 'names': ['process_name', 'time_stamp', 'vgpu_instance', 'pid', 'sm_util', 'mem_util', 'enc_util', 'dec_util', 'jpg_util', 'ofa_util'], - 'formats': [(_numpy.int8, 64), _numpy.uint64, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32], - 'offsets': [ - (&(pod.processName)) - (&pod), - (&(pod.timeStamp)) - (&pod), - (&(pod.vgpuInstance)) - (&pod), - (&(pod.pid)) - (&pod), - (&(pod.smUtil)) - (&pod), - (&(pod.memUtil)) - (&pod), - (&(pod.encUtil)) - (&pod), - (&(pod.decUtil)) - (&pod), - (&(pod.jpgUtil)) - (&pod), - (&(pod.ofaUtil)) - (&pod), - ], - 'itemsize': sizeof(nvmlVgpuProcessUtilizationInfo_v1_t), - }) - -vgpu_process_utilization_info_v1_dtype = _get_vgpu_process_utilization_info_v1_dtype_offsets() - -cdef class VgpuProcessUtilizationInfo_v1: - """Empty-initialize an array of `nvmlVgpuProcessUtilizationInfo_v1_t`. - - The resulting object is of length `size` and of dtype `vgpu_process_utilization_info_v1_dtype`. - If default-constructed, the instance represents a single struct. + The resulting object is of length `size` and of dtype `vgpu_process_utilization_info_v1_dtype`. + If default-constructed, the instance represents a single struct. Args: size (int): number of structs, default=1. @@ -6921,146 +6546,14 @@ cdef class VgpuProcessUtilizationInfo_v1: readonly (bool): whether the data is read-only (to the user). default is `False`. 
""" if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1) - cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE - cdef object buf = cpython.memoryview.PyMemoryView_FromMemory( - ptr, sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) * size, flag) - data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_process_utilization_info_v1_dtype) - obj._data = data.view(_numpy.recarray) - - return obj - - -cdef _get_vgpu_runtime_state_v1_dtype_offsets(): - cdef nvmlVgpuRuntimeState_v1_t pod = nvmlVgpuRuntimeState_v1_t() - return _numpy.dtype({ - 'names': ['version', 'size_'], - 'formats': [_numpy.uint32, _numpy.uint64], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.size)) - (&pod), - ], - 'itemsize': sizeof(nvmlVgpuRuntimeState_v1_t), - }) - -vgpu_runtime_state_v1_dtype = _get_vgpu_runtime_state_v1_dtype_offsets() - -cdef class VgpuRuntimeState_v1: - """Empty-initialize an instance of `nvmlVgpuRuntimeState_v1_t`. - - - .. seealso:: `nvmlVgpuRuntimeState_v1_t` - """ - cdef: - nvmlVgpuRuntimeState_v1_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlVgpuRuntimeState_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuRuntimeState_v1") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlVgpuRuntimeState_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.VgpuRuntimeState_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef VgpuRuntimeState_v1 other_ - if not isinstance(other, VgpuRuntimeState_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuRuntimeState_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlVgpuRuntimeState_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuRuntimeState_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuRuntimeState_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: IN: The version number of this struct.""" - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This VgpuRuntimeState_v1 instance is read-only") - self._ptr[0].version = val - - @property - def size_(self): - """int: OUT: The runtime state size of the vGPU instance.""" - return self._ptr[0].size - - @size_.setter - def size_(self, val): - if self._readonly: - raise ValueError("This VgpuRuntimeState_v1 instance is read-only") - self._ptr[0].size = val - - @staticmethod - def from_data(data): - """Create an VgpuRuntimeState_v1 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `vgpu_runtime_state_v1_dtype` holding the data. 
- """ - return __from_data(data, "vgpu_runtime_state_v1_dtype", vgpu_runtime_state_v1_dtype, VgpuRuntimeState_v1) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an VgpuRuntimeState_v1 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef VgpuRuntimeState_v1 obj = VgpuRuntimeState_v1.__new__(VgpuRuntimeState_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlVgpuRuntimeState_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating VgpuRuntimeState_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlVgpuRuntimeState_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly + raise ValueError("ptr must not be null (0)") + cdef VgpuProcessUtilizationInfo_v1 obj = VgpuProcessUtilizationInfo_v1.__new__(VgpuProcessUtilizationInfo_v1) + cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE + cdef object buf = cpython.memoryview.PyMemoryView_FromMemory( + ptr, sizeof(nvmlVgpuProcessUtilizationInfo_v1_t) * size, flag) + data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_process_utilization_info_v1_dtype) + obj._data = data.view(_numpy.recarray) + return obj @@ -7845,7 +7338,10 @@ cdef class VgpuSchedulerCapabilities: @property def supported_schedulers(self): """~_numpy.uint32: (array of length 3).""" - cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False) + cdef view.array arr + if 3 == 0: + return _numpy.array([]) + arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].supportedSchedulers)) return _numpy.asarray(arr) @@ -7853,6 +7349,8 @@ cdef class VgpuSchedulerCapabilities: def supported_schedulers(self, val): if self._readonly: raise ValueError("This VgpuSchedulerCapabilities instance is read-only") + if 3 == 0: + return cdef view.array arr = view.array(shape=(3,), itemsize=sizeof(unsigned int), format="I", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint32) memcpy((&(self._ptr[0].supportedSchedulers)), (arr.data), sizeof(unsigned int) * len(val)) @@ -8487,150 +7985,6 @@ cdef class VgpuTypeIdInfo_v1: return obj -cdef _get_vgpu_type_max_instance_v1_dtype_offsets(): - cdef nvmlVgpuTypeMaxInstance_v1_t pod = nvmlVgpuTypeMaxInstance_v1_t() - return _numpy.dtype({ - 'names': ['version', 'vgpu_type_id', 'max_instance_per_gi'], - 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.vgpuTypeId)) - (&pod), - (&(pod.maxInstancePerGI)) - (&pod), - ], - 'itemsize': sizeof(nvmlVgpuTypeMaxInstance_v1_t), - }) - -vgpu_type_max_instance_v1_dtype = _get_vgpu_type_max_instance_v1_dtype_offsets() - -cdef class VgpuTypeMaxInstance_v1: - """Empty-initialize an instance of `nvmlVgpuTypeMaxInstance_v1_t`. - - - .. 
seealso:: `nvmlVgpuTypeMaxInstance_v1_t` - """ - cdef: - nvmlVgpuTypeMaxInstance_v1_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlVgpuTypeMaxInstance_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuTypeMaxInstance_v1") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlVgpuTypeMaxInstance_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.VgpuTypeMaxInstance_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef VgpuTypeMaxInstance_v1 other_ - if not isinstance(other, VgpuTypeMaxInstance_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlVgpuTypeMaxInstance_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating VgpuTypeMaxInstance_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlVgpuTypeMaxInstance_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: IN: The version number of this struct.""" - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only") - self._ptr[0].version = val - - @property - def vgpu_type_id(self): - """int: IN: Handle to vGPU type.""" - return (self._ptr[0].vgpuTypeId) - - @vgpu_type_id.setter - def vgpu_type_id(self, val): - if self._readonly: - raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only") - self._ptr[0].vgpuTypeId = val - - @property - def max_instance_per_gi(self): - """int: OUT: Maximum number of vGPU instances per GPU instance.""" - return self._ptr[0].maxInstancePerGI - - @max_instance_per_gi.setter - def max_instance_per_gi(self, val): - if self._readonly: - raise ValueError("This VgpuTypeMaxInstance_v1 instance is read-only") - self._ptr[0].maxInstancePerGI = val - - @staticmethod - def from_data(data): - """Create an VgpuTypeMaxInstance_v1 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `vgpu_type_max_instance_v1_dtype` holding the data. - """ - return __from_data(data, "vgpu_type_max_instance_v1_dtype", vgpu_type_max_instance_v1_dtype, VgpuTypeMaxInstance_v1) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an VgpuTypeMaxInstance_v1 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. 
- """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef VgpuTypeMaxInstance_v1 obj = VgpuTypeMaxInstance_v1.__new__(VgpuTypeMaxInstance_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlVgpuTypeMaxInstance_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating VgpuTypeMaxInstance_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlVgpuTypeMaxInstance_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - cdef _get_active_vgpu_instance_info_v1_dtype_offsets(): cdef nvmlActiveVgpuInstanceInfo_v1_t pod = nvmlActiveVgpuInstanceInfo_v1_t() return _numpy.dtype({ @@ -11465,32 +10819,13 @@ cdef class ConfComputeGpuCertificate: else: setattr(self, key, val) - @property - def cert_chain_size(self): - """int: """ - return self._ptr[0].certChainSize - - @cert_chain_size.setter - def cert_chain_size(self, val): - if self._readonly: - raise ValueError("This ConfComputeGpuCertificate instance is read-only") - self._ptr[0].certChainSize = val - - @property - def attestation_cert_chain_size(self): - """int: """ - return self._ptr[0].attestationCertChainSize - - @attestation_cert_chain_size.setter - def attestation_cert_chain_size(self, val): - if self._readonly: - raise ValueError("This ConfComputeGpuCertificate instance is read-only") - self._ptr[0].attestationCertChainSize = val - @property def cert_chain(self): """~_numpy.uint8: (array of length 4096).""" - cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if self._ptr[0].certChainSize == 0: + return _numpy.array([]) + arr = view.array(shape=(self._ptr[0].certChainSize,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].certChain)) return _numpy.asarray(arr) @@ -11498,14 +10833,19 @@ cdef class ConfComputeGpuCertificate: def cert_chain(self, val): if self._readonly: raise ValueError("This ConfComputeGpuCertificate instance is read-only") - cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c") + if self._ptr[0].certChainSize == 0: + return + cdef view.array arr = view.array(shape=(self._ptr[0].certChainSize,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].certChain)), (arr.data), sizeof(unsigned char) * len(val)) @property def attestation_cert_chain(self): """~_numpy.uint8: (array of length 5120).""" - cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if self._ptr[0].attestationCertChainSize == 0: + return _numpy.array([]) + arr = view.array(shape=(self._ptr[0].attestationCertChainSize,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].attestationCertChain)) return _numpy.asarray(arr) @@ -11513,7 +10853,9 @@ cdef class ConfComputeGpuCertificate: def attestation_cert_chain(self, val): if self._readonly: raise ValueError("This ConfComputeGpuCertificate instance is read-only") - cdef view.array arr = view.array(shape=(5120,), itemsize=sizeof(unsigned char), format="B", mode="c") + if self._ptr[0].attestationCertChainSize == 0: + return + cdef view.array arr = view.array(shape=(self._ptr[0].attestationCertChainSize,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, 
dtype=_numpy.uint8) memcpy((&(self._ptr[0].attestationCertChain)), (arr.data), sizeof(unsigned char) * len(val)) @@ -11642,32 +10984,13 @@ cdef class ConfComputeGpuAttestationReport: raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") self._ptr[0].isCecAttestationReportPresent = val - @property - def attestation_report_size(self): - """int: """ - return self._ptr[0].attestationReportSize - - @attestation_report_size.setter - def attestation_report_size(self, val): - if self._readonly: - raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") - self._ptr[0].attestationReportSize = val - - @property - def cec_attestation_report_size(self): - """int: """ - return self._ptr[0].cecAttestationReportSize - - @cec_attestation_report_size.setter - def cec_attestation_report_size(self, val): - if self._readonly: - raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") - self._ptr[0].cecAttestationReportSize = val - @property def nonce(self): """~_numpy.uint8: (array of length 32).""" - cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if 32 == 0: + return _numpy.array([]) + arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].nonce)) return _numpy.asarray(arr) @@ -11675,184 +10998,64 @@ cdef class ConfComputeGpuAttestationReport: def nonce(self, val): if self._readonly: raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") + if 32 == 0: + return cdef view.array arr = view.array(shape=(32,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].nonce)), (arr.data), sizeof(unsigned char) * len(val)) @property def attestation_report(self): - """~_numpy.uint8: (array of length 8192).""" - cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) - arr.data = (&(self._ptr[0].attestationReport)) - return _numpy.asarray(arr) - - @attestation_report.setter - def attestation_report(self, val): - if self._readonly: - raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") - cdef view.array arr = view.array(shape=(8192,), itemsize=sizeof(unsigned char), format="B", mode="c") - arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) - memcpy((&(self._ptr[0].attestationReport)), (arr.data), sizeof(unsigned char) * len(val)) - - @property - def cec_attestation_report(self): - """~_numpy.uint8: (array of length 4096).""" - cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) - arr.data = (&(self._ptr[0].cecAttestationReport)) - return _numpy.asarray(arr) - - @cec_attestation_report.setter - def cec_attestation_report(self, val): - if self._readonly: - raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") - cdef view.array arr = view.array(shape=(4096,), itemsize=sizeof(unsigned char), format="B", mode="c") - arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) - memcpy((&(self._ptr[0].cecAttestationReport)), (arr.data), sizeof(unsigned char) * len(val)) - - @staticmethod - def from_data(data): - """Create an ConfComputeGpuAttestationReport instance wrapping the given NumPy array. 
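A sketch of how the size-gated array properties above behave, assuming ConfComputeGpuAttestationReport follows the same zero-initializing default constructor as the other generated structs and that the module is importable as cuda.bindings._nvml (both assumptions). On a freshly constructed struct the report views are empty because the internal size fields are zero, while the fixed 32-byte nonce remains writable.

    from cuda.bindings import _nvml as nvml  # assumed import path

    report = nvml.ConfComputeGpuAttestationReport()
    report.nonce = list(range(32))               # fills the fixed 32-byte nonce buffer
    print(len(report.attestation_report))        # 0 until attestationReportSize is populated
    print(len(report.cec_attestation_report))    # 0 until cecAttestationReportSize is populated
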
- - Args: - data (_numpy.ndarray): a single-element array of dtype `conf_compute_gpu_attestation_report_dtype` holding the data. - """ - return __from_data(data, "conf_compute_gpu_attestation_report_dtype", conf_compute_gpu_attestation_report_dtype, ConfComputeGpuAttestationReport) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an ConfComputeGpuAttestationReport instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport) - if owner is None: - obj._ptr = malloc(sizeof(nvmlConfComputeGpuAttestationReport_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating ConfComputeGpuAttestationReport") - memcpy((obj._ptr), ptr, sizeof(nvmlConfComputeGpuAttestationReport_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - -cdef _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets(): - cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t pod = nvmlConfComputeGetKeyRotationThresholdInfo_v1_t() - return _numpy.dtype({ - 'names': ['version', 'attacker_advantage'], - 'formats': [_numpy.uint32, _numpy.uint64], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.attackerAdvantage)) - (&pod), - ], - 'itemsize': sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t), - }) - -conf_compute_get_key_rotation_threshold_info_v1_dtype = _get_conf_compute_get_key_rotation_threshold_info_v1_dtype_offsets() - -cdef class ConfComputeGetKeyRotationThresholdInfo_v1: - """Empty-initialize an instance of `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t`. - - - .. 
seealso:: `nvmlConfComputeGetKeyRotationThresholdInfo_v1_t` - """ - cdef: - nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.ConfComputeGetKeyRotationThresholdInfo_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef ConfComputeGetKeyRotationThresholdInfo_v1 other_ - if not isinstance(other, ConfComputeGetKeyRotationThresholdInfo_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: """ - return self._ptr[0].version + """~_numpy.uint8: (array of length 8192).""" + cdef view.array arr + if self._ptr[0].attestationReportSize == 0: + return _numpy.array([]) + arr = view.array(shape=(self._ptr[0].attestationReportSize,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + arr.data = (&(self._ptr[0].attestationReport)) + return _numpy.asarray(arr) - @version.setter - def version(self, val): + @attestation_report.setter + def attestation_report(self, val): if self._readonly: - raise ValueError("This ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only") - self._ptr[0].version = val + raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") + if self._ptr[0].attestationReportSize == 0: + return + cdef view.array arr = view.array(shape=(self._ptr[0].attestationReportSize,), itemsize=sizeof(unsigned char), format="B", mode="c") + arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) + memcpy((&(self._ptr[0].attestationReport)), (arr.data), sizeof(unsigned char) * len(val)) @property - def attacker_advantage(self): - """int: """ - return self._ptr[0].attackerAdvantage + def cec_attestation_report(self): + """~_numpy.uint8: (array of length 4096).""" + cdef view.array arr + if self._ptr[0].cecAttestationReportSize == 0: + return _numpy.array([]) + arr = view.array(shape=(self._ptr[0].cecAttestationReportSize,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + arr.data = (&(self._ptr[0].cecAttestationReport)) + return _numpy.asarray(arr) - @attacker_advantage.setter - def attacker_advantage(self, val): + @cec_attestation_report.setter + def cec_attestation_report(self, val): if self._readonly: - raise ValueError("This 
ConfComputeGetKeyRotationThresholdInfo_v1 instance is read-only") - self._ptr[0].attackerAdvantage = val + raise ValueError("This ConfComputeGpuAttestationReport instance is read-only") + if self._ptr[0].cecAttestationReportSize == 0: + return + cdef view.array arr = view.array(shape=(self._ptr[0].cecAttestationReportSize,), itemsize=sizeof(unsigned char), format="B", mode="c") + arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) + memcpy((&(self._ptr[0].cecAttestationReport)), (arr.data), sizeof(unsigned char) * len(val)) @staticmethod def from_data(data): - """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given NumPy array. + """Create an ConfComputeGpuAttestationReport instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `conf_compute_get_key_rotation_threshold_info_v1_dtype` holding the data. + data (_numpy.ndarray): a single-element array of dtype `conf_compute_gpu_attestation_report_dtype` holding the data. """ - return __from_data(data, "conf_compute_get_key_rotation_threshold_info_v1_dtype", conf_compute_get_key_rotation_threshold_info_v1_dtype, ConfComputeGetKeyRotationThresholdInfo_v1) + return __from_data(data, "conf_compute_gpu_attestation_report_dtype", conf_compute_gpu_attestation_report_dtype, ConfComputeGpuAttestationReport) @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an ConfComputeGetKeyRotationThresholdInfo_v1 instance wrapping the given pointer. + """Create an ConfComputeGpuAttestationReport instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. @@ -11861,16 +11064,16 @@ cdef class ConfComputeGetKeyRotationThresholdInfo_v1: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef ConfComputeGetKeyRotationThresholdInfo_v1 obj = ConfComputeGetKeyRotationThresholdInfo_v1.__new__(ConfComputeGetKeyRotationThresholdInfo_v1) + cdef ConfComputeGpuAttestationReport obj = ConfComputeGpuAttestationReport.__new__(ConfComputeGpuAttestationReport) if owner is None: - obj._ptr = malloc(sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) + obj._ptr = malloc(sizeof(nvmlConfComputeGpuAttestationReport_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating ConfComputeGetKeyRotationThresholdInfo_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t)) + raise MemoryError("Error allocating ConfComputeGpuAttestationReport") + memcpy((obj._ptr), ptr, sizeof(nvmlConfComputeGpuAttestationReport_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly @@ -11969,7 +11172,10 @@ cdef class GpuFabricInfo_v2: @property def cluster_uuid(self): """~_numpy.uint8: (array of length 16).Uuid of the cluster to which this GPU belongs.""" - cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if 16 == 0: + return _numpy.array([]) + arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].clusterUuid)) return _numpy.asarray(arr) @@ -11977,6 +11183,8 @@ cdef class GpuFabricInfo_v2: def cluster_uuid(self, val): if self._readonly: raise ValueError("This GpuFabricInfo_v2 instance is read-only") + if 16 == 0: + return cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = 
_numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].clusterUuid)), (arr.data), sizeof(unsigned char) * len(val)) @@ -12150,7 +11358,10 @@ cdef class NvlinkSupportedBwModes_v1: @property def bw_modes(self): """~_numpy.uint8: (array of length 23).""" - cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if self._ptr[0].totalBwModes == 0: + return _numpy.array([]) + arr = view.array(shape=(self._ptr[0].totalBwModes,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].bwModes)) return _numpy.asarray(arr) @@ -12158,21 +11369,12 @@ cdef class NvlinkSupportedBwModes_v1: def bw_modes(self, val): if self._readonly: raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only") - cdef view.array arr = view.array(shape=(23,), itemsize=sizeof(unsigned char), format="B", mode="c") + if self._ptr[0].totalBwModes == 0: + return + cdef view.array arr = view.array(shape=(self._ptr[0].totalBwModes,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].bwModes)), (arr.data), sizeof(unsigned char) * len(val)) - @property - def total_bw_modes(self): - """int: """ - return self._ptr[0].totalBwModes - - @total_bw_modes.setter - def total_bw_modes(self, val): - if self._readonly: - raise ValueError("This NvlinkSupportedBwModes_v1 instance is read-only") - self._ptr[0].totalBwModes = val - @staticmethod def from_data(data): """Create an NvlinkSupportedBwModes_v1 instance wrapping the given NumPy array. @@ -13136,274 +12338,6 @@ cdef class GpuInstancePlacement: return obj -cdef _get_gpu_instance_profile_info_v2_dtype_offsets(): - cdef nvmlGpuInstanceProfileInfo_v2_t pod = nvmlGpuInstanceProfileInfo_v2_t() - return _numpy.dtype({ - 'names': ['version', 'id', 'is_p2p_supported', 'slice_count', 'instance_count', 'multiprocessor_count', 'copy_engine_count', 'decoder_count', 'encoder_count', 'jpeg_count', 'ofa_count', 'memory_size_mb', 'name'], - 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint64, (_numpy.int8, 96)], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.id)) - (&pod), - (&(pod.isP2pSupported)) - (&pod), - (&(pod.sliceCount)) - (&pod), - (&(pod.instanceCount)) - (&pod), - (&(pod.multiprocessorCount)) - (&pod), - (&(pod.copyEngineCount)) - (&pod), - (&(pod.decoderCount)) - (&pod), - (&(pod.encoderCount)) - (&pod), - (&(pod.jpegCount)) - (&pod), - (&(pod.ofaCount)) - (&pod), - (&(pod.memorySizeMB)) - (&pod), - (&(pod.name)) - (&pod), - ], - 'itemsize': sizeof(nvmlGpuInstanceProfileInfo_v2_t), - }) - -gpu_instance_profile_info_v2_dtype = _get_gpu_instance_profile_info_v2_dtype_offsets() - -cdef class GpuInstanceProfileInfo_v2: - """Empty-initialize an instance of `nvmlGpuInstanceProfileInfo_v2_t`. - - - .. 
seealso:: `nvmlGpuInstanceProfileInfo_v2_t` - """ - cdef: - nvmlGpuInstanceProfileInfo_v2_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlGpuInstanceProfileInfo_v2_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating GpuInstanceProfileInfo_v2") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlGpuInstanceProfileInfo_v2_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.GpuInstanceProfileInfo_v2 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef GpuInstanceProfileInfo_v2 other_ - if not isinstance(other, GpuInstanceProfileInfo_v2): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlGpuInstanceProfileInfo_v2_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating GpuInstanceProfileInfo_v2") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlGpuInstanceProfileInfo_v2_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: """ - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].version = val - - @property - def id(self): - """int: """ - return self._ptr[0].id - - @id.setter - def id(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].id = val - - @property - def is_p2p_supported(self): - """int: """ - return self._ptr[0].isP2pSupported - - @is_p2p_supported.setter - def is_p2p_supported(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].isP2pSupported = val - - @property - def slice_count(self): - """int: """ - return self._ptr[0].sliceCount - - @slice_count.setter - def slice_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].sliceCount = val - - @property - def instance_count(self): - """int: """ - return self._ptr[0].instanceCount - - @instance_count.setter - def instance_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].instanceCount = val - - @property - def multiprocessor_count(self): - """int: """ - return self._ptr[0].multiprocessorCount - - @multiprocessor_count.setter - def multiprocessor_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].multiprocessorCount = val - - @property - def copy_engine_count(self): - """int: """ - return self._ptr[0].copyEngineCount - - @copy_engine_count.setter - def copy_engine_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].copyEngineCount = val - - @property - def decoder_count(self): - """int: """ - return 
self._ptr[0].decoderCount - - @decoder_count.setter - def decoder_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].decoderCount = val - - @property - def encoder_count(self): - """int: """ - return self._ptr[0].encoderCount - - @encoder_count.setter - def encoder_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].encoderCount = val - - @property - def jpeg_count(self): - """int: """ - return self._ptr[0].jpegCount - - @jpeg_count.setter - def jpeg_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].jpegCount = val - - @property - def ofa_count(self): - """int: """ - return self._ptr[0].ofaCount - - @ofa_count.setter - def ofa_count(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].ofaCount = val - - @property - def memory_size_mb(self): - """int: """ - return self._ptr[0].memorySizeMB - - @memory_size_mb.setter - def memory_size_mb(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].memorySizeMB = val - - @property - def name(self): - """~_numpy.int8: (array of length 96).""" - return cpython.PyUnicode_FromString(self._ptr[0].name) - - @name.setter - def name(self, val): - if self._readonly: - raise ValueError("This GpuInstanceProfileInfo_v2 instance is read-only") - cdef bytes buf = val.encode() - if len(buf) >= 96: - raise ValueError("String too long for field name, max length is 95") - cdef char *ptr = buf - memcpy((self._ptr[0].name), ptr, 96) - - @staticmethod - def from_data(data): - """Create an GpuInstanceProfileInfo_v2 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `gpu_instance_profile_info_v2_dtype` holding the data. - """ - return __from_data(data, "gpu_instance_profile_info_v2_dtype", gpu_instance_profile_info_v2_dtype, GpuInstanceProfileInfo_v2) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an GpuInstanceProfileInfo_v2 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. 
- """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef GpuInstanceProfileInfo_v2 obj = GpuInstanceProfileInfo_v2.__new__(GpuInstanceProfileInfo_v2) - if owner is None: - obj._ptr = malloc(sizeof(nvmlGpuInstanceProfileInfo_v2_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating GpuInstanceProfileInfo_v2") - memcpy((obj._ptr), ptr, sizeof(nvmlGpuInstanceProfileInfo_v2_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - cdef _get_gpu_instance_profile_info_v3_dtype_offsets(): cdef nvmlGpuInstanceProfileInfo_v3_t pod = nvmlGpuInstanceProfileInfo_v3_t() return _numpy.dtype({ @@ -13769,304 +12703,60 @@ cdef class ComputeInstancePlacement: if key_ >= size or key_ <= -(size+1): raise IndexError("index is out of bounds") if key_ < 0: - key_ += size - return ComputeInstancePlacement.from_data(self._data[key_:key_+1]) - out = self._data[key] - if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype: - return ComputeInstancePlacement.from_data(out) - return out - - def __setitem__(self, key, val): - self._data[key] = val - - @staticmethod - def from_data(data): - """Create an ComputeInstancePlacement instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a 1D array of dtype `compute_instance_placement_dtype` holding the data. - """ - cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement) - if not isinstance(data, _numpy.ndarray): - raise TypeError("data argument must be a NumPy ndarray") - if data.ndim != 1: - raise ValueError("data array must be 1D") - if data.dtype != compute_instance_placement_dtype: - raise ValueError("data array must be of dtype compute_instance_placement_dtype") - obj._data = data.view(_numpy.recarray) - - return obj - - @staticmethod - def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False): - """Create an ComputeInstancePlacement instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - size (int): number of structs, default=1. - readonly (bool): whether the data is read-only (to the user). default is `False`. 
- """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement) - cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE - cdef object buf = cpython.memoryview.PyMemoryView_FromMemory( - ptr, sizeof(nvmlComputeInstancePlacement_t) * size, flag) - data = _numpy.ndarray(size, buffer=buf, dtype=compute_instance_placement_dtype) - obj._data = data.view(_numpy.recarray) - - return obj - - -cdef _get_compute_instance_profile_info_v2_dtype_offsets(): - cdef nvmlComputeInstanceProfileInfo_v2_t pod = nvmlComputeInstanceProfileInfo_v2_t() - return _numpy.dtype({ - 'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name'], - 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, (_numpy.int8, 96)], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.id)) - (&pod), - (&(pod.sliceCount)) - (&pod), - (&(pod.instanceCount)) - (&pod), - (&(pod.multiprocessorCount)) - (&pod), - (&(pod.sharedCopyEngineCount)) - (&pod), - (&(pod.sharedDecoderCount)) - (&pod), - (&(pod.sharedEncoderCount)) - (&pod), - (&(pod.sharedJpegCount)) - (&pod), - (&(pod.sharedOfaCount)) - (&pod), - (&(pod.name)) - (&pod), - ], - 'itemsize': sizeof(nvmlComputeInstanceProfileInfo_v2_t), - }) - -compute_instance_profile_info_v2_dtype = _get_compute_instance_profile_info_v2_dtype_offsets() - -cdef class ComputeInstanceProfileInfo_v2: - """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v2_t`. - - - .. seealso:: `nvmlComputeInstanceProfileInfo_v2_t` - """ - cdef: - nvmlComputeInstanceProfileInfo_v2_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlComputeInstanceProfileInfo_v2_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef ComputeInstanceProfileInfo_v2 other_ - if not isinstance(other, ComputeInstanceProfileInfo_v2): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v2_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: """ - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This 
ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].version = val - - @property - def id(self): - """int: """ - return self._ptr[0].id - - @id.setter - def id(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].id = val - - @property - def slice_count(self): - """int: """ - return self._ptr[0].sliceCount - - @slice_count.setter - def slice_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].sliceCount = val - - @property - def instance_count(self): - """int: """ - return self._ptr[0].instanceCount - - @instance_count.setter - def instance_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].instanceCount = val - - @property - def multiprocessor_count(self): - """int: """ - return self._ptr[0].multiprocessorCount - - @multiprocessor_count.setter - def multiprocessor_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].multiprocessorCount = val - - @property - def shared_copy_engine_count(self): - """int: """ - return self._ptr[0].sharedCopyEngineCount - - @shared_copy_engine_count.setter - def shared_copy_engine_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].sharedCopyEngineCount = val - - @property - def shared_decoder_count(self): - """int: """ - return self._ptr[0].sharedDecoderCount - - @shared_decoder_count.setter - def shared_decoder_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].sharedDecoderCount = val - - @property - def shared_encoder_count(self): - """int: """ - return self._ptr[0].sharedEncoderCount - - @shared_encoder_count.setter - def shared_encoder_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].sharedEncoderCount = val - - @property - def shared_jpeg_count(self): - """int: """ - return self._ptr[0].sharedJpegCount - - @shared_jpeg_count.setter - def shared_jpeg_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].sharedJpegCount = val - - @property - def shared_ofa_count(self): - """int: """ - return self._ptr[0].sharedOfaCount - - @shared_ofa_count.setter - def shared_ofa_count(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - self._ptr[0].sharedOfaCount = val - - @property - def name(self): - """~_numpy.int8: (array of length 96).""" - return cpython.PyUnicode_FromString(self._ptr[0].name) - - @name.setter - def name(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") - cdef bytes buf = val.encode() - if len(buf) >= 96: - raise ValueError("String too long for field name, max length is 95") - cdef char *ptr = buf - memcpy((self._ptr[0].name), ptr, 96) + key_ += size + return ComputeInstancePlacement.from_data(self._data[key_:key_+1]) + out = self._data[key] + if isinstance(out, _numpy.recarray) and out.dtype == compute_instance_placement_dtype: + return ComputeInstancePlacement.from_data(out) + return out + + def __setitem__(self, key, val): + self._data[key] = val @staticmethod def 
from_data(data): - """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given NumPy array. + """Create an ComputeInstancePlacement instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v2_dtype` holding the data. + data (_numpy.ndarray): a 1D array of dtype `compute_instance_placement_dtype` holding the data. """ - return __from_data(data, "compute_instance_profile_info_v2_dtype", compute_instance_profile_info_v2_dtype, ComputeInstanceProfileInfo_v2) + cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement) + if not isinstance(data, _numpy.ndarray): + raise TypeError("data argument must be a NumPy ndarray") + if data.ndim != 1: + raise ValueError("data array must be 1D") + if data.dtype != compute_instance_placement_dtype: + raise ValueError("data array must be of dtype compute_instance_placement_dtype") + obj._data = data.view(_numpy.recarray) + + return obj @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer. + def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False): + """Create an ComputeInstancePlacement instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. + size (int): number of structs, default=1. readonly (bool): whether the data is read-only (to the user). default is `False`. """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2) - if owner is None: - obj._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2") - memcpy((obj._ptr), ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly + cdef ComputeInstancePlacement obj = ComputeInstancePlacement.__new__(ComputeInstancePlacement) + cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE + cdef object buf = cpython.memoryview.PyMemoryView_FromMemory( + ptr, sizeof(nvmlComputeInstancePlacement_t) * size, flag) + data = _numpy.ndarray(size, buffer=buf, dtype=compute_instance_placement_dtype) + obj._data = data.view(_numpy.recarray) + return obj -cdef _get_compute_instance_profile_info_v3_dtype_offsets(): - cdef nvmlComputeInstanceProfileInfo_v3_t pod = nvmlComputeInstanceProfileInfo_v3_t() +cdef _get_compute_instance_profile_info_v2_dtype_offsets(): + cdef nvmlComputeInstanceProfileInfo_v2_t pod = nvmlComputeInstanceProfileInfo_v2_t() return _numpy.dtype({ - 'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name', 'capabilities'], - 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, (_numpy.int8, 96), _numpy.uint32], + 'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 
'shared_ofa_count', 'name'], + 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, (_numpy.int8, 96)], 'offsets': [ (&(pod.version)) - (&pod), (&(pod.id)) - (&pod), @@ -14079,42 +12769,41 @@ cdef _get_compute_instance_profile_info_v3_dtype_offsets(): (&(pod.sharedJpegCount)) - (&pod), (&(pod.sharedOfaCount)) - (&pod), (&(pod.name)) - (&pod), - (&(pod.capabilities)) - (&pod), ], - 'itemsize': sizeof(nvmlComputeInstanceProfileInfo_v3_t), + 'itemsize': sizeof(nvmlComputeInstanceProfileInfo_v2_t), }) -compute_instance_profile_info_v3_dtype = _get_compute_instance_profile_info_v3_dtype_offsets() +compute_instance_profile_info_v2_dtype = _get_compute_instance_profile_info_v2_dtype_offsets() -cdef class ComputeInstanceProfileInfo_v3: - """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v3_t`. +cdef class ComputeInstanceProfileInfo_v2: + """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v2_t`. - .. seealso:: `nvmlComputeInstanceProfileInfo_v3_t` + .. seealso:: `nvmlComputeInstanceProfileInfo_v2_t` """ cdef: - nvmlComputeInstanceProfileInfo_v3_t *_ptr + nvmlComputeInstanceProfileInfo_v2_t *_ptr object _owner bint _owned bint _readonly def __init__(self): - self._ptr = calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t)) + self._ptr = calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v2_t)) if self._ptr == NULL: - raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3") + raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2") self._owner = None self._owned = True self._readonly = False def __dealloc__(self): - cdef nvmlComputeInstanceProfileInfo_v3_t *ptr + cdef nvmlComputeInstanceProfileInfo_v2_t *ptr if self._owned and self._ptr != NULL: ptr = self._ptr self._ptr = NULL free(ptr) def __repr__(self): - return f"<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>" + return f"<{__name__}.ComputeInstanceProfileInfo_v2 object at {hex(id(self))}>" @property def ptr(self): @@ -14128,18 +12817,18 @@ cdef class ComputeInstanceProfileInfo_v3: return (self._ptr) def __eq__(self, other): - cdef ComputeInstanceProfileInfo_v3 other_ - if not isinstance(other, ComputeInstanceProfileInfo_v3): + cdef ComputeInstanceProfileInfo_v2 other_ + if not isinstance(other, ComputeInstanceProfileInfo_v2): return False other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v3_t)) == 0) + return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v2_t)) == 0) def __setitem__(self, key, val): if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t)) + self._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t)) if self._ptr == NULL: - raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t)) + raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2") + memcpy(self._ptr, val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v2_t)) self._owner = None self._owned = True self._readonly = not val.flags.writeable @@ -14154,7 +12843,7 @@ cdef class ComputeInstanceProfileInfo_v3: @version.setter def version(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].version = val @property @@ 
-14165,7 +12854,7 @@ cdef class ComputeInstanceProfileInfo_v3: @id.setter def id(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].id = val @property @@ -14176,7 +12865,7 @@ cdef class ComputeInstanceProfileInfo_v3: @slice_count.setter def slice_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].sliceCount = val @property @@ -14187,7 +12876,7 @@ cdef class ComputeInstanceProfileInfo_v3: @instance_count.setter def instance_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].instanceCount = val @property @@ -14198,7 +12887,7 @@ cdef class ComputeInstanceProfileInfo_v3: @multiprocessor_count.setter def multiprocessor_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].multiprocessorCount = val @property @@ -14209,7 +12898,7 @@ cdef class ComputeInstanceProfileInfo_v3: @shared_copy_engine_count.setter def shared_copy_engine_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].sharedCopyEngineCount = val @property @@ -14220,7 +12909,7 @@ cdef class ComputeInstanceProfileInfo_v3: @shared_decoder_count.setter def shared_decoder_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].sharedDecoderCount = val @property @@ -14231,7 +12920,7 @@ cdef class ComputeInstanceProfileInfo_v3: @shared_encoder_count.setter def shared_encoder_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].sharedEncoderCount = val @property @@ -14242,7 +12931,7 @@ cdef class ComputeInstanceProfileInfo_v3: @shared_jpeg_count.setter def shared_jpeg_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].sharedJpegCount = val @property @@ -14253,7 +12942,7 @@ cdef class ComputeInstanceProfileInfo_v3: @shared_ofa_count.setter def shared_ofa_count(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") self._ptr[0].sharedOfaCount = val @property @@ -14264,168 +12953,25 @@ cdef class ComputeInstanceProfileInfo_v3: @name.setter def name(self, val): if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v2 instance is read-only") cdef bytes buf = val.encode() if len(buf) >= 96: raise ValueError("String too long for field name, max length is 95") cdef char *ptr = buf memcpy((self._ptr[0].name), 
ptr, 96) - @property - def capabilities(self): - """int: """ - return self._ptr[0].capabilities - - @capabilities.setter - def capabilities(self, val): - if self._readonly: - raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") - self._ptr[0].capabilities = val - - @staticmethod - def from_data(data): - """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v3_dtype` holding the data. - """ - return __from_data(data, "compute_instance_profile_info_v3_dtype", compute_instance_profile_info_v3_dtype, ComputeInstanceProfileInfo_v3) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3) - if owner is None: - obj._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3") - memcpy((obj._ptr), ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - -cdef _get_gpm_support_dtype_offsets(): - cdef nvmlGpmSupport_t pod = nvmlGpmSupport_t() - return _numpy.dtype({ - 'names': ['version', 'is_supported_device'], - 'formats': [_numpy.uint32, _numpy.uint32], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.isSupportedDevice)) - (&pod), - ], - 'itemsize': sizeof(nvmlGpmSupport_t), - }) - -gpm_support_dtype = _get_gpm_support_dtype_offsets() - -cdef class GpmSupport: - """Empty-initialize an instance of `nvmlGpmSupport_t`. - - - .. 
seealso:: `nvmlGpmSupport_t` - """ - cdef: - nvmlGpmSupport_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlGpmSupport_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating GpmSupport") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlGpmSupport_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.GpmSupport object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef GpmSupport other_ - if not isinstance(other, GpmSupport): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlGpmSupport_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlGpmSupport_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating GpmSupport") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlGpmSupport_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: IN: Set to NVML_GPM_SUPPORT_VERSION.""" - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This GpmSupport instance is read-only") - self._ptr[0].version = val - - @property - def is_supported_device(self): - """int: OUT: Indicates device support.""" - return self._ptr[0].isSupportedDevice - - @is_supported_device.setter - def is_supported_device(self, val): - if self._readonly: - raise ValueError("This GpmSupport instance is read-only") - self._ptr[0].isSupportedDevice = val - @staticmethod def from_data(data): - """Create an GpmSupport instance wrapping the given NumPy array. + """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `gpm_support_dtype` holding the data. + data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v2_dtype` holding the data. """ - return __from_data(data, "gpm_support_dtype", gpm_support_dtype, GpmSupport) + return __from_data(data, "compute_instance_profile_info_v2_dtype", compute_instance_profile_info_v2_dtype, ComputeInstanceProfileInfo_v2) @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an GpmSupport instance wrapping the given pointer. + """Create an ComputeInstanceProfileInfo_v2 instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. 
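A minimal usage sketch of the copy-versus-borrow behaviour that the `from_ptr` path above implements for pointer-backed structs. The public import path `cuda.bindings.nvml` is an assumption; only the constructor, `.ptr`, `.slice_count`, and `from_ptr(ptr, readonly=False, owner=None)` shown in this hunk are relied on.

    import cuda.bindings.nvml as nvml  # assumed public re-export of these wrappers

    info = nvml.ComputeInstanceProfileInfo_v2()          # calloc-backed, owned by the wrapper
    info.slice_count = 4

    # owner omitted: the struct bytes are memcpy'd, so the copy is independent
    copied = nvml.ComputeInstanceProfileInfo_v2.from_ptr(info.ptr)
    copied.slice_count = 7
    assert info.slice_count == 4

    # owner supplied: the wrapper borrows the pointer, so writes go through
    borrowed = nvml.ComputeInstanceProfileInfo_v2.from_ptr(info.ptr, owner=info)
    borrowed.slice_count = 7
    assert info.slice_count == 7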
@@ -14434,65 +12980,75 @@ cdef class GpmSupport: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef GpmSupport obj = GpmSupport.__new__(GpmSupport) + cdef ComputeInstanceProfileInfo_v2 obj = ComputeInstanceProfileInfo_v2.__new__(ComputeInstanceProfileInfo_v2) if owner is None: - obj._ptr = malloc(sizeof(nvmlGpmSupport_t)) + obj._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v2_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating GpmSupport") - memcpy((obj._ptr), ptr, sizeof(nvmlGpmSupport_t)) + raise MemoryError("Error allocating ComputeInstanceProfileInfo_v2") + memcpy((obj._ptr), ptr, sizeof(nvmlComputeInstanceProfileInfo_v2_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly return obj -cdef _get_device_capabilities_v1_dtype_offsets(): - cdef nvmlDeviceCapabilities_v1_t pod = nvmlDeviceCapabilities_v1_t() +cdef _get_compute_instance_profile_info_v3_dtype_offsets(): + cdef nvmlComputeInstanceProfileInfo_v3_t pod = nvmlComputeInstanceProfileInfo_v3_t() return _numpy.dtype({ - 'names': ['version', 'cap_mask'], - 'formats': [_numpy.uint32, _numpy.uint32], + 'names': ['version', 'id', 'slice_count', 'instance_count', 'multiprocessor_count', 'shared_copy_engine_count', 'shared_decoder_count', 'shared_encoder_count', 'shared_jpeg_count', 'shared_ofa_count', 'name', 'capabilities'], + 'formats': [_numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, _numpy.uint32, (_numpy.int8, 96), _numpy.uint32], 'offsets': [ (&(pod.version)) - (&pod), - (&(pod.capMask)) - (&pod), + (&(pod.id)) - (&pod), + (&(pod.sliceCount)) - (&pod), + (&(pod.instanceCount)) - (&pod), + (&(pod.multiprocessorCount)) - (&pod), + (&(pod.sharedCopyEngineCount)) - (&pod), + (&(pod.sharedDecoderCount)) - (&pod), + (&(pod.sharedEncoderCount)) - (&pod), + (&(pod.sharedJpegCount)) - (&pod), + (&(pod.sharedOfaCount)) - (&pod), + (&(pod.name)) - (&pod), + (&(pod.capabilities)) - (&pod), ], - 'itemsize': sizeof(nvmlDeviceCapabilities_v1_t), + 'itemsize': sizeof(nvmlComputeInstanceProfileInfo_v3_t), }) -device_capabilities_v1_dtype = _get_device_capabilities_v1_dtype_offsets() +compute_instance_profile_info_v3_dtype = _get_compute_instance_profile_info_v3_dtype_offsets() -cdef class DeviceCapabilities_v1: - """Empty-initialize an instance of `nvmlDeviceCapabilities_v1_t`. +cdef class ComputeInstanceProfileInfo_v3: + """Empty-initialize an instance of `nvmlComputeInstanceProfileInfo_v3_t`. - .. seealso:: `nvmlDeviceCapabilities_v1_t` + .. 
seealso:: `nvmlComputeInstanceProfileInfo_v3_t` """ cdef: - nvmlDeviceCapabilities_v1_t *_ptr + nvmlComputeInstanceProfileInfo_v3_t *_ptr object _owner bint _owned bint _readonly def __init__(self): - self._ptr = calloc(1, sizeof(nvmlDeviceCapabilities_v1_t)) + self._ptr = calloc(1, sizeof(nvmlComputeInstanceProfileInfo_v3_t)) if self._ptr == NULL: - raise MemoryError("Error allocating DeviceCapabilities_v1") + raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3") self._owner = None self._owned = True self._readonly = False def __dealloc__(self): - cdef nvmlDeviceCapabilities_v1_t *ptr + cdef nvmlComputeInstanceProfileInfo_v3_t *ptr if self._owned and self._ptr != NULL: ptr = self._ptr self._ptr = NULL free(ptr) def __repr__(self): - return f"<{__name__}.DeviceCapabilities_v1 object at {hex(id(self))}>" + return f"<{__name__}.ComputeInstanceProfileInfo_v3 object at {hex(id(self))}>" @property def ptr(self): @@ -14506,18 +13062,18 @@ cdef class DeviceCapabilities_v1: return (self._ptr) def __eq__(self, other): - cdef DeviceCapabilities_v1 other_ - if not isinstance(other, DeviceCapabilities_v1): + cdef ComputeInstanceProfileInfo_v3 other_ + if not isinstance(other, ComputeInstanceProfileInfo_v3): return False other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlDeviceCapabilities_v1_t)) == 0) + return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlComputeInstanceProfileInfo_v3_t)) == 0) def __setitem__(self, key, val): if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlDeviceCapabilities_v1_t)) + self._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t)) if self._ptr == NULL: - raise MemoryError("Error allocating DeviceCapabilities_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlDeviceCapabilities_v1_t)) + raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3") + memcpy(self._ptr, val.ctypes.data, sizeof(nvmlComputeInstanceProfileInfo_v3_t)) self._owner = None self._owned = True self._readonly = not val.flags.writeable @@ -14526,38 +13082,152 @@ cdef class DeviceCapabilities_v1: @property def version(self): - """int: the API version number""" + """int: """ return self._ptr[0].version @version.setter def version(self, val): if self._readonly: - raise ValueError("This DeviceCapabilities_v1 instance is read-only") + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") self._ptr[0].version = val @property - def cap_mask(self): - """int: OUT: Bit mask of capabilities.""" - return self._ptr[0].capMask + def id(self): + """int: """ + return self._ptr[0].id + + @id.setter + def id(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].id = val + + @property + def slice_count(self): + """int: """ + return self._ptr[0].sliceCount + + @slice_count.setter + def slice_count(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].sliceCount = val + + @property + def instance_count(self): + """int: """ + return self._ptr[0].instanceCount + + @instance_count.setter + def instance_count(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].instanceCount = val + + @property + def multiprocessor_count(self): + """int: """ + return self._ptr[0].multiprocessorCount + + @multiprocessor_count.setter + def multiprocessor_count(self, val): + if self._readonly: + raise ValueError("This 
ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].multiprocessorCount = val + + @property + def shared_copy_engine_count(self): + """int: """ + return self._ptr[0].sharedCopyEngineCount + + @shared_copy_engine_count.setter + def shared_copy_engine_count(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].sharedCopyEngineCount = val + + @property + def shared_decoder_count(self): + """int: """ + return self._ptr[0].sharedDecoderCount + + @shared_decoder_count.setter + def shared_decoder_count(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].sharedDecoderCount = val + + @property + def shared_encoder_count(self): + """int: """ + return self._ptr[0].sharedEncoderCount + + @shared_encoder_count.setter + def shared_encoder_count(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].sharedEncoderCount = val + + @property + def shared_jpeg_count(self): + """int: """ + return self._ptr[0].sharedJpegCount + + @shared_jpeg_count.setter + def shared_jpeg_count(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].sharedJpegCount = val + + @property + def shared_ofa_count(self): + """int: """ + return self._ptr[0].sharedOfaCount + + @shared_ofa_count.setter + def shared_ofa_count(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].sharedOfaCount = val + + @property + def name(self): + """~_numpy.int8: (array of length 96).""" + return cpython.PyUnicode_FromString(self._ptr[0].name) + + @name.setter + def name(self, val): + if self._readonly: + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + cdef bytes buf = val.encode() + if len(buf) >= 96: + raise ValueError("String too long for field name, max length is 95") + cdef char *ptr = buf + memcpy((self._ptr[0].name), ptr, 96) + + @property + def capabilities(self): + """int: """ + return self._ptr[0].capabilities - @cap_mask.setter - def cap_mask(self, val): + @capabilities.setter + def capabilities(self, val): if self._readonly: - raise ValueError("This DeviceCapabilities_v1 instance is read-only") - self._ptr[0].capMask = val + raise ValueError("This ComputeInstanceProfileInfo_v3 instance is read-only") + self._ptr[0].capabilities = val @staticmethod def from_data(data): - """Create an DeviceCapabilities_v1 instance wrapping the given NumPy array. + """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `device_capabilities_v1_dtype` holding the data. + data (_numpy.ndarray): a single-element array of dtype `compute_instance_profile_info_v3_dtype` holding the data. """ - return __from_data(data, "device_capabilities_v1_dtype", device_capabilities_v1_dtype, DeviceCapabilities_v1) + return __from_data(data, "compute_instance_profile_info_v3_dtype", compute_instance_profile_info_v3_dtype, ComputeInstanceProfileInfo_v3) @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an DeviceCapabilities_v1 instance wrapping the given pointer. + """Create an ComputeInstanceProfileInfo_v3 instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. 
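A short sketch of the fixed-width `name` handling in the v3 profile-info wrapper above. The module path is an assumption; the 95-character limit comes directly from the setter in this hunk.

    import cuda.bindings.nvml as nvml  # assumed public module path

    profile = nvml.ComputeInstanceProfileInfo_v3()
    profile.name = "1g.10gb"            # encoded and copied into the char[96] field
    print(profile.name)                 # -> "1g.10gb"

    try:
        profile.name = "x" * 96         # 96 encoded bytes leave no room for the NUL
    except ValueError as exc:
        print(exc)                      # "String too long for field name, max length is 95"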
@@ -14566,16 +13236,16 @@ cdef class DeviceCapabilities_v1: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef DeviceCapabilities_v1 obj = DeviceCapabilities_v1.__new__(DeviceCapabilities_v1) + cdef ComputeInstanceProfileInfo_v3 obj = ComputeInstanceProfileInfo_v3.__new__(ComputeInstanceProfileInfo_v3) if owner is None: - obj._ptr = malloc(sizeof(nvmlDeviceCapabilities_v1_t)) + obj._ptr = malloc(sizeof(nvmlComputeInstanceProfileInfo_v3_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating DeviceCapabilities_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlDeviceCapabilities_v1_t)) + raise MemoryError("Error allocating ComputeInstanceProfileInfo_v3") + memcpy((obj._ptr), ptr, sizeof(nvmlComputeInstanceProfileInfo_v3_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly @@ -14824,148 +13494,16 @@ cdef class RepairStatus_v1: @staticmethod def from_data(data): - """Create an RepairStatus_v1 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `repair_status_v1_dtype` holding the data. - """ - return __from_data(data, "repair_status_v1_dtype", repair_status_v1_dtype, RepairStatus_v1) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an RepairStatus_v1 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlRepairStatus_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating RepairStatus_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlRepairStatus_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - -cdef _get_pdi_v1_dtype_offsets(): - cdef nvmlPdi_v1_t pod = nvmlPdi_v1_t() - return _numpy.dtype({ - 'names': ['version', 'value'], - 'formats': [_numpy.uint32, _numpy.uint64], - 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.value)) - (&pod), - ], - 'itemsize': sizeof(nvmlPdi_v1_t), - }) - -pdi_v1_dtype = _get_pdi_v1_dtype_offsets() - -cdef class Pdi_v1: - """Empty-initialize an instance of `nvmlPdi_v1_t`. - - - .. 
seealso:: `nvmlPdi_v1_t` - """ - cdef: - nvmlPdi_v1_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlPdi_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating Pdi_v1") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlPdi_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.Pdi_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef Pdi_v1 other_ - if not isinstance(other, Pdi_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlPdi_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlPdi_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating Pdi_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlPdi_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def version(self): - """int: API version number.""" - return self._ptr[0].version - - @version.setter - def version(self, val): - if self._readonly: - raise ValueError("This Pdi_v1 instance is read-only") - self._ptr[0].version = val - - @property - def value(self): - """int: 64-bit PDI value""" - return self._ptr[0].value - - @value.setter - def value(self, val): - if self._readonly: - raise ValueError("This Pdi_v1 instance is read-only") - self._ptr[0].value = val - - @staticmethod - def from_data(data): - """Create an Pdi_v1 instance wrapping the given NumPy array. + """Create an RepairStatus_v1 instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `pdi_v1_dtype` holding the data. + data (_numpy.ndarray): a single-element array of dtype `repair_status_v1_dtype` holding the data. """ - return __from_data(data, "pdi_v1_dtype", pdi_v1_dtype, Pdi_v1) + return __from_data(data, "repair_status_v1_dtype", repair_status_v1_dtype, RepairStatus_v1) @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an Pdi_v1 instance wrapping the given pointer. + """Create an RepairStatus_v1 instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. 
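A sketch of feeding a pre-built NumPy record into one of these wrappers via `from_data`. The dtype and class names are taken from the hunks above; the import path, and whether the wrapper copies or aliases the array, are assumptions.

    import numpy as np
    import cuda.bindings.nvml as nvml  # assumed public module path

    # single-element array of the struct dtype, as the from_data docstring requires
    raw = np.zeros(1, dtype=nvml.compute_instance_profile_info_v3_dtype)
    raw["slice_count"] = 2
    raw["multiprocessor_count"] = 14

    info = nvml.ComputeInstanceProfileInfo_v3.from_data(raw)
    print(info.slice_count, info.multiprocessor_count)   # -> 2 14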
@@ -14974,16 +13512,16 @@ cdef class Pdi_v1: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef Pdi_v1 obj = Pdi_v1.__new__(Pdi_v1) + cdef RepairStatus_v1 obj = RepairStatus_v1.__new__(RepairStatus_v1) if owner is None: - obj._ptr = malloc(sizeof(nvmlPdi_v1_t)) + obj._ptr = malloc(sizeof(nvmlRepairStatus_v1_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating Pdi_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlPdi_v1_t)) + raise MemoryError("Error allocating RepairStatus_v1") + memcpy((obj._ptr), ptr, sizeof(nvmlRepairStatus_v1_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly @@ -15433,7 +13971,10 @@ cdef class GpuFabricInfo_v3: @property def cluster_uuid(self): """~_numpy.uint8: (array of length 16).Uuid of the cluster to which this GPU belongs.""" - cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) + cdef view.array arr + if 16 == 0: + return _numpy.array([]) + arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c", allocate_buffer=False) arr.data = (&(self._ptr[0].clusterUuid)) return _numpy.asarray(arr) @@ -15441,6 +13982,8 @@ cdef class GpuFabricInfo_v3: def cluster_uuid(self, val): if self._readonly: raise ValueError("This GpuFabricInfo_v3 instance is read-only") + if 16 == 0: + return cdef view.array arr = view.array(shape=(16,), itemsize=sizeof(unsigned char), format="B", mode="c") arr[:] = _numpy.asarray(val, dtype=_numpy.uint8) memcpy((&(self._ptr[0].clusterUuid)), (arr.data), sizeof(unsigned char) * len(val)) @@ -15643,283 +14186,7 @@ cdef class NvLinkInfo_v1: @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an NvLinkInfo_v1 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef NvLinkInfo_v1 obj = NvLinkInfo_v1.__new__(NvLinkInfo_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlNvLinkInfo_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating NvLinkInfo_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlNvLinkInfo_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - -cdef _get_nvlink_firmware_version_dtype_offsets(): - cdef nvmlNvlinkFirmwareVersion_t pod = nvmlNvlinkFirmwareVersion_t() - return _numpy.dtype({ - 'names': ['ucode_type', 'major', 'minor', 'sub_minor'], - 'formats': [_numpy.uint8, _numpy.uint32, _numpy.uint32, _numpy.uint32], - 'offsets': [ - (&(pod.ucodeType)) - (&pod), - (&(pod.major)) - (&pod), - (&(pod.minor)) - (&pod), - (&(pod.subMinor)) - (&pod), - ], - 'itemsize': sizeof(nvmlNvlinkFirmwareVersion_t), - }) - -nvlink_firmware_version_dtype = _get_nvlink_firmware_version_dtype_offsets() - -cdef class NvlinkFirmwareVersion: - """Empty-initialize an instance of `nvmlNvlinkFirmwareVersion_t`. - - - .. 
seealso:: `nvmlNvlinkFirmwareVersion_t` - """ - cdef: - nvmlNvlinkFirmwareVersion_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating NvlinkFirmwareVersion") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlNvlinkFirmwareVersion_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.NvlinkFirmwareVersion object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef NvlinkFirmwareVersion other_ - if not isinstance(other, NvlinkFirmwareVersion): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlNvlinkFirmwareVersion_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlNvlinkFirmwareVersion_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating NvlinkFirmwareVersion") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def ucode_type(self): - """int: """ - return self._ptr[0].ucodeType - - @ucode_type.setter - def ucode_type(self, val): - if self._readonly: - raise ValueError("This NvlinkFirmwareVersion instance is read-only") - self._ptr[0].ucodeType = val - - @property - def major(self): - """int: """ - return self._ptr[0].major - - @major.setter - def major(self, val): - if self._readonly: - raise ValueError("This NvlinkFirmwareVersion instance is read-only") - self._ptr[0].major = val - - @property - def minor(self): - """int: """ - return self._ptr[0].minor - - @minor.setter - def minor(self, val): - if self._readonly: - raise ValueError("This NvlinkFirmwareVersion instance is read-only") - self._ptr[0].minor = val - - @property - def sub_minor(self): - """int: """ - return self._ptr[0].subMinor - - @sub_minor.setter - def sub_minor(self, val): - if self._readonly: - raise ValueError("This NvlinkFirmwareVersion instance is read-only") - self._ptr[0].subMinor = val - - @staticmethod - def from_data(data): - """Create an NvlinkFirmwareVersion instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `nvlink_firmware_version_dtype` holding the data. - """ - return __from_data(data, "nvlink_firmware_version_dtype", nvlink_firmware_version_dtype, NvlinkFirmwareVersion) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an NvlinkFirmwareVersion instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. 
- """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion) - if owner is None: - obj._ptr = malloc(sizeof(nvmlNvlinkFirmwareVersion_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating NvlinkFirmwareVersion") - memcpy((obj._ptr), ptr, sizeof(nvmlNvlinkFirmwareVersion_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - return obj - - -cdef _get_unrepairable_memory_status_v1_dtype_offsets(): - cdef nvmlUnrepairableMemoryStatus_v1_t pod = nvmlUnrepairableMemoryStatus_v1_t() - return _numpy.dtype({ - 'names': ['b_unrepairable_memory'], - 'formats': [_numpy.uint32], - 'offsets': [ - (&(pod.bUnrepairableMemory)) - (&pod), - ], - 'itemsize': sizeof(nvmlUnrepairableMemoryStatus_v1_t), - }) - -unrepairable_memory_status_v1_dtype = _get_unrepairable_memory_status_v1_dtype_offsets() - -cdef class UnrepairableMemoryStatus_v1: - """Empty-initialize an instance of `nvmlUnrepairableMemoryStatus_v1_t`. - - - .. seealso:: `nvmlUnrepairableMemoryStatus_v1_t` - """ - cdef: - nvmlUnrepairableMemoryStatus_v1_t *_ptr - object _owner - bint _owned - bint _readonly - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlUnrepairableMemoryStatus_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating UnrepairableMemoryStatus_v1") - self._owner = None - self._owned = True - self._readonly = False - - def __dealloc__(self): - cdef nvmlUnrepairableMemoryStatus_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.UnrepairableMemoryStatus_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef UnrepairableMemoryStatus_v1 other_ - if not isinstance(other, UnrepairableMemoryStatus_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlUnrepairableMemoryStatus_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlUnrepairableMemoryStatus_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating UnrepairableMemoryStatus_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlUnrepairableMemoryStatus_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def b_unrepairable_memory(self): - """int: Reference to `unsigned` int.""" - return self._ptr[0].bUnrepairableMemory - - @b_unrepairable_memory.setter - def b_unrepairable_memory(self, val): - if self._readonly: - raise ValueError("This UnrepairableMemoryStatus_v1 instance is read-only") - self._ptr[0].bUnrepairableMemory = val - - @staticmethod - def from_data(data): - """Create an UnrepairableMemoryStatus_v1 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `unrepairable_memory_status_v1_dtype` holding the data. 
- """ - return __from_data(data, "unrepairable_memory_status_v1_dtype", unrepairable_memory_status_v1_dtype, UnrepairableMemoryStatus_v1) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an UnrepairableMemoryStatus_v1 instance wrapping the given pointer. + """Create an NvLinkInfo_v1 instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. @@ -15928,65 +14195,67 @@ cdef class UnrepairableMemoryStatus_v1: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef UnrepairableMemoryStatus_v1 obj = UnrepairableMemoryStatus_v1.__new__(UnrepairableMemoryStatus_v1) + cdef NvLinkInfo_v1 obj = NvLinkInfo_v1.__new__(NvLinkInfo_v1) if owner is None: - obj._ptr = malloc(sizeof(nvmlUnrepairableMemoryStatus_v1_t)) + obj._ptr = malloc(sizeof(nvmlNvLinkInfo_v1_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating UnrepairableMemoryStatus_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlUnrepairableMemoryStatus_v1_t)) + raise MemoryError("Error allocating NvLinkInfo_v1") + memcpy((obj._ptr), ptr, sizeof(nvmlNvLinkInfo_v1_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly return obj -cdef _get_rusd_settings_v1_dtype_offsets(): - cdef nvmlRusdSettings_v1_t pod = nvmlRusdSettings_v1_t() +cdef _get_nvlink_firmware_version_dtype_offsets(): + cdef nvmlNvlinkFirmwareVersion_t pod = nvmlNvlinkFirmwareVersion_t() return _numpy.dtype({ - 'names': ['version', 'poll_mask'], - 'formats': [_numpy.uint32, _numpy.uint64], + 'names': ['ucode_type', 'major', 'minor', 'sub_minor'], + 'formats': [_numpy.uint8, _numpy.uint32, _numpy.uint32, _numpy.uint32], 'offsets': [ - (&(pod.version)) - (&pod), - (&(pod.pollMask)) - (&pod), + (&(pod.ucodeType)) - (&pod), + (&(pod.major)) - (&pod), + (&(pod.minor)) - (&pod), + (&(pod.subMinor)) - (&pod), ], - 'itemsize': sizeof(nvmlRusdSettings_v1_t), + 'itemsize': sizeof(nvmlNvlinkFirmwareVersion_t), }) -rusd_settings_v1_dtype = _get_rusd_settings_v1_dtype_offsets() +nvlink_firmware_version_dtype = _get_nvlink_firmware_version_dtype_offsets() -cdef class RusdSettings_v1: - """Empty-initialize an instance of `nvmlRusdSettings_v1_t`. +cdef class NvlinkFirmwareVersion: + """Empty-initialize an instance of `nvmlNvlinkFirmwareVersion_t`. - .. seealso:: `nvmlRusdSettings_v1_t` + .. 
seealso:: `nvmlNvlinkFirmwareVersion_t` """ cdef: - nvmlRusdSettings_v1_t *_ptr + nvmlNvlinkFirmwareVersion_t *_ptr object _owner bint _owned bint _readonly def __init__(self): - self._ptr = calloc(1, sizeof(nvmlRusdSettings_v1_t)) + self._ptr = calloc(1, sizeof(nvmlNvlinkFirmwareVersion_t)) if self._ptr == NULL: - raise MemoryError("Error allocating RusdSettings_v1") + raise MemoryError("Error allocating NvlinkFirmwareVersion") self._owner = None self._owned = True self._readonly = False def __dealloc__(self): - cdef nvmlRusdSettings_v1_t *ptr + cdef nvmlNvlinkFirmwareVersion_t *ptr if self._owned and self._ptr != NULL: ptr = self._ptr self._ptr = NULL free(ptr) def __repr__(self): - return f"<{__name__}.RusdSettings_v1 object at {hex(id(self))}>" + return f"<{__name__}.NvlinkFirmwareVersion object at {hex(id(self))}>" @property def ptr(self): @@ -16000,18 +14269,18 @@ cdef class RusdSettings_v1: return (self._ptr) def __eq__(self, other): - cdef RusdSettings_v1 other_ - if not isinstance(other, RusdSettings_v1): + cdef NvlinkFirmwareVersion other_ + if not isinstance(other, NvlinkFirmwareVersion): return False other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlRusdSettings_v1_t)) == 0) + return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlNvlinkFirmwareVersion_t)) == 0) def __setitem__(self, key, val): if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlRusdSettings_v1_t)) + self._ptr = malloc(sizeof(nvmlNvlinkFirmwareVersion_t)) if self._ptr == NULL: - raise MemoryError("Error allocating RusdSettings_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlRusdSettings_v1_t)) + raise MemoryError("Error allocating NvlinkFirmwareVersion") + memcpy(self._ptr, val.ctypes.data, sizeof(nvmlNvlinkFirmwareVersion_t)) self._owner = None self._owned = True self._readonly = not val.flags.writeable @@ -16019,39 +14288,61 @@ cdef class RusdSettings_v1: setattr(self, key, val) @property - def version(self): + def ucode_type(self): """int: """ - return self._ptr[0].version + return self._ptr[0].ucodeType - @version.setter - def version(self, val): + @ucode_type.setter + def ucode_type(self, val): if self._readonly: - raise ValueError("This RusdSettings_v1 instance is read-only") - self._ptr[0].version = val + raise ValueError("This NvlinkFirmwareVersion instance is read-only") + self._ptr[0].ucodeType = val + + @property + def major(self): + """int: """ + return self._ptr[0].major + + @major.setter + def major(self, val): + if self._readonly: + raise ValueError("This NvlinkFirmwareVersion instance is read-only") + self._ptr[0].major = val + + @property + def minor(self): + """int: """ + return self._ptr[0].minor + + @minor.setter + def minor(self, val): + if self._readonly: + raise ValueError("This NvlinkFirmwareVersion instance is read-only") + self._ptr[0].minor = val @property - def poll_mask(self): - """int: Bitmask of polling data. 0 value means the GPU's RUSD polling mask is cleared.""" - return self._ptr[0].pollMask + def sub_minor(self): + """int: """ + return self._ptr[0].subMinor - @poll_mask.setter - def poll_mask(self, val): + @sub_minor.setter + def sub_minor(self, val): if self._readonly: - raise ValueError("This RusdSettings_v1 instance is read-only") - self._ptr[0].pollMask = val + raise ValueError("This NvlinkFirmwareVersion instance is read-only") + self._ptr[0].subMinor = val @staticmethod def from_data(data): - """Create an RusdSettings_v1 instance wrapping the given NumPy array. 
+ """Create an NvlinkFirmwareVersion instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `rusd_settings_v1_dtype` holding the data. + data (_numpy.ndarray): a single-element array of dtype `nvlink_firmware_version_dtype` holding the data. """ - return __from_data(data, "rusd_settings_v1_dtype", rusd_settings_v1_dtype, RusdSettings_v1) + return __from_data(data, "nvlink_firmware_version_dtype", nvlink_firmware_version_dtype, NvlinkFirmwareVersion) @staticmethod def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an RusdSettings_v1 instance wrapping the given pointer. + """Create an NvlinkFirmwareVersion instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. @@ -16060,16 +14351,16 @@ cdef class RusdSettings_v1: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef RusdSettings_v1 obj = RusdSettings_v1.__new__(RusdSettings_v1) + cdef NvlinkFirmwareVersion obj = NvlinkFirmwareVersion.__new__(NvlinkFirmwareVersion) if owner is None: - obj._ptr = malloc(sizeof(nvmlRusdSettings_v1_t)) + obj._ptr = malloc(sizeof(nvmlNvlinkFirmwareVersion_t)) if obj._ptr == NULL: - raise MemoryError("Error allocating RusdSettings_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlRusdSettings_v1_t)) + raise MemoryError("Error allocating NvlinkFirmwareVersion") + memcpy((obj._ptr), ptr, sizeof(nvmlNvlinkFirmwareVersion_t)) obj._owner = None obj._owned = True else: - obj._ptr = ptr + obj._ptr = ptr obj._owner = owner obj._owned = False obj._readonly = readonly @@ -16563,7 +14854,7 @@ cdef class BridgeChipHierarchy: @property def bridge_chip_info(self): """BridgeChipInfo: """ - return BridgeChipInfo.from_ptr(&(self._ptr[0].bridgeChipInfo), 128, self._readonly) + return BridgeChipInfo.from_ptr(&(self._ptr[0].bridgeChipInfo), self._ptr[0].bridgeCount, self._readonly) @bridge_chip_info.setter def bridge_chip_info(self, val): @@ -16574,17 +14865,6 @@ cdef class BridgeChipHierarchy: raise ValueError(f"Expected length 128 for field bridge_chip_info, got {len(val)}") memcpy(&(self._ptr[0].bridgeChipInfo), (val_._get_ptr()), sizeof(nvmlBridgeChipInfo_t) * 128) - @property - def bridge_count(self): - """int: """ - return self._ptr[0].bridgeCount - - @bridge_count.setter - def bridge_count(self, val): - if self._readonly: - raise ValueError("This BridgeChipHierarchy instance is read-only") - self._ptr[0].bridgeCount = val - @staticmethod def from_data(data): """Create an BridgeChipHierarchy instance wrapping the given NumPy array. 
@@ -16694,18 +14974,204 @@ cdef class Sample: return int(self._data.time_stamp[0]) return self._data.time_stamp - @time_stamp.setter - def time_stamp(self, val): - self._data.time_stamp = val + @time_stamp.setter + def time_stamp(self, val): + self._data.time_stamp = val + + @property + def sample_value(self): + """value_dtype: """ + return self._data.sample_value + + @sample_value.setter + def sample_value(self, val): + self._data.sample_value = val + + def __getitem__(self, key): + cdef ssize_t key_ + cdef ssize_t size + if isinstance(key, int): + key_ = key + size = self._data.size + if key_ >= size or key_ <= -(size+1): + raise IndexError("index is out of bounds") + if key_ < 0: + key_ += size + return Sample.from_data(self._data[key_:key_+1]) + out = self._data[key] + if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype: + return Sample.from_data(out) + return out + + def __setitem__(self, key, val): + self._data[key] = val + + @staticmethod + def from_data(data): + """Create an Sample instance wrapping the given NumPy array. + + Args: + data (_numpy.ndarray): a 1D array of dtype `sample_dtype` holding the data. + """ + cdef Sample obj = Sample.__new__(Sample) + if not isinstance(data, _numpy.ndarray): + raise TypeError("data argument must be a NumPy ndarray") + if data.ndim != 1: + raise ValueError("data array must be 1D") + if data.dtype != sample_dtype: + raise ValueError("data array must be of dtype sample_dtype") + obj._data = data.view(_numpy.recarray) + + return obj + + @staticmethod + def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False): + """Create an Sample instance wrapping the given pointer. + + Args: + ptr (intptr_t): pointer address as Python :class:`int` to the data. + size (int): number of structs, default=1. + readonly (bool): whether the data is read-only (to the user). default is `False`. + """ + if ptr == 0: + raise ValueError("ptr must not be null (0)") + cdef Sample obj = Sample.__new__(Sample) + cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE + cdef object buf = cpython.memoryview.PyMemoryView_FromMemory( + ptr, sizeof(nvmlSample_t) * size, flag) + data = _numpy.ndarray(size, buffer=buf, dtype=sample_dtype) + obj._data = data.view(_numpy.recarray) + + return obj + + +cdef _get_vgpu_instance_utilization_sample_dtype_offsets(): + cdef nvmlVgpuInstanceUtilizationSample_t pod = nvmlVgpuInstanceUtilizationSample_t() + return _numpy.dtype({ + 'names': ['vgpu_instance', 'time_stamp', 'sm_util', 'mem_util', 'enc_util', 'dec_util'], + 'formats': [_numpy.uint32, _numpy.uint64, value_dtype, value_dtype, value_dtype, value_dtype], + 'offsets': [ + (&(pod.vgpuInstance)) - (&pod), + (&(pod.timeStamp)) - (&pod), + (&(pod.smUtil)) - (&pod), + (&(pod.memUtil)) - (&pod), + (&(pod.encUtil)) - (&pod), + (&(pod.decUtil)) - (&pod), + ], + 'itemsize': sizeof(nvmlVgpuInstanceUtilizationSample_t), + }) + +vgpu_instance_utilization_sample_dtype = _get_vgpu_instance_utilization_sample_dtype_offsets() + +cdef class VgpuInstanceUtilizationSample: + """Empty-initialize an array of `nvmlVgpuInstanceUtilizationSample_t`. + + The resulting object is of length `size` and of dtype `vgpu_instance_utilization_sample_dtype`. + If default-constructed, the instance represents a single struct. + + Args: + size (int): number of structs, default=1. + + + .. 
seealso:: `nvmlVgpuInstanceUtilizationSample_t` + """ + cdef: + readonly object _data + + + + def __init__(self, size=1): + arr = _numpy.empty(size, dtype=vgpu_instance_utilization_sample_dtype) + self._data = arr.view(_numpy.recarray) + assert self._data.itemsize == sizeof(nvmlVgpuInstanceUtilizationSample_t), \ + f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlVgpuInstanceUtilizationSample_t) }" + + def __repr__(self): + if self._data.size > 1: + return f"<{__name__}.VgpuInstanceUtilizationSample_Array_{self._data.size} object at {hex(id(self))}>" + else: + return f"<{__name__}.VgpuInstanceUtilizationSample object at {hex(id(self))}>" + + @property + def ptr(self): + """Get the pointer address to the data as Python :class:`int`.""" + return self._data.ctypes.data + + cdef intptr_t _get_ptr(self): + return self._data.ctypes.data + + def __int__(self): + if self._data.size > 1: + raise TypeError("int() argument must be a bytes-like object of size 1. " + "To get the pointer address of an array, use .ptr") + return self._data.ctypes.data + + def __len__(self): + return self._data.size + + def __eq__(self, other): + cdef object self_data = self._data + if (not isinstance(other, VgpuInstanceUtilizationSample)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype: + return False + return bool((self_data == other._data).all()) + + @property + def vgpu_instance(self): + """Union[~_numpy.uint32, int]: """ + if self._data.size == 1: + return int(self._data.vgpu_instance[0]) + return self._data.vgpu_instance + + @vgpu_instance.setter + def vgpu_instance(self, val): + self._data.vgpu_instance = val + + @property + def time_stamp(self): + """Union[~_numpy.uint64, int]: """ + if self._data.size == 1: + return int(self._data.time_stamp[0]) + return self._data.time_stamp + + @time_stamp.setter + def time_stamp(self, val): + self._data.time_stamp = val + + @property + def sm_util(self): + """value_dtype: """ + return self._data.sm_util + + @sm_util.setter + def sm_util(self, val): + self._data.sm_util = val + + @property + def mem_util(self): + """value_dtype: """ + return self._data.mem_util + + @mem_util.setter + def mem_util(self, val): + self._data.mem_util = val + + @property + def enc_util(self): + """value_dtype: """ + return self._data.enc_util + + @enc_util.setter + def enc_util(self, val): + self._data.enc_util = val @property - def sample_value(self): + def dec_util(self): """value_dtype: """ - return self._data.sample_value + return self._data.dec_util - @sample_value.setter - def sample_value(self, val): - self._data.sample_value = val + @dec_util.setter + def dec_util(self, val): + self._data.dec_util = val def __getitem__(self, key): cdef ssize_t key_ @@ -16717,10 +15183,10 @@ cdef class Sample: raise IndexError("index is out of bounds") if key_ < 0: key_ += size - return Sample.from_data(self._data[key_:key_+1]) + return VgpuInstanceUtilizationSample.from_data(self._data[key_:key_+1]) out = self._data[key] - if isinstance(out, _numpy.recarray) and out.dtype == sample_dtype: - return Sample.from_data(out) + if isinstance(out, _numpy.recarray) and out.dtype == vgpu_instance_utilization_sample_dtype: + return VgpuInstanceUtilizationSample.from_data(out) return out def __setitem__(self, key, val): @@ -16728,25 +15194,25 @@ cdef class Sample: @staticmethod def from_data(data): - """Create an Sample instance wrapping the given NumPy array. + """Create an VgpuInstanceUtilizationSample instance wrapping the given NumPy array. 
Args: - data (_numpy.ndarray): a 1D array of dtype `sample_dtype` holding the data. + data (_numpy.ndarray): a 1D array of dtype `vgpu_instance_utilization_sample_dtype` holding the data. """ - cdef Sample obj = Sample.__new__(Sample) + cdef VgpuInstanceUtilizationSample obj = VgpuInstanceUtilizationSample.__new__(VgpuInstanceUtilizationSample) if not isinstance(data, _numpy.ndarray): raise TypeError("data argument must be a NumPy ndarray") if data.ndim != 1: raise ValueError("data array must be 1D") - if data.dtype != sample_dtype: - raise ValueError("data array must be of dtype sample_dtype") + if data.dtype != vgpu_instance_utilization_sample_dtype: + raise ValueError("data array must be of dtype vgpu_instance_utilization_sample_dtype") obj._data = data.view(_numpy.recarray) return obj @staticmethod def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False): - """Create an Sample instance wrapping the given pointer. + """Create an VgpuInstanceUtilizationSample instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. @@ -16755,11 +15221,11 @@ cdef class Sample: """ if ptr == 0: raise ValueError("ptr must not be null (0)") - cdef Sample obj = Sample.__new__(Sample) + cdef VgpuInstanceUtilizationSample obj = VgpuInstanceUtilizationSample.__new__(VgpuInstanceUtilizationSample) cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE cdef object buf = cpython.memoryview.PyMemoryView_FromMemory( - ptr, sizeof(nvmlSample_t) * size, flag) - data = _numpy.ndarray(size, buffer=buf, dtype=sample_dtype) + ptr, sizeof(nvmlVgpuInstanceUtilizationSample_t) * size, flag) + data = _numpy.ndarray(size, buffer=buf, dtype=vgpu_instance_utilization_sample_dtype) obj._data = data.view(_numpy.recarray) return obj @@ -17533,7 +15999,7 @@ cdef class ClkMonStatus: @property def clk_mon_list(self): """ClkMonFaultInfo: """ - return ClkMonFaultInfo.from_ptr(&(self._ptr[0].clkMonList), 32, self._readonly) + return ClkMonFaultInfo.from_ptr(&(self._ptr[0].clkMonList), self._ptr[0].clkMonListSize, self._readonly) @clk_mon_list.setter def clk_mon_list(self, val): @@ -17555,17 +16021,6 @@ cdef class ClkMonStatus: raise ValueError("This ClkMonStatus instance is read-only") self._ptr[0].bGlobalStatus = val - @property - def clk_mon_list_size(self): - """int: """ - return self._ptr[0].clkMonListSize - - @clk_mon_list_size.setter - def clk_mon_list_size(self, val): - if self._readonly: - raise ValueError("This ClkMonStatus instance is read-only") - self._ptr[0].clkMonListSize = val - @staticmethod def from_data(data): """Create an ClkMonStatus instance wrapping the given NumPy array. @@ -19754,133 +18209,141 @@ cdef _get_prm_counter_v1_dtype_offsets(): prm_counter_v1_dtype = _get_prm_counter_v1_dtype_offsets() cdef class PRMCounter_v1: - """Empty-initialize an instance of `nvmlPRMCounter_v1_t`. + """Empty-initialize an array of `nvmlPRMCounter_v1_t`. + + The resulting object is of length `size` and of dtype `prm_counter_v1_dtype`. + If default-constructed, the instance represents a single struct. + + Args: + size (int): number of structs, default=1. .. 
seealso:: `nvmlPRMCounter_v1_t` """ cdef: - nvmlPRMCounter_v1_t *_ptr - object _owner - bint _owned - bint _readonly + readonly object _data - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlPRMCounter_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating PRMCounter_v1") - self._owner = None - self._owned = True - self._readonly = False - def __dealloc__(self): - cdef nvmlPRMCounter_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) + + def __init__(self, size=1): + arr = _numpy.empty(size, dtype=prm_counter_v1_dtype) + self._data = arr.view(_numpy.recarray) + assert self._data.itemsize == sizeof(nvmlPRMCounter_v1_t), \ + f"itemsize {self._data.itemsize} mismatches struct size { sizeof(nvmlPRMCounter_v1_t) }" def __repr__(self): - return f"<{__name__}.PRMCounter_v1 object at {hex(id(self))}>" + if self._data.size > 1: + return f"<{__name__}.PRMCounter_v1_Array_{self._data.size} object at {hex(id(self))}>" + else: + return f"<{__name__}.PRMCounter_v1 object at {hex(id(self))}>" @property def ptr(self): """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) + return self._data.ctypes.data cdef intptr_t _get_ptr(self): - return (self._ptr) + return self._data.ctypes.data def __int__(self): - return (self._ptr) + if self._data.size > 1: + raise TypeError("int() argument must be a bytes-like object of size 1. " + "To get the pointer address of an array, use .ptr") + return self._data.ctypes.data + + def __len__(self): + return self._data.size def __eq__(self, other): - cdef PRMCounter_v1 other_ - if not isinstance(other, PRMCounter_v1): + cdef object self_data = self._data + if (not isinstance(other, PRMCounter_v1)) or self_data.size != other._data.size or self_data.dtype != other._data.dtype: return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlPRMCounter_v1_t)) == 0) + return bool((self_data == other._data).all()) - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlPRMCounter_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating PRMCounter_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlPRMCounter_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) + @property + def counter_id(self): + """Union[~_numpy.uint32, int]: Counter ID, one of nvmlPRMCounterId_t.""" + if self._data.size == 1: + return int(self._data.counter_id[0]) + return self._data.counter_id + + @counter_id.setter + def counter_id(self, val): + self._data.counter_id = val @property def in_data(self): - """PRMCounterInput_v1: PRM input values.""" - return PRMCounterInput_v1.from_ptr(&(self._ptr[0].inData), self._readonly, self) + """prm_counter_input_v1_dtype: PRM input values.""" + return self._data.in_data @in_data.setter def in_data(self, val): - if self._readonly: - raise ValueError("This PRMCounter_v1 instance is read-only") - cdef PRMCounterInput_v1 val_ = val - memcpy(&(self._ptr[0].inData), (val_._get_ptr()), sizeof(nvmlPRMCounterInput_v1_t) * 1) + self._data.in_data = val @property def counter_value(self): - """PRMCounterValue_v1: Counter value.""" - return PRMCounterValue_v1.from_ptr(&(self._ptr[0].counterValue), self._readonly, self) + """prm_counter_value_v1_dtype: Counter value.""" + return self._data.counter_value @counter_value.setter def counter_value(self, val): - if self._readonly: - raise ValueError("This PRMCounter_v1 
instance is read-only") - cdef PRMCounterValue_v1 val_ = val - memcpy(&(self._ptr[0].counterValue), (val_._get_ptr()), sizeof(nvmlPRMCounterValue_v1_t) * 1) + self._data.counter_value = val - @property - def counter_id(self): - """int: Counter ID, one of nvmlPRMCounterId_t.""" - return self._ptr[0].counterId + def __getitem__(self, key): + cdef ssize_t key_ + cdef ssize_t size + if isinstance(key, int): + key_ = key + size = self._data.size + if key_ >= size or key_ <= -(size+1): + raise IndexError("index is out of bounds") + if key_ < 0: + key_ += size + return PRMCounter_v1.from_data(self._data[key_:key_+1]) + out = self._data[key] + if isinstance(out, _numpy.recarray) and out.dtype == prm_counter_v1_dtype: + return PRMCounter_v1.from_data(out) + return out - @counter_id.setter - def counter_id(self, val): - if self._readonly: - raise ValueError("This PRMCounter_v1 instance is read-only") - self._ptr[0].counterId = val + def __setitem__(self, key, val): + self._data[key] = val @staticmethod def from_data(data): """Create an PRMCounter_v1 instance wrapping the given NumPy array. Args: - data (_numpy.ndarray): a single-element array of dtype `prm_counter_v1_dtype` holding the data. + data (_numpy.ndarray): a 1D array of dtype `prm_counter_v1_dtype` holding the data. """ - return __from_data(data, "prm_counter_v1_dtype", prm_counter_v1_dtype, PRMCounter_v1) + cdef PRMCounter_v1 obj = PRMCounter_v1.__new__(PRMCounter_v1) + if not isinstance(data, _numpy.ndarray): + raise TypeError("data argument must be a NumPy ndarray") + if data.ndim != 1: + raise ValueError("data array must be 1D") + if data.dtype != prm_counter_v1_dtype: + raise ValueError("data array must be of dtype prm_counter_v1_dtype") + obj._data = data.view(_numpy.recarray) + + return obj @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): + def from_ptr(intptr_t ptr, size_t size=1, bint readonly=False): """Create an PRMCounter_v1 instance wrapping the given pointer. Args: ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. + size (int): number of structs, default=1. readonly (bool): whether the data is read-only (to the user). default is `False`. 
""" if ptr == 0: raise ValueError("ptr must not be null (0)") cdef PRMCounter_v1 obj = PRMCounter_v1.__new__(PRMCounter_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlPRMCounter_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating PRMCounter_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlPRMCounter_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly + cdef flag = cpython.buffer.PyBUF_READ if readonly else cpython.buffer.PyBUF_WRITE + cdef object buf = cpython.memoryview.PyMemoryView_FromMemory( + ptr, sizeof(nvmlPRMCounter_v1_t) * size, flag) + data = _numpy.ndarray(size, buffer=buf, dtype=prm_counter_v1_dtype) + obj._data = data.view(_numpy.recarray) + return obj @@ -20825,7 +19288,7 @@ cdef class GridLicensableFeatures: @property def grid_licensable_features(self): """GridLicensableFeature: """ - return GridLicensableFeature.from_ptr(&(self._ptr[0].gridLicensableFeatures), 3, self._readonly) + return GridLicensableFeature.from_ptr(&(self._ptr[0].gridLicensableFeatures), self._ptr[0].licensableFeaturesCount, self._readonly) @grid_licensable_features.setter def grid_licensable_features(self, val): @@ -20847,17 +19310,6 @@ cdef class GridLicensableFeatures: raise ValueError("This GridLicensableFeatures instance is read-only") self._ptr[0].isGridLicenseSupported = val - @property - def licensable_features_count(self): - """int: """ - return self._ptr[0].licensableFeaturesCount - - @licensable_features_count.setter - def licensable_features_count(self, val): - if self._readonly: - raise ValueError("This GridLicensableFeatures instance is read-only") - self._ptr[0].licensableFeaturesCount = val - @staticmethod def from_data(data): """Create an GridLicensableFeatures instance wrapping the given NumPy array. @@ -21039,135 +19491,6 @@ cdef class NvLinkInfo_v2: return obj -cdef _get_prm_counter_list_v1_dtype_offsets(): - cdef nvmlPRMCounterList_v1_t pod = nvmlPRMCounterList_v1_t() - return _numpy.dtype({ - 'names': ['num_counters', 'counters'], - 'formats': [_numpy.uint32, _numpy.intp], - 'offsets': [ - (&(pod.numCounters)) - (&pod), - (&(pod.counters)) - (&pod), - ], - 'itemsize': sizeof(nvmlPRMCounterList_v1_t), - }) - -prm_counter_list_v1_dtype = _get_prm_counter_list_v1_dtype_offsets() - -cdef class PRMCounterList_v1: - """Empty-initialize an instance of `nvmlPRMCounterList_v1_t`. - - - .. 
seealso:: `nvmlPRMCounterList_v1_t` - """ - cdef: - nvmlPRMCounterList_v1_t *_ptr - object _owner - bint _owned - bint _readonly - dict _refs - - def __init__(self): - self._ptr = calloc(1, sizeof(nvmlPRMCounterList_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating PRMCounterList_v1") - self._owner = None - self._owned = True - self._readonly = False - self._refs = {} - - def __dealloc__(self): - cdef nvmlPRMCounterList_v1_t *ptr - if self._owned and self._ptr != NULL: - ptr = self._ptr - self._ptr = NULL - free(ptr) - - def __repr__(self): - return f"<{__name__}.PRMCounterList_v1 object at {hex(id(self))}>" - - @property - def ptr(self): - """Get the pointer address to the data as Python :class:`int`.""" - return (self._ptr) - - cdef intptr_t _get_ptr(self): - return (self._ptr) - - def __int__(self): - return (self._ptr) - - def __eq__(self, other): - cdef PRMCounterList_v1 other_ - if not isinstance(other, PRMCounterList_v1): - return False - other_ = other - return (memcmp((self._ptr), (other_._ptr), sizeof(nvmlPRMCounterList_v1_t)) == 0) - - def __setitem__(self, key, val): - if key == 0 and isinstance(val, _numpy.ndarray): - self._ptr = malloc(sizeof(nvmlPRMCounterList_v1_t)) - if self._ptr == NULL: - raise MemoryError("Error allocating PRMCounterList_v1") - memcpy(self._ptr, val.ctypes.data, sizeof(nvmlPRMCounterList_v1_t)) - self._owner = None - self._owned = True - self._readonly = not val.flags.writeable - else: - setattr(self, key, val) - - @property - def counters(self): - """int: Pointer to array of PRM counters.""" - if self._ptr[0].counters == NULL or self._ptr[0].numCounters == 0: - return [] - return PRMCounter_v1.from_ptr((self._ptr[0].counters), self._ptr[0].numCounters) - - @counters.setter - def counters(self, val): - if self._readonly: - raise ValueError("This PRMCounterList_v1 instance is read-only") - cdef PRMCounter_v1 arr = val - self._ptr[0].counters = (arr._get_ptr()) - self._ptr[0].numCounters = len(arr) - self._refs["counters"] = arr - - @staticmethod - def from_data(data): - """Create an PRMCounterList_v1 instance wrapping the given NumPy array. - - Args: - data (_numpy.ndarray): a single-element array of dtype `prm_counter_list_v1_dtype` holding the data. - """ - return __from_data(data, "prm_counter_list_v1_dtype", prm_counter_list_v1_dtype, PRMCounterList_v1) - - @staticmethod - def from_ptr(intptr_t ptr, bint readonly=False, object owner=None): - """Create an PRMCounterList_v1 instance wrapping the given pointer. - - Args: - ptr (intptr_t): pointer address as Python :class:`int` to the data. - owner (object): The Python object that owns the pointer. If not provided, data will be copied. - readonly (bool): whether the data is read-only (to the user). default is `False`. - """ - if ptr == 0: - raise ValueError("ptr must not be null (0)") - cdef PRMCounterList_v1 obj = PRMCounterList_v1.__new__(PRMCounterList_v1) - if owner is None: - obj._ptr = malloc(sizeof(nvmlPRMCounterList_v1_t)) - if obj._ptr == NULL: - raise MemoryError("Error allocating PRMCounterList_v1") - memcpy((obj._ptr), ptr, sizeof(nvmlPRMCounterList_v1_t)) - obj._owner = None - obj._owned = True - else: - obj._ptr = ptr - obj._owner = owner - obj._owned = False - obj._readonly = readonly - obj._refs = {} - return obj - - cpdef init_v2(): """Initialize NVML, but don't initialize any GPUs yet. @@ -21530,24 +19853,6 @@ cpdef intptr_t device_get_handle_by_uuid(uuid) except? 0: return device -cpdef intptr_t device_get_handle_by_uuidv(intptr_t uuid) except? 
0: - """Acquire the handle for a particular device, based on its globally unique immutable UUID (in either ASCII or binary format) associated with each device. See ``nvmlUUID_v1_t`` for more information on the UUID struct. The caller must set the appropriate version prior to calling this API. - - Args: - uuid (intptr_t): The UUID of the target GPU or MIG instance. - - Returns: - intptr_t: Reference in which to return the device handle or MIG device handle. - - .. seealso:: `nvmlDeviceGetHandleByUUIDV` - """ - cdef Device device - with nogil: - __status__ = nvmlDeviceGetHandleByUUIDV(uuid, &device) - check_status(__status__) - return device - - cpdef intptr_t device_get_handle_by_pci_bus_id_v2(pci_bus_id) except? 0: """Acquire the handle for a particular device, based on its PCI bus id. @@ -22700,26 +21005,6 @@ cpdef device_set_clock_offsets(intptr_t device, intptr_t info): check_status(__status__) -cpdef object device_get_performance_modes(intptr_t device): - """Retrieves a performance mode string with all the performance modes defined for this device along with their associated GPU Clock and Memory Clock values. Not all tokens will be reported on all GPUs, and additional tokens may be added in the future. For backwards compatibility we still provide nvclock and memclock; those are the same as nvclockmin and memclockmin. - - Args: - device (intptr_t): The identifier of the target device. - - Returns: - nvmlDevicePerfModes_v1_t: Reference in which to return the performance level string. - - .. seealso:: `nvmlDeviceGetPerformanceModes` - """ - cdef DevicePerfModes_v1 perf_modes_py = DevicePerfModes_v1() - cdef nvmlDevicePerfModes_t *perf_modes = (perf_modes_py._get_ptr()) - perf_modes.version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlDeviceGetPerformanceModes(device, perf_modes) - check_status(__status__) - return perf_modes_py - - cpdef object device_get_current_clock_freqs(intptr_t device): """Retrieves a string with the associated current GPU Clock and Memory Clock values. @@ -22934,42 +21219,6 @@ cpdef tuple device_get_cuda_compute_capability(intptr_t device): return (major, minor) -cpdef tuple device_get_dram_encryption_mode(intptr_t device): - """Retrieves the current and pending DRAM Encryption modes for the device. - - Args: - device (intptr_t): The identifier of the target device. - - Returns: - A 2-tuple containing: - - - nvmlDramEncryptionInfo_v1_t: Reference in which to return the current DRAM Encryption mode. - - nvmlDramEncryptionInfo_v1_t: Reference in which to return the pending DRAM Encryption mode. - - .. seealso:: `nvmlDeviceGetDramEncryptionMode` - """ - cdef nvmlDramEncryptionInfo_t current - cdef nvmlDramEncryptionInfo_t pending - with nogil: - __status__ = nvmlDeviceGetDramEncryptionMode(device, ¤t, &pending) - check_status(__status__) - return (current, pending) - - -cpdef device_set_dram_encryption_mode(intptr_t device, intptr_t dram_encryption): - """Set the DRAM Encryption mode for the device. - - Args: - device (intptr_t): The identifier of the target device. - dram_encryption (intptr_t): The target DRAM Encryption mode. - - .. seealso:: `nvmlDeviceSetDramEncryptionMode` - """ - with nogil: - __status__ = nvmlDeviceSetDramEncryptionMode(device, dram_encryption) - check_status(__status__) - - cpdef tuple device_get_ecc_mode(intptr_t device): """Retrieves the current and pending ECC modes for the device. 
@@ -23706,42 +21955,6 @@ cpdef object device_get_conf_compute_gpu_certificate(intptr_t device): return gpu_cert_py -cpdef object device_get_conf_compute_gpu_attestation_report(intptr_t device): - """Get Conf Computing GPU attestation report. - - Args: - device (intptr_t): The identifier of the target device. - - Returns: - nvmlConfComputeGpuAttestationReport_t: Reference in which to return the gpu attestation report. - - .. seealso:: `nvmlDeviceGetConfComputeGpuAttestationReport` - """ - cdef ConfComputeGpuAttestationReport gpu_atst_report_py = ConfComputeGpuAttestationReport() - cdef nvmlConfComputeGpuAttestationReport_t *gpu_atst_report = (gpu_atst_report_py._get_ptr()) - with nogil: - __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(device, gpu_atst_report) - check_status(__status__) - return gpu_atst_report_py - - -cpdef object system_get_conf_compute_key_rotation_threshold_info(): - """Get Conf Computing key rotation threshold detail. - - Returns: - nvmlConfComputeGetKeyRotationThresholdInfo_v1_t: Reference in which to return the key rotation threshold data. - - .. seealso:: `nvmlSystemGetConfComputeKeyRotationThresholdInfo` - """ - cdef ConfComputeGetKeyRotationThresholdInfo_v1 p_key_rotation_thr_info_py = ConfComputeGetKeyRotationThresholdInfo_v1() - cdef nvmlConfComputeGetKeyRotationThresholdInfo_t *p_key_rotation_thr_info = (p_key_rotation_thr_info_py._get_ptr()) - p_key_rotation_thr_info.version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info) - check_status(__status__) - return p_key_rotation_thr_info_py - - cpdef device_set_conf_compute_unprotected_mem_size(intptr_t device, unsigned long long size_ki_b): """Set Conf Computing Unprotected Memory Size. @@ -23769,19 +21982,6 @@ cpdef system_set_conf_compute_gpus_ready_state(unsigned int is_accepting_work): check_status(__status__) -cpdef system_set_conf_compute_key_rotation_threshold_info(intptr_t p_key_rotation_thr_info): - """Set Conf Computing key rotation threshold. - - Args: - p_key_rotation_thr_info (intptr_t): Reference to the key rotation threshold data. - - .. seealso:: `nvmlSystemSetConfComputeKeyRotationThresholdInfo` - """ - with nogil: - __status__ = nvmlSystemSetConfComputeKeyRotationThresholdInfo(p_key_rotation_thr_info) - check_status(__status__) - - cpdef object system_get_conf_compute_settings(): """Get Conf Computing System Settings. @@ -24282,35 +22482,6 @@ cpdef device_set_fan_control_policy(intptr_t device, unsigned int fan, unsigned check_status(__status__) -cpdef device_set_temperature_threshold(intptr_t device, int threshold_type, intptr_t temp): - """Sets the temperature threshold for the GPU with the specified threshold type in degrees C. - - Args: - device (intptr_t): The identifier of the target device. - threshold_type (TemperatureThresholds): The type of threshold value to be set. - temp (intptr_t): Reference which hold the value to be set. - - .. seealso:: `nvmlDeviceSetTemperatureThreshold` - """ - with nogil: - __status__ = nvmlDeviceSetTemperatureThreshold(device, <_TemperatureThresholds>threshold_type, temp) - check_status(__status__) - - -cpdef device_set_power_management_limit(intptr_t device, unsigned int limit): - """Set new power limit of this device. - - Args: - device (intptr_t): The identifier of the target device. - limit (unsigned int): Power management limit in milliwatts to set. - - .. 
seealso:: `nvmlDeviceSetPowerManagementLimit` - """ - with nogil: - __status__ = nvmlDeviceSetPowerManagementLimit(device, limit) - check_status(__status__) - - cpdef device_set_gpu_operation_mode(intptr_t device, int mode): """Sets new GOM. See ``nvmlGpuOperationMode_t`` for details. @@ -24382,20 +22553,6 @@ cpdef device_clear_accounting_pids(intptr_t device): check_status(__status__) -cpdef device_set_power_management_limit_v2(intptr_t device, intptr_t power_value): - """Set new power limit of this device. - - Args: - device (intptr_t): The identifier of the target device. - power_value (intptr_t): Power management limit in milliwatts to set. - - .. seealso:: `nvmlDeviceSetPowerManagementLimit_v2` - """ - with nogil: - __status__ = nvmlDeviceSetPowerManagementLimit_v2(device, power_value) - check_status(__status__) - - cpdef int device_get_nvlink_state(intptr_t device, unsigned int link) except? -1: """Retrieves the state of the device's NvLink for the link specified. @@ -24527,20 +22684,6 @@ cpdef int device_get_nvlink_remote_device_type(intptr_t device, unsigned int lin return p_nv_link_device_type -cpdef device_set_nvlink_device_low_power_threshold(intptr_t device, intptr_t info): - """Set NvLink Low Power Threshold for device. - - Args: - device (intptr_t): The identifier of the target device. - info (intptr_t): Reference to ``nvmlNvLinkPowerThres_t`` struct input parameters. - - .. seealso:: `nvmlDeviceSetNvLinkDeviceLowPowerThreshold` - """ - with nogil: - __status__ = nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info) - check_status(__status__) - - cpdef system_set_nvlink_bw_mode(unsigned int nvlink_bw_mode): """Set the global nvlink bandwith mode. @@ -24815,80 +22958,6 @@ cpdef device_set_virtualization_mode(intptr_t device, int virtual_mode): check_status(__status__) -cpdef object device_get_vgpu_heterogeneous_mode(intptr_t device): - """Get the vGPU heterogeneous mode for the device. - - Args: - device (intptr_t): The identifier of the target device. - - Returns: - nvmlVgpuHeterogeneousMode_v1_t: Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t. - - .. seealso:: `nvmlDeviceGetVgpuHeterogeneousMode` - """ - cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1() - cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = (p_heterogeneous_mode_py._get_ptr()) - p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlDeviceGetVgpuHeterogeneousMode(device, p_heterogeneous_mode) - check_status(__status__) - return p_heterogeneous_mode_py - - -cpdef device_set_vgpu_heterogeneous_mode(intptr_t device, intptr_t p_heterogeneous_mode): - """Enable or disable vGPU heterogeneous mode for the device. - - Args: - device (intptr_t): Identifier of the target device. - p_heterogeneous_mode (intptr_t): Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t. - - .. seealso:: `nvmlDeviceSetVgpuHeterogeneousMode` - """ - with nogil: - __status__ = nvmlDeviceSetVgpuHeterogeneousMode(device, p_heterogeneous_mode) - check_status(__status__) - - -cpdef object vgpu_instance_get_placement_id(unsigned int vgpu_instance): - """Query the placement ID of active vGPU instance. - - Args: - vgpu_instance (unsigned int): Identifier of the target vGPU instance. - - Returns: - nvmlVgpuPlacementId_v1_t: Pointer to vGPU placement ID structure ``nvmlVgpuPlacementId_t``. - - .. 
seealso:: `nvmlVgpuInstanceGetPlacementId` - """ - cdef VgpuPlacementId_v1 p_placement_py = VgpuPlacementId_v1() - cdef nvmlVgpuPlacementId_t *p_placement = (p_placement_py._get_ptr()) - p_placement.version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlVgpuInstanceGetPlacementId(vgpu_instance, p_placement) - check_status(__status__) - return p_placement_py - - -cpdef object device_get_vgpu_type_supported_placements(intptr_t device, unsigned int vgpu_type_id): - """Query the supported vGPU placement ID of the vGPU type. - - Args: - device (intptr_t): Identifier of the target device. - vgpu_type_id (unsigned int): Handle to vGPU type. The vGPU type ID. - - Returns: - nvmlVgpuPlacementList_v2_t: Pointer to the vGPU placement structure ``nvmlVgpuPlacementList_t``. - - .. seealso:: `nvmlDeviceGetVgpuTypeSupportedPlacements` - """ - cdef VgpuPlacementList_v2 p_placement_list_py = VgpuPlacementList_v2() - cdef nvmlVgpuPlacementList_t *p_placement_list = (p_placement_list_py._get_ptr()) - with nogil: - __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(device, vgpu_type_id, p_placement_list) - check_status(__status__) - return p_placement_list_py - - cpdef unsigned long long vgpu_type_get_gsp_heap_size(unsigned int vgpu_type_id) except? 0: """Retrieve the static GSP heap size of the vGPU type in bytes. @@ -24925,26 +22994,6 @@ cpdef unsigned long long vgpu_type_get_fb_reservation(unsigned int vgpu_type_id) return fb_reservation -cpdef object vgpu_instance_get_runtime_state_size(unsigned int vgpu_instance): - """Retrieve the currently used runtime state size of the vGPU instance. - - Args: - vgpu_instance (unsigned int): Identifier of the target vGPU instance. - - Returns: - nvmlVgpuRuntimeState_v1_t: Pointer to the vGPU runtime state's structure ``nvmlVgpuRuntimeState_t``. - - .. seealso:: `nvmlVgpuInstanceGetRuntimeStateSize` - """ - cdef VgpuRuntimeState_v1 p_state_py = VgpuRuntimeState_v1() - cdef nvmlVgpuRuntimeState_t *p_state = (p_state_py._get_ptr()) - p_state.version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlVgpuInstanceGetRuntimeStateSize(vgpu_instance, p_state) - check_status(__status__) - return p_state_py - - cpdef device_set_vgpu_capabilities(intptr_t device, int capability, int state): """Set the desirable vGPU capability of a device. @@ -25153,19 +23202,20 @@ cpdef tuple vgpu_type_get_resolution(unsigned int vgpu_type_id, unsigned int dis return (xdim, ydim) -cpdef vgpu_type_get_license(unsigned int vgpu_type_id, intptr_t vgpu_type_license_string, unsigned int size): +cpdef str vgpu_type_get_license(unsigned int vgpu_type_id): """Retrieve license requirements for a vGPU type. Args: vgpu_type_id (unsigned int): Handle to vGPU type. - vgpu_type_license_string (intptr_t): Pointer to buffer to return license info. - size (unsigned int): Size of ``vgpu_type_license_string`` buffer. .. seealso:: `nvmlVgpuTypeGetLicense` """ + cdef unsigned int size = 128 + cdef char[128] vgpu_type_license_string with nogil: - __status__ = nvmlVgpuTypeGetLicense(vgpu_type_id, vgpu_type_license_string, size) + __status__ = nvmlVgpuTypeGetLicense(vgpu_type_id, vgpu_type_license_string, size) check_status(__status__) + return cpython.PyUnicode_FromString(vgpu_type_license_string) cpdef unsigned int vgpu_type_get_frame_rate_limit(unsigned int vgpu_type_id) except? 
0: @@ -25559,23 +23609,6 @@ cpdef str vgpu_instance_get_mdev_uuid(unsigned int vgpu_instance): return cpython.PyUnicode_FromString(mdev_uuid) -cpdef object vgpu_type_get_max_instances_per_gpu_instance(): - """Retrieve the maximum number of vGPU instances per GPU instance for given vGPU type. - - Returns: - nvmlVgpuTypeMaxInstance_v1_t: Pointer to the caller-provided structure of nvmlVgpuTypeMaxInstance_t. - - .. seealso:: `nvmlVgpuTypeGetMaxInstancesPerGpuInstance` - """ - cdef VgpuTypeMaxInstance_v1 p_max_instance_py = VgpuTypeMaxInstance_v1() - cdef nvmlVgpuTypeMaxInstance_t *p_max_instance = (p_max_instance_py._get_ptr()) - p_max_instance.version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(p_max_instance) - check_status(__status__) - return p_max_instance_py - - cpdef gpu_instance_set_vgpu_scheduler_state(intptr_t gpu_instance, intptr_t p_scheduler): """Set vGPU scheduler state for the given GPU instance. @@ -25585,6 +23618,7 @@ cpdef gpu_instance_set_vgpu_scheduler_state(intptr_t gpu_instance, intptr_t p_sc .. seealso:: `nvmlGpuInstanceSetVgpuSchedulerState` """ + (p_scheduler).version = sizeof(nvmlVgpuSchedulerState_v1_t) | (1 << 24) with nogil: __status__ = nvmlGpuInstanceSetVgpuSchedulerState(gpu_instance, p_scheduler) check_status(__status__) @@ -25630,40 +23664,6 @@ cpdef object gpu_instance_get_vgpu_scheduler_log(intptr_t gpu_instance): return p_scheduler_log_info_py -cpdef object gpu_instance_get_vgpu_heterogeneous_mode(intptr_t gpu_instance): - """Get the vGPU heterogeneous mode for the GPU instance. - - Args: - gpu_instance (intptr_t): The GPU instance handle. - - Returns: - nvmlVgpuHeterogeneousMode_v1_t: Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t. - - .. seealso:: `nvmlGpuInstanceGetVgpuHeterogeneousMode` - """ - cdef VgpuHeterogeneousMode_v1 p_heterogeneous_mode_py = VgpuHeterogeneousMode_v1() - cdef nvmlVgpuHeterogeneousMode_t *p_heterogeneous_mode = (p_heterogeneous_mode_py._get_ptr()) - p_heterogeneous_mode.version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(gpu_instance, p_heterogeneous_mode) - check_status(__status__) - return p_heterogeneous_mode_py - - -cpdef gpu_instance_set_vgpu_heterogeneous_mode(intptr_t gpu_instance, intptr_t p_heterogeneous_mode): - """Enable or disable vGPU heterogeneous mode for the GPU instance. - - Args: - gpu_instance (intptr_t): The GPU instance handle. - p_heterogeneous_mode (intptr_t): Pointer to the caller-provided structure of nvmlVgpuHeterogeneousMode_t. - - .. seealso:: `nvmlGpuInstanceSetVgpuHeterogeneousMode` - """ - with nogil: - __status__ = nvmlGpuInstanceSetVgpuHeterogeneousMode(gpu_instance, p_heterogeneous_mode) - check_status(__status__) - - cpdef str device_get_pgpu_metadata_string(intptr_t device): """Returns the properties of the physical GPU indicated by the device in an ascii-encoded string format. @@ -25770,31 +23770,6 @@ cpdef set_vgpu_version(intptr_t vgpu_version): check_status(__status__) -cpdef tuple device_get_vgpu_utilization(intptr_t device, unsigned long long last_seen_time_stamp): - """Retrieves current utilization for vGPUs on a physical GPU (device). - - Args: - device (intptr_t): The identifier for the target device. - last_seen_time_stamp (unsigned long long): Return only samples with timestamp greater than last_seen_time_stamp. 
- - Returns: - A 3-tuple containing: - - - int: Pointer to caller-supplied buffer to hold the type of returned sample values. - - unsigned int: Pointer to caller-supplied array size, and returns number of vGPU instances. - - nvmlVgpuInstanceUtilizationSample_t: Pointer to caller-supplied buffer in which vGPU utilization samples are returned. - - .. seealso:: `nvmlDeviceGetVgpuUtilization` - """ - cdef _ValueType sample_val_type - cdef unsigned int vgpu_instance_samples_count - cdef nvmlVgpuInstanceUtilizationSample_t utilization_samples - with nogil: - __status__ = nvmlDeviceGetVgpuUtilization(device, last_seen_time_stamp, &sample_val_type, &vgpu_instance_samples_count, &utilization_samples) - check_status(__status__) - return (sample_val_type, vgpu_instance_samples_count, utilization_samples) - - cpdef tuple device_get_vgpu_process_utilization(intptr_t device, unsigned long long last_seen_time_stamp): """Retrieves current utilization for processes running on vGPUs on a physical GPU (device). @@ -25977,33 +23952,12 @@ cpdef tuple device_get_mig_mode(intptr_t device): .. seealso:: `nvmlDeviceGetMigMode` """ - cdef unsigned int current_mode - cdef unsigned int pending_mode - with nogil: - __status__ = nvmlDeviceGetMigMode(device, ¤t_mode, &pending_mode) - check_status(__status__) - return (current_mode, pending_mode) - - -cpdef object device_get_gpu_instance_profile_info_v(intptr_t device, unsigned int profile): - """Versioned wrapper around ``nvmlDeviceGetGpuInstanceProfileInfo`` that accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure. - - Args: - device (intptr_t): The identifier of the target device. - profile (unsigned int): One of the NVML_GPU_INSTANCE_PROFILE_*. - - Returns: - nvmlGpuInstanceProfileInfo_v2_t: Returns detailed profile information. - - .. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoV` - """ - cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2() - cdef nvmlGpuInstanceProfileInfo_v2_t *info = (info_py._get_ptr()) - info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24) + cdef unsigned int current_mode + cdef unsigned int pending_mode with nogil: - __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(device, profile, info) + __status__ = nvmlDeviceGetMigMode(device, ¤t_mode, &pending_mode) check_status(__status__) - return info_py + return (current_mode, pending_mode) cpdef object device_get_gpu_instance_possible_placements_v2(intptr_t device, unsigned int profile_id): @@ -26176,1596 +24130,1980 @@ cpdef unsigned int gpu_instance_get_compute_instance_remaining_capacity(intptr_t with nogil: __status__ = nvmlGpuInstanceGetComputeInstanceRemainingCapacity(gpu_instance, profile_id, &count) check_status(__status__) - return count + return count + + +cpdef object gpu_instance_get_compute_instance_possible_placements(intptr_t gpu_instance, unsigned int profile_id): + """Get compute instance placements. + + Args: + gpu_instance (intptr_t): The identifier of the target GPU instance. + profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``. + + .. 
seealso:: `nvmlGpuInstanceGetComputeInstancePossiblePlacements` + """ + cdef unsigned int[1] count = [0] + with nogil: + __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpu_instance, profile_id, NULL, count) + check_status_size(__status__) + cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0]) + cdef nvmlComputeInstancePlacement_t *placements_ptr = (placements._get_ptr()) + if count[0] == 0: + return placements + with nogil: + __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpu_instance, profile_id, placements_ptr, count) + check_status(__status__) + return placements + + +cpdef intptr_t gpu_instance_create_compute_instance(intptr_t gpu_instance, unsigned int profile_id) except? 0: + """Create compute instance. + + Args: + gpu_instance (intptr_t): The identifier of the target GPU instance. + profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``. + + Returns: + intptr_t: Returns the compute instance handle. + + .. seealso:: `nvmlGpuInstanceCreateComputeInstance` + """ + cdef ComputeInstance compute_instance + with nogil: + __status__ = nvmlGpuInstanceCreateComputeInstance(gpu_instance, profile_id, &compute_instance) + check_status(__status__) + return compute_instance + + +cpdef intptr_t gpu_instance_create_compute_instance_with_placement(intptr_t gpu_instance, unsigned int profile_id, intptr_t placement) except? 0: + """Create compute instance with the specified placement. + + Args: + gpu_instance (intptr_t): The identifier of the target GPU instance. + profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``. + placement (intptr_t): The requested placement. See ``nvmlGpuInstanceGetComputeInstancePossiblePlacements``. + + Returns: + intptr_t: Returns the compute instance handle. + + .. seealso:: `nvmlGpuInstanceCreateComputeInstanceWithPlacement` + """ + cdef ComputeInstance compute_instance + with nogil: + __status__ = nvmlGpuInstanceCreateComputeInstanceWithPlacement(gpu_instance, profile_id, placement, &compute_instance) + check_status(__status__) + return compute_instance + + +cpdef compute_instance_destroy(intptr_t compute_instance): + """Destroy compute instance. + + Args: + compute_instance (intptr_t): The compute instance handle. + + .. seealso:: `nvmlComputeInstanceDestroy` + """ + with nogil: + __status__ = nvmlComputeInstanceDestroy(compute_instance) + check_status(__status__) + + +cpdef intptr_t gpu_instance_get_compute_instance_by_id(intptr_t gpu_instance, unsigned int id) except? 0: + """Get compute instance for given instance ID. + + Args: + gpu_instance (intptr_t): The identifier of the target GPU instance. + id (unsigned int): The compute instance ID. + + Returns: + intptr_t: Returns compute instance. + + .. seealso:: `nvmlGpuInstanceGetComputeInstanceById` + """ + cdef ComputeInstance compute_instance + with nogil: + __status__ = nvmlGpuInstanceGetComputeInstanceById(gpu_instance, id, &compute_instance) + check_status(__status__) + return compute_instance + + +cpdef object compute_instance_get_info_v2(intptr_t compute_instance): + """Get compute instance information. + + Args: + compute_instance (intptr_t): The compute instance handle. + + Returns: + nvmlComputeInstanceInfo_t: Return compute instance information. + + .. 
seealso:: `nvmlComputeInstanceGetInfo_v2` + """ + cdef ComputeInstanceInfo info_py = ComputeInstanceInfo() + cdef nvmlComputeInstanceInfo_t *info = (info_py._get_ptr()) + with nogil: + __status__ = nvmlComputeInstanceGetInfo_v2(compute_instance, info) + check_status(__status__) + return info_py + + +cpdef unsigned int device_is_mig_device_handle(intptr_t device) except? 0: + """Test if the given handle refers to a MIG device. + + Args: + device (intptr_t): NVML handle to test. + + Returns: + unsigned int: True when handle refers to a MIG device. + + .. seealso:: `nvmlDeviceIsMigDeviceHandle` + """ + cdef unsigned int is_mig_device + with nogil: + __status__ = nvmlDeviceIsMigDeviceHandle(device, &is_mig_device) + check_status(__status__) + return is_mig_device + + +cpdef unsigned int device_get_gpu_instance_id(intptr_t device) except? 0: + """Get GPU instance ID for the given MIG device handle. + + Args: + device (intptr_t): Target MIG device handle. + + Returns: + unsigned int: GPU instance ID. + + .. seealso:: `nvmlDeviceGetGpuInstanceId` + """ + cdef unsigned int id + with nogil: + __status__ = nvmlDeviceGetGpuInstanceId(device, &id) + check_status(__status__) + return id + + +cpdef unsigned int device_get_compute_instance_id(intptr_t device) except? 0: + """Get compute instance ID for the given MIG device handle. + + Args: + device (intptr_t): Target MIG device handle. + + Returns: + unsigned int: Compute instance ID. + + .. seealso:: `nvmlDeviceGetComputeInstanceId` + """ + cdef unsigned int id + with nogil: + __status__ = nvmlDeviceGetComputeInstanceId(device, &id) + check_status(__status__) + return id + + +cpdef unsigned int device_get_max_mig_device_count(intptr_t device) except? 0: + """Get the maximum number of MIG devices that can exist under a given parent NVML device. + + Args: + device (intptr_t): Target device handle. + + Returns: + unsigned int: Count of MIG devices. + + .. seealso:: `nvmlDeviceGetMaxMigDeviceCount` + """ + cdef unsigned int count + with nogil: + __status__ = nvmlDeviceGetMaxMigDeviceCount(device, &count) + check_status(__status__) + return count + + +cpdef intptr_t device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) except? 0: + """Get MIG device handle for the given ind_ex under its parent NVML device. + + Args: + device (intptr_t): Reference to the parent GPU device handle. + ind_ex (unsigned int): Index of the MIG device. + + Returns: + intptr_t: Reference to the MIG device handle. + + .. seealso:: `nvmlDeviceGetMigDeviceHandleByIndex` + """ + cdef Device mig_device + with nogil: + __status__ = nvmlDeviceGetMigDeviceHandleByIndex(device, ind_ex, &mig_device) + check_status(__status__) + return mig_device + + +cpdef intptr_t device_get_device_handle_from_mig_device_handle(intptr_t mig_device) except? 0: + """Get parent device handle from a MIG device handle. + + Args: + mig_device (intptr_t): MIG device handle. + + Returns: + intptr_t: Device handle. + + .. seealso:: `nvmlDeviceGetDeviceHandleFromMigDeviceHandle` + """ + cdef Device device + with nogil: + __status__ = nvmlDeviceGetDeviceHandleFromMigDeviceHandle(mig_device, &device) + check_status(__status__) + return device + + +cpdef device_power_smoothing_activate_preset_profile(intptr_t device, intptr_t profile): + """Activiate a specific preset profile for datacenter power smoothing. The API only sets the active preset profile based on the input profileId, and ignores the other parameters of the structure. Requires root/admin permissions. 
+ + Args: + device (intptr_t): The identifier of the target device. + profile (intptr_t): Reference to ``nvmlPowerSmoothingProfile_v1_t``. Note that only ``profile->profileId`` is used and the rest of the structure is ignored. + + .. seealso:: `nvmlDevicePowerSmoothingActivatePresetProfile` + """ + with nogil: + __status__ = nvmlDevicePowerSmoothingActivatePresetProfile(device, profile) + check_status(__status__) + + +cpdef device_power_smoothing_update_preset_profile_param(intptr_t device, intptr_t profile): + """Update the value of a specific profile parameter contained within ``nvmlPowerSmoothingProfile_v1_t``. Requires root/admin permissions. + + Args: + device (intptr_t): The identifier of the target device. + profile (intptr_t): Reference to ``nvmlPowerSmoothingProfile_v1_t`` struct. + + .. seealso:: `nvmlDevicePowerSmoothingUpdatePresetProfileParam` + """ + with nogil: + __status__ = nvmlDevicePowerSmoothingUpdatePresetProfileParam(device, profile) + check_status(__status__) -cpdef object gpu_instance_get_compute_instance_possible_placements(intptr_t gpu_instance, unsigned int profile_id): - """Get compute instance placements. +cpdef device_power_smoothing_set_state(intptr_t device, intptr_t state): + """Enable or disable the Power Smoothing Feature. Requires root/admin permissions. Args: - gpu_instance (intptr_t): The identifier of the target GPU instance. - profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``. + device (intptr_t): The identifier of the target device. + state (intptr_t): Reference to ``nvmlPowerSmoothingState_v1_t``. - .. seealso:: `nvmlGpuInstanceGetComputeInstancePossiblePlacements` + .. seealso:: `nvmlDevicePowerSmoothingSetState` """ - cdef unsigned int[1] count = [0] - with nogil: - __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpu_instance, profile_id, NULL, count) - check_status_size(__status__) - cdef ComputeInstancePlacement placements = ComputeInstancePlacement(count[0]) - cdef nvmlComputeInstancePlacement_t *placements_ptr = (placements._get_ptr()) - if count[0] == 0: - return placements with nogil: - __status__ = nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpu_instance, profile_id, placements_ptr, count) + __status__ = nvmlDevicePowerSmoothingSetState(device, state) check_status(__status__) - return placements -cpdef intptr_t gpu_instance_create_compute_instance(intptr_t gpu_instance, unsigned int profile_id) except? 0: - """Create compute instance. +cpdef object device_get_addressing_mode(intptr_t device): + """Get the addressing mode for a given GPU. Addressing modes can be one of:. Args: - gpu_instance (intptr_t): The identifier of the target GPU instance. - profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``. + device (intptr_t): The device handle. Returns: - intptr_t: Returns the compute instance handle. + nvmlDeviceAddressingMode_v1_t: Pointer to addressing mode of the device. - .. seealso:: `nvmlGpuInstanceCreateComputeInstance` + .. 
seealso:: `nvmlDeviceGetAddressingMode` """ - cdef ComputeInstance compute_instance + cdef DeviceAddressingMode_v1 mode_py = DeviceAddressingMode_v1() + cdef nvmlDeviceAddressingMode_t *mode = (mode_py._get_ptr()) + mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24) with nogil: - __status__ = nvmlGpuInstanceCreateComputeInstance(gpu_instance, profile_id, &compute_instance) + __status__ = nvmlDeviceGetAddressingMode(device, mode) check_status(__status__) - return compute_instance + return mode_py -cpdef intptr_t gpu_instance_create_compute_instance_with_placement(intptr_t gpu_instance, unsigned int profile_id, intptr_t placement) except? 0: - """Create compute instance with the specified placement. +cpdef object device_get_repair_status(intptr_t device): + """Get the repair status for TPC/Channel repair. Args: - gpu_instance (intptr_t): The identifier of the target GPU instance. - profile_id (unsigned int): The compute instance profile ID. See ``nvmlGpuInstanceGetComputeInstanceProfileInfo``. - placement (intptr_t): The requested placement. See ``nvmlGpuInstanceGetComputeInstancePossiblePlacements``. + device (intptr_t): The identifier of the target device. Returns: - intptr_t: Returns the compute instance handle. + nvmlRepairStatus_v1_t: Reference to ``nvmlRepairStatus_t``. - .. seealso:: `nvmlGpuInstanceCreateComputeInstanceWithPlacement` + .. seealso:: `nvmlDeviceGetRepairStatus` """ - cdef ComputeInstance compute_instance + cdef RepairStatus_v1 repair_status_py = RepairStatus_v1() + cdef nvmlRepairStatus_t *repair_status = (repair_status_py._get_ptr()) + repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24) with nogil: - __status__ = nvmlGpuInstanceCreateComputeInstanceWithPlacement(gpu_instance, profile_id, placement, &compute_instance) + __status__ = nvmlDeviceGetRepairStatus(device, repair_status) check_status(__status__) - return compute_instance + return repair_status_py -cpdef compute_instance_destroy(intptr_t compute_instance): - """Destroy compute instance. +cpdef object device_get_power_mizer_mode_v1(intptr_t device): + """Retrieves current power mizer mode on this device. Args: - compute_instance (intptr_t): The compute instance handle. + device (intptr_t): The identifier of the target device. - .. seealso:: `nvmlComputeInstanceDestroy` + Returns: + nvmlDevicePowerMizerModes_v1_t: Reference in which to return the power mizer mode. + + .. seealso:: `nvmlDeviceGetPowerMizerMode_v1` """ + cdef DevicePowerMizerModes_v1 power_mizer_mode_py = DevicePowerMizerModes_v1() + cdef nvmlDevicePowerMizerModes_v1_t *power_mizer_mode = (power_mizer_mode_py._get_ptr()) with nogil: - __status__ = nvmlComputeInstanceDestroy(compute_instance) + __status__ = nvmlDeviceGetPowerMizerMode_v1(device, power_mizer_mode) check_status(__status__) + return power_mizer_mode_py -cpdef intptr_t gpu_instance_get_compute_instance_by_id(intptr_t gpu_instance, unsigned int id) except? 0: - """Get compute instance for given instance ID. +cpdef device_set_power_mizer_mode_v1(intptr_t device, intptr_t power_mizer_mode): + """Sets the new power mizer mode. Args: - gpu_instance (intptr_t): The identifier of the target GPU instance. - id (unsigned int): The compute instance ID. - - Returns: - intptr_t: Returns compute instance. + device (intptr_t): The identifier of the target device. + power_mizer_mode (intptr_t): Reference in which to set the power mizer mode. - .. seealso:: `nvmlGpuInstanceGetComputeInstanceById` + .. 
seealso:: `nvmlDeviceSetPowerMizerMode_v1` """ - cdef ComputeInstance compute_instance with nogil: - __status__ = nvmlGpuInstanceGetComputeInstanceById(gpu_instance, id, &compute_instance) + __status__ = nvmlDeviceSetPowerMizerMode_v1(device, power_mizer_mode) check_status(__status__) - return compute_instance -cpdef object compute_instance_get_info_v2(intptr_t compute_instance): - """Get compute instance information. +cpdef object system_get_topology_gpu_set(unsigned int cpuNumber): + """Retrieve the set of GPUs that have a CPU affinity with the given CPU number Args: - compute_instance (intptr_t): The compute instance handle. + cpuNumber (unsigned int): The CPU number Returns: - nvmlComputeInstanceInfo_t: Return compute instance information. - - .. seealso:: `nvmlComputeInstanceGetInfo_v2` + array: An array of device handles for GPUs found with affinity to cpuNumber """ - cdef ComputeInstanceInfo info_py = ComputeInstanceInfo() - cdef nvmlComputeInstanceInfo_t *info = (info_py._get_ptr()) + cdef unsigned int[1] count = [0] with nogil: - __status__ = nvmlComputeInstanceGetInfo_v2(compute_instance, info) + __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, count, NULL) + check_status_size(__status__) + if count[0] == 0: + return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] + cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") + with nogil: + __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, count, deviceArray.data) check_status(__status__) - return info_py - + return deviceArray -cpdef unsigned int device_is_mig_device_handle(intptr_t device) except? 0: - """Test if the given handle refers to a MIG device. - Args: - device (intptr_t): NVML handle to test. +cpdef str system_get_driver_branch(): + """Retrieves the driver branch of the NVIDIA driver installed on the system. Returns: - unsigned int: True when handle refers to a MIG device. - - .. seealso:: `nvmlDeviceIsMigDeviceHandle` + str: driver branch. """ - cdef unsigned int is_mig_device + cdef nvmlSystemDriverBranchInfo_t info + # Calculation copied from the macro NVML_STRUCT_VERSION in nvml.h + # Needs to be updated if the version of the nvmlSystemDriverBranchInfo_t + # struct changes in the future. + info.version = sizeof(nvmlSystemDriverBranchInfo_v1_t) | (1 << 24) + cdef unsigned int length = 80 with nogil: - __status__ = nvmlDeviceIsMigDeviceHandle(device, &is_mig_device) + __status__ = nvmlSystemGetDriverBranch(&info, length) check_status(__status__) - return is_mig_device + return cpython.PyUnicode_FromString(info.branch) -cpdef unsigned int device_get_gpu_instance_id(intptr_t device) except? 0: - """Get GPU instance ID for the given MIG device handle. +cpdef object unit_get_devices(intptr_t unit): + """Retrieves the set of GPU devices that are attached to the specified unit. Args: - device (intptr_t): Target MIG device handle. + unit (Unit): The identifier of the target unit. Returns: - unsigned int: GPU instance ID. - - .. seealso:: `nvmlDeviceGetGpuInstanceId` + array: An array of device handles for GPUs attached to the unit. 
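+
+    Example (illustrative sketch; it assumes ``unit`` is a valid S-class unit
+    handle obtained through the NVML unit query APIs, and that NVML has already
+    been initialized via :func:`init_v2`)::
+
+        handles = unit_get_devices(unit)
+        if len(handles) > 0:
+            # Each entry is an NVML device handle returned as an integer
+            # address, usable with the device_* wrappers in this module.
+            first_gpu = handles[0]
+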
""" - cdef unsigned int id + cdef unsigned int[1] deviceCount = [0] with nogil: - __status__ = nvmlDeviceGetGpuInstanceId(device, &id) + __status__ = nvmlUnitGetDevices(unit, deviceCount, NULL) + check_status_size(__status__) + if deviceCount[0] == 0: + return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] + cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="P", mode="c") + with nogil: + __status__ = nvmlUnitGetDevices(unit, deviceCount, deviceArray.data) check_status(__status__) - return id + return deviceArray -cpdef unsigned int device_get_compute_instance_id(intptr_t device) except? 0: - """Get compute instance ID for the given MIG device handle. +cpdef object device_get_topology_nearest_gpus(intptr_t device, unsigned int level): + """Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level Args: - device (intptr_t): Target MIG device handle. + device (Device): The identifier of the first device + level (GpuTopologyLevel): The level to search for other GPUs Returns: - unsigned int: Compute instance ID. - - .. seealso:: `nvmlDeviceGetComputeInstanceId` + array: An array of device handles for GPUs found at level """ - cdef unsigned int id + cdef unsigned int[1] count = [0] with nogil: - __status__ = nvmlDeviceGetComputeInstanceId(device, &id) + __status__ = nvmlDeviceGetTopologyNearestGpus( + device, + level, + count, + NULL + ) + check_status_size(__status__) + if count[0] == 0: + return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] + cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") + with nogil: + __status__ = nvmlDeviceGetTopologyNearestGpus( + device, + level, + count, + deviceArray.data + ) check_status(__status__) - return id + return deviceArray -cpdef unsigned int device_get_max_mig_device_count(intptr_t device) except? 0: - """Get the maximum number of MIG devices that can exist under a given parent NVML device. +cpdef object device_get_temperature_v(intptr_t device, nvmlTemperatureSensors_t sensorType): + """Retrieves the current temperature readings (in degrees C) for the given device. Args: - device (intptr_t): Target device handle. + device (intptr_t): Target device identifier. Returns: - unsigned int: Count of MIG devices. + nvmlTemperature_v1_t: Structure specifying the sensor type (input) and retrieved temperature value (output). - .. seealso:: `nvmlDeviceGetMaxMigDeviceCount` + .. seealso:: `nvmlDeviceGetTemperatureV` """ - cdef unsigned int count + cdef nvmlTemperature_v1_t[1] temperature + temperature[0].version = sizeof(nvmlTemperature_v1_t) | (1 << 24) + temperature[0].sensorType = sensorType + with nogil: - __status__ = nvmlDeviceGetMaxMigDeviceCount(device, &count) + __status__ = nvmlDeviceGetTemperatureV(device, temperature) check_status(__status__) - return count + return temperature.temperature -cpdef intptr_t device_get_mig_device_handle_by_index(intptr_t device, unsigned int ind_ex) except? 0: - """Get MIG device handle for the given ind_ex under its parent NVML device. +cpdef object device_get_supported_performance_states(intptr_t device): + """Get all supported Performance States (P-States) for the device. Args: - device (intptr_t): Reference to the parent GPU device handle. - ind_ex (unsigned int): Index of the MIG device. - - Returns: - intptr_t: Reference to the MIG device handle. - - .. 
seealso:: `nvmlDeviceGetMigDeviceHandleByIndex` + device (Device): The identifier of the target device. """ - cdef Device mig_device + cdef int size = 16 # NVML_MAX_GPU_PERF_STATES + cdef view.array pstates = view.array(shape=(size,), itemsize=sizeof(unsigned int), format="I", mode="c") + + # The header says "size is the size of the pstates array in bytes". + # The size of an enum in C is implementation-defined, so we multiply by `sizeof(nvmlPstates_t)` here. with nogil: - __status__ = nvmlDeviceGetMigDeviceHandleByIndex(device, ind_ex, &mig_device) + __status__ = nvmlDeviceGetSupportedPerformanceStates( + device, + pstates.data, + size * sizeof(nvmlPstates_t) + ) check_status(__status__) - return mig_device + return pstates -cpdef intptr_t device_get_device_handle_from_mig_device_handle(intptr_t mig_device) except? 0: - """Get parent device handle from a MIG device handle. +cpdef object device_get_running_process_detail_list(intptr_t device, unsigned int mode): + """Get information about running processes on a device for input context Args: - mig_device (intptr_t): MIG device handle. - - Returns: - intptr_t: Device handle. - - .. seealso:: `nvmlDeviceGetDeviceHandleFromMigDeviceHandle` + device (Device): The device handle or MIG handle + mode (unsigned int): The process mode (Compute/Graphics/MPSCompute) """ - cdef Device device - with nogil: - __status__ = nvmlDeviceGetDeviceHandleFromMigDeviceHandle(mig_device, &device) - check_status(__status__) - return device + cdef ProcessDetailList_v1 plist = ProcessDetailList_v1() + cdef nvmlProcessDetailList_v1_t *ptr = plist._get_ptr() -cpdef gpm_sample_get(intptr_t device, intptr_t gpm_sample): - """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer. After two samples are gathered, you can call nvmlGpmMetricGet on those samples to retrive metrics. + # Get size of array + with nogil: + ptr.version = sizeof(nvmlProcessDetailList_v1_t) | (1 << 24) + ptr.mode = mode + ptr.numProcArrayEntries = 0 + ptr.procArray = NULL + __status__ = nvmlDeviceGetRunningProcessDetailList(device, ptr) + check_status_size(__status__) - Args: - device (intptr_t): Device to get samples for. - gpm_sample (intptr_t): Buffer to read samples into. + if ptr.numProcArrayEntries == 0: + return plist + + procArray = ProcessDetail_v1(ptr.numProcArrayEntries) + plist.proc_array = procArray - .. seealso:: `nvmlGpmSampleGet` - """ with nogil: - __status__ = nvmlGpmSampleGet(device, gpm_sample) + __status__ = nvmlDeviceGetRunningProcessDetailList(device, ptr) check_status(__status__) -cpdef gpm_mig_sample_get(intptr_t device, unsigned int gpu_instance_id, intptr_t gpm_sample): - """Read a sample of GPM metrics into the provided ``gpm_sample`` buffer for a MIG GPU Instance. +cpdef object device_get_samples(intptr_t device, int type, unsigned long long last_seen_time_stamp): + """Gets recent samples for the GPU. Args: - device (intptr_t): Device to get samples for. - gpu_instance_id (unsigned int): MIG GPU Instance ID. - gpm_sample (intptr_t): Buffer to read samples into. + device (intptr_t): The identifier for the target device. + type (SamplingType): Type of sampling event. + last_seen_time_stamp (unsigned long long): Return only samples with timestamp greater than last_seen_time_stamp. - .. seealso:: `nvmlGpmMigSampleGet` + .. 
seealso:: `nvmlDeviceGetSamples` """ + cdef unsigned int[1] sample_count = [0] + cdef unsigned int[1] sample_val_type = [0] + with nogil: + __status__ = nvmlDeviceGetSamples(device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, sample_count, NULL) + check_status_size(__status__) + cdef Sample samples = Sample(sample_count[0]) + cdef nvmlSample_t *samples_ptr = samples._get_ptr() + if sample_count[0] == 0: + return samples with nogil: - __status__ = nvmlGpmMigSampleGet(device, gpu_instance_id, gpm_sample) + __status__ = nvmlDeviceGetSamples(device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, sample_count, samples_ptr) check_status(__status__) + return (sample_val_type[0], samples) -cpdef object gpm_query_device_support(intptr_t device): - """Indicate whether the supplied device supports GPM. +cpdef object device_get_retired_pages_v2(intptr_t device, int cause): + """Returns the list of retired pages by source, including pages that are pending retirement Args: - device (intptr_t): NVML device to query for. + device (Device): The identifier of the target device. + cause (PageRetirementCause): Filter page addresses by cause of retirement. Returns: - nvmlGpmSupport_t: Structure to indicate GPM support ``nvmlGpmSupport_t``. Indicates GPM support per system for the supplied device. - - .. seealso:: `nvmlGpmQueryDeviceSupport` + tuple: A tuple of two arrays (addresses, timestamps). """ - cdef GpmSupport gpm_support_py = GpmSupport() - cdef nvmlGpmSupport_t *gpm_support = (gpm_support_py._get_ptr()) - gpm_support.version = 1 + cdef unsigned int[1] page_count = [0] + with nogil: + __status__ = nvmlDeviceGetRetiredPages_v2(device, <_PageRetirementCause>cause, page_count, NULL, NULL) + check_status_size(__status__) + if page_count[0] == 0: + return ( + view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0], + view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0] + ) + cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c") + cdef view.array timestamps = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c") with nogil: - __status__ = nvmlGpmQueryDeviceSupport(device, gpm_support) + __status__ = nvmlDeviceGetRetiredPages_v2(device, <_PageRetirementCause>cause, page_count, addresses.data, timestamps.data) check_status(__status__) - return gpm_support_py + return (addresses, timestamps) -cpdef unsigned int gpm_query_if_streaming_enabled(intptr_t device) except? 0: - """Get GPM stream state. +cpdef object device_get_processes_utilization_info(intptr_t device, unsigned long long last_seen_time_stamp): + """Retrieves the recent utilization and process ID for all running processes Args: - device (intptr_t): The identifier of the target device. + device (Device): The identifier of the target device. + last_seen_time_stamp (unsigned long long): Timestamp in microseconds. Set it to 0 to read utilization based + on all the samples maintained by the driver's internal sample buffer. Set to a timeStamp retrieved from + a previous query to read utilization since the previous query. Returns: - unsigned int: Returns GPM stream state NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED. - - .. seealso:: `nvmlGpmQueryIfStreamingEnabled` + ProcessesUtilizationInfo_v1: The processes utilization information structure. 
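+
+    Example (illustrative sketch, not part of the underlying NVML API; it
+    assumes NVML has been initialized via :func:`init_v2` and that ``device``
+    is a valid device handle obtained from this module)::
+
+        # Pass 0 to read all samples currently held in the driver's
+        # internal buffer.
+        info = device_get_processes_utilization_info(device, 0)
+        samples = info.proc_util_array  # per-process utilization records
+
+        # A later call may pass a timestamp taken from a previous query to
+        # receive only the samples collected after that point.
+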
""" - cdef unsigned int state - with nogil: - __status__ = nvmlGpmQueryIfStreamingEnabled(device, &state) - check_status(__status__) - return state + cdef ProcessesUtilizationInfo_v1 procesesUtilInfo = ProcessesUtilizationInfo_v1() + cdef nvmlProcessesUtilizationInfo_t *ptr = procesesUtilInfo._get_ptr() + # Get size of array + with nogil: + ptr.version = sizeof(nvmlProcessesUtilizationInfo_v1_t) | (1 << 24) + ptr.processSamplesCount = 0 + ptr.lastSeenTimeStamp = last_seen_time_stamp + ptr.procUtilArray = NULL + __status__ = nvmlDeviceGetProcessesUtilizationInfo( + device, ptr + ) + check_status_size(__status__) -cpdef gpm_set_streaming_enabled(intptr_t device, unsigned int state): - """Set GPM stream state. + if ptr.processSamplesCount == 0: + return procesesUtilInfo - Args: - device (intptr_t): The identifier of the target device. - state (unsigned int): GPM stream state, NVML_FEATURE_DISABLED or NVML_FEATURE_ENABLED. + cdef ProcessUtilizationInfo_v1 procUtilArray = ProcessUtilizationInfo_v1(ptr.processSamplesCount) + procesesUtilInfo.proc_util_array = procUtilArray - .. seealso:: `nvmlGpmSetStreamingEnabled` - """ with nogil: - __status__ = nvmlGpmSetStreamingEnabled(device, state) + __status__ = nvmlDeviceGetProcessesUtilizationInfo( + device, ptr + ) check_status(__status__) + return procesesUtilInfo + -cpdef object device_get_capabilities(intptr_t device): - """Get device capabilities. +cpdef device_set_hostname_v1(intptr_t device, str hostname): + """Set the hostname for the device. Args: - device (intptr_t): The identifier of the target device. + device (Device): The identifier of the target device. + hostname (str): The new hostname to set. + """ + cdef bytes = cpython.PyUnicode_AsASCIIString(hostname) + if len(bytes) > 64: + raise ValueError("hostname must 64 characters or less") - Returns: - nvmlDeviceCapabilities_v1_t: Returns GPU's capabilities. + cdef nvmlHostname_v1_t hostname_struct + memcpy(hostname_struct.value, cpython.PyBytes_AsString(bytes), len(bytes)) - .. seealso:: `nvmlDeviceGetCapabilities` - """ - cdef DeviceCapabilities_v1 caps_py = DeviceCapabilities_v1() - cdef nvmlDeviceCapabilities_t *caps = (caps_py._get_ptr()) - caps.version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24) with nogil: - __status__ = nvmlDeviceGetCapabilities(device, caps) + __status__ = nvmlDeviceSetHostname_v1(device, &hostname_struct) check_status(__status__) - return caps_py -cpdef device_workload_power_profile_clear_requested_profiles(intptr_t device, intptr_t requested_profiles): - """Clear Requested Performance Profiles. +cpdef str device_get_hostname_v1(intptr_t device): + """Get the hostname for the device. Args: - device (intptr_t): The identifier of the target device. - requested_profiles (intptr_t): Reference to struct ``nvmlWorkloadPowerProfileRequestedProfiles_v1_t``. + device (Device): The identifier of the target device. - .. seealso:: `nvmlDeviceWorkloadPowerProfileClearRequestedProfiles` + Returns: + str: Hostname of the device. """ + cdef nvmlHostname_v1_t hostname with nogil: - __status__ = nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requested_profiles) + __status__ = nvmlDeviceGetHostname_v1(device, &hostname) check_status(__status__) + return cpython.PyUnicode_FromString(hostname.value) -cpdef device_power_smoothing_activate_preset_profile(intptr_t device, intptr_t profile): - """Activiate a specific preset profile for datacenter power smoothing. 
The API only sets the active preset profile based on the input profileId, and ignores the other parameters of the structure. Requires root/admin permissions. +cdef FieldValue _cast_field_values(values): + if isinstance(values, FieldValue): + return values + cdef FieldValue values_ + cdef unsigned int valuesCount = len(values) + values_ = FieldValue(valuesCount) + for i, v in enumerate(values): + if isinstance(v, tuple): + if len(v) != 2: + raise ValueError("FieldValue tuple must be of length 2") + if not isinstance(v[0], int) or not isinstance(v[1], int): + raise ValueError("FieldValue tuple elements must be integers") + values_[i].field_id = v[0] + values_[i].scope_id = v[1] + elif isinstance(v, int): + values_[i].field_id = v + else: + raise ValueError("Each entry must be an integer field ID, or a tuple of (field ID, scope ID)") + return values_ + + +cpdef object device_get_field_values(intptr_t device, values): + """Request values for a list of fields for a device. This API allows multiple fields to be queried at once. If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs will be populated from a single call rather than making a driver call for each fieldId. Args: - device (intptr_t): The identifier of the target device. - profile (intptr_t): Reference to ``nvmlPowerSmoothingProfile_v1_t``. Note that only ``profile->profileId`` is used and the rest of the structure is ignored. + device (intptr_t): The device handle of the GPU to request field values for. + values (FieldValue): Array of FieldValue specifying what to retrieve. - .. seealso:: `nvmlDevicePowerSmoothingActivatePresetProfile` + .. seealso:: `nvmlDeviceGetFieldValues` """ + cdef FieldValue values_ = _cast_field_values(values) + cdef nvmlFieldValue_t *ptr = values_._get_ptr() + cdef unsigned int valuesCount = len(values) with nogil: - __status__ = nvmlDevicePowerSmoothingActivatePresetProfile(device, profile) + __status__ = nvmlDeviceGetFieldValues(device, valuesCount, ptr) check_status(__status__) + values_._data.resize((valuesCount,)) + return values_ -cpdef device_power_smoothing_update_preset_profile_param(intptr_t device, intptr_t profile): - """Update the value of a specific profile parameter contained within ``nvmlPowerSmoothingProfile_v1_t``. Requires root/admin permissions. - Args: - device (intptr_t): The identifier of the target device. - profile (intptr_t): Reference to ``nvmlPowerSmoothingProfile_v1_t`` struct. +cpdef object device_clear_field_values(intptr_t device, values): + """Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once. - .. seealso:: `nvmlDevicePowerSmoothingUpdatePresetProfileParam` + Args: + device (Device): The device handle of the GPU to request field values for + values (FieldValue): FieldValue instance to hold field values. Each value's fieldId must be populated + prior to this call """ + cdef FieldValue values_ = _cast_field_values(values) + cdef nvmlFieldValue_t *ptr = values_._get_ptr() + cdef unsigned int valuesCount = len(values) + with nogil: - __status__ = nvmlDevicePowerSmoothingUpdatePresetProfileParam(device, profile) + __status__ = nvmlDeviceClearFieldValues(device, valuesCount, ptr) check_status(__status__) -cpdef device_power_smoothing_set_state(intptr_t device, intptr_t state): - """Enable or disable the Power Smoothing Feature. Requires root/admin permissions. 
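As a usage note (not part of the patch): `_cast_field_values` lets callers pass an existing FieldValue array, bare integer field IDs, or (field ID, scope ID) tuples. A hedged sketch, assuming the NVML_FI_* field-ID constants are available to the caller under these names:

    # nvml: the cuda.bindings.nvml module; handle: a device handle (assumed).
    values = nvml.device_get_field_values(handle, [
        nvml.NVML_FI_DEV_NVLINK_LINK_COUNT,               # bare field ID
        (nvml.NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX, 0),  # (field ID, scope ID)
    ])
    for v in values:
        print(v.field_id, v.scope_id)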
+cpdef object device_get_supported_vgpus(intptr_t device): + """Retrieve the supported vGPU types on a physical GPU (device). Args: - device (intptr_t): The identifier of the target device. - state (intptr_t): Reference to ``nvmlPowerSmoothingState_v1_t``. + device (Device): The identifier of the target device. - .. seealso:: `nvmlDevicePowerSmoothingSetState` + Returns: + array: An array of supported vGPU type IDs. """ + cdef unsigned int[1] vgpuCount = [0] with nogil: - __status__ = nvmlDevicePowerSmoothingSetState(device, state) + __status__ = nvmlDeviceGetSupportedVgpus(device, vgpuCount, NULL) + check_status_size(__status__) + if vgpuCount[0] == 0: + return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0] + cdef view.array vgpuTypeIds = view.array(shape=(vgpuCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c") + with nogil: + __status__ = nvmlDeviceGetSupportedVgpus(device, vgpuCount, vgpuTypeIds.data) check_status(__status__) + return vgpuTypeIds -cpdef object device_get_addressing_mode(intptr_t device): - """Get the addressing mode for a given GPU. Addressing modes can be one of:. +cpdef object device_get_creatable_vgpus(intptr_t device): + """Retrieve the currently creatable vGPU types on a physical GPU (device). Args: - device (intptr_t): The device handle. + device (Device): The identifier of the target device. Returns: - nvmlDeviceAddressingMode_v1_t: Pointer to addressing mode of the device. - - .. seealso:: `nvmlDeviceGetAddressingMode` + array: An array of creatable vGPU type IDs. """ - cdef DeviceAddressingMode_v1 mode_py = DeviceAddressingMode_v1() - cdef nvmlDeviceAddressingMode_t *mode = (mode_py._get_ptr()) - mode.version = sizeof(nvmlDeviceAddressingMode_v1_t) | (1 << 24) + cdef unsigned int[1] vgpuCount = [0] with nogil: - __status__ = nvmlDeviceGetAddressingMode(device, mode) + __status__ = nvmlDeviceGetCreatableVgpus(device, vgpuCount, NULL) + check_status_size(__status__) + if vgpuCount[0] == 0: + return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0] + cdef view.array vgpuTypeIds = view.array(shape=(vgpuCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c") + with nogil: + __status__ = nvmlDeviceGetCreatableVgpus(device, vgpuCount, vgpuTypeIds.data) check_status(__status__) - return mode_py + return vgpuTypeIds -cpdef object device_get_repair_status(intptr_t device): - """Get the repair status for TPC/Channel repair. +cpdef object device_get_active_vgpus(intptr_t device): + """Retrieve the active vGPU instances on a device. Args: - device (intptr_t): The identifier of the target device. + device (Device): The identifier of the target device. Returns: - nvmlRepairStatus_v1_t: Reference to ``nvmlRepairStatus_t``. - - .. seealso:: `nvmlDeviceGetRepairStatus` + array: An array of active vGPU instance IDs. 
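A short sketch (not part of the patch) contrasting the two device-level vGPU queries above; both return typed memoryviews of vGPU type IDs. `nvml` and `handle` are as in the earlier sketches.

    # Supported types are fixed per GPU and driver; creatable types shrink as
    # vGPU instances are started and framebuffer is consumed.
    supported = nvml.device_get_supported_vgpus(handle)
    creatable = nvml.device_get_creatable_vgpus(handle)
    print("supported:", list(supported))
    print("creatable:", list(creatable))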
""" - cdef RepairStatus_v1 repair_status_py = RepairStatus_v1() - cdef nvmlRepairStatus_t *repair_status = (repair_status_py._get_ptr()) - repair_status.version = sizeof(nvmlRepairStatus_v1_t) | (1 << 24) + cdef unsigned int[1] vgpuCount = [0] with nogil: - __status__ = nvmlDeviceGetRepairStatus(device, repair_status) + __status__ = nvmlDeviceGetActiveVgpus(device, vgpuCount, NULL) + check_status_size(__status__) + if vgpuCount[0] == 0: + return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0] + cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c") + with nogil: + __status__ = nvmlDeviceGetActiveVgpus(device, vgpuCount, vgpuInstances.data) check_status(__status__) - return repair_status_py + return vgpuInstances -cpdef object device_get_power_mizer_mode_v1(intptr_t device): - """Retrieves current power mizer mode on this device. +cpdef str vgpu_instance_get_vm_id(unsigned int vgpu_instance): + """Retrieve the VM ID associated with a vGPU instance. Args: - device (intptr_t): The identifier of the target device. + vgpu_instance (unsigned int): The identifier of the target vGPU instance. Returns: - nvmlDevicePowerMizerModes_v1_t: Reference in which to return the power mizer mode. - - .. seealso:: `nvmlDeviceGetPowerMizerMode_v1` + tuple[str, VgpuVmIdType]: A tuple of (id, id_type). """ - cdef DevicePowerMizerModes_v1 power_mizer_mode_py = DevicePowerMizerModes_v1() - cdef nvmlDevicePowerMizerModes_v1_t *power_mizer_mode = (power_mizer_mode_py._get_ptr()) + cdef unsigned int size = 80 + cdef char[80] vmId + cdef nvmlVgpuVmIdType_t[1] vmIdType with nogil: - __status__ = nvmlDeviceGetPowerMizerMode_v1(device, power_mizer_mode) + __status__ = nvmlVgpuInstanceGetVmID(vgpu_instance, vmId, size, vmIdType) check_status(__status__) - return power_mizer_mode_py + return (cpython.PyUnicode_FromString(vmId), vmIdType[0]) -cpdef device_set_power_mizer_mode_v1(intptr_t device, intptr_t power_mizer_mode): - """Sets the new power mizer mode. +cpdef object gpu_instance_get_creatable_vgpus(intptr_t gpu_instance): + """Query the currently creatable vGPU types on a specific GPU Instance. Args: - device (intptr_t): The identifier of the target device. - power_mizer_mode (intptr_t): Reference in which to set the power mizer mode. + gpu_instance (GpuInstance): The identifier of the target GPU Instance. - .. seealso:: `nvmlDeviceSetPowerMizerMode_v1` + Returns: + VgpuTypeIdInfo_v1: The vGPU type ID information structure. """ - with nogil: - __status__ = nvmlDeviceSetPowerMizerMode_v1(device, power_mizer_mode) - check_status(__status__) + cdef VgpuTypeIdInfo_v1 pVgpus = VgpuTypeIdInfo_v1() + cdef nvmlVgpuTypeIdInfo_v1_t *ptr = pVgpus._get_ptr() -cpdef object device_get_pdi(intptr_t device): - """Retrieves the Per Device Identifier (PDI) associated with this device. + # Get size of array + with nogil: + ptr.version = sizeof(nvmlVgpuTypeIdInfo_v1_t) | (1 << 24) + ptr.vgpuCount = 0 + ptr.vgpuTypeIds = NULL + __status__ = nvmlGpuInstanceGetCreatableVgpus(gpu_instance, ptr) + check_status_size(__status__) - Args: - device (intptr_t): The identifier of the target device. + if ptr.vgpuCount == 0: + return pVgpus - Returns: - nvmlPdi_v1_t: Reference to the caller-provided structure to return the GPU PDI. + cdef view.array vgpuTypeIds = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c") + pVgpus.vgpu_type_ids = vgpuTypeIds - .. 
seealso:: `nvmlDeviceGetPdi` - """ - cdef Pdi_v1 pdi_py = Pdi_v1() - cdef nvmlPdi_t *pdi = (pdi_py._get_ptr()) - pdi.version = sizeof(nvmlPdi_v1_t) | (1 << 24) with nogil: - __status__ = nvmlDeviceGetPdi(device, pdi) + __status__ = nvmlGpuInstanceGetCreatableVgpus(gpu_instance, ptr) check_status(__status__) - return pdi_py + return pVgpus -cpdef device_read_write_prm_v1(intptr_t device, intptr_t buffer): - """Read or write a GPU PRM register. The input is assumed to be in TLV format in network byte order. + +cpdef object gpu_instance_get_active_vgpus(intptr_t gpu_instance): + """Retrieve the active vGPU instances within a GPU instance. Args: - device (intptr_t): Identifer of target GPU device. - buffer (intptr_t): Structure holding the input data in TLV format as well as the PRM register contents in TLV format (in the case of a successful read operation). Note: the input data and any returned data shall be in network byte order. + gpu_instance (GpuInstance): The identifier of the target GPU Instance. - .. seealso:: `nvmlDeviceReadWritePRM_v1` + Returns: + ActiveVgpuInstanceInfo: The vGPU instance ID information structure. """ + cdef ActiveVgpuInstanceInfo_v1 activeVgpuInfo = ActiveVgpuInstanceInfo_v1() + cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr = activeVgpuInfo._get_ptr() + + with nogil: + ptr.version = sizeof(nvmlActiveVgpuInstanceInfo_v1_t) | (1 << 24) + ptr.vgpuCount = 0 + ptr.vgpuInstances = NULL + __status__ = nvmlGpuInstanceGetActiveVgpus(gpu_instance, ptr) + check_status_size(__status__) + + if ptr.vgpuCount == 0: + return activeVgpuInfo + + cdef view.array vgpuInstances = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c") + activeVgpuInfo.vgpu_instances = vgpuInstances + with nogil: - __status__ = nvmlDeviceReadWritePRM_v1(device, buffer) + __status__ = nvmlGpuInstanceGetActiveVgpus(gpu_instance, ptr) check_status(__status__) + return activeVgpuInfo + -cpdef object device_get_gpu_instance_profile_info_by_id_v(intptr_t device, unsigned int profile_id): - """GPU instance profile query function that accepts profile ID, instead of profile name. It accepts a versioned ``nvmlGpuInstanceProfileInfo_v2_t`` or later output structure. +cpdef object gpu_instance_get_vgpu_type_creatable_placements(intptr_t gpu_instance, unsigned int vgpu_type_id): + """Query the creatable vGPU placement ID of the vGPU type within a GPU instance. Args: - device (intptr_t): The identifier of the target device. - profile_id (unsigned int): One of the profile IDs. + gpu_instance (GpuInstance): The identifier of the target GPU Instance. + vgpu_type_id (unsigned int): The vGPU type ID. Returns: - nvmlGpuInstanceProfileInfo_v2_t: Returns detailed profile information. - - .. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoByIdV` + VgpuPlacementList_v2: The vGPU placement list structure. """ - cdef GpuInstanceProfileInfo_v2 info_py = GpuInstanceProfileInfo_v2() - cdef nvmlGpuInstanceProfileInfo_v2_t *info = (info_py._get_ptr()) - info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24) - with nogil: - __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(device, profile_id, info) - check_status(__status__) - return info_py + cdef VgpuCreatablePlacementInfo_v1 pCreatablePlacementInfo = VgpuCreatablePlacementInfo_v1() + cdef nvmlVgpuCreatablePlacementInfo_v1_t *ptr = pCreatablePlacementInfo._get_ptr() -cpdef object device_get_unrepairable_memory_flag_v1(intptr_t device): - """Get the unrepairable memory flag for a given GPU. 
+ # Get size of array + with nogil: + ptr.version = sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) | (1 << 24) + ptr.count = 0 + ptr.placementIds = NULL + ptr.vgpuTypeId = vgpu_type_id + __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpu_instance, ptr) + check_status_size(__status__) - Args: - device (intptr_t): The identifier of the target device. + if ptr.count == 0: + return pCreatablePlacementInfo - Returns: - nvmlUnrepairableMemoryStatus_v1_t: Reference to ``nvmlUnrepairableMemoryStatus_v1_t``. + cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c") + pCreatablePlacementInfo.placement_ids = placementIds - .. seealso:: `nvmlDeviceGetUnrepairableMemoryFlag_v1` - """ - cdef UnrepairableMemoryStatus_v1 unrepairable_memory_status_py = UnrepairableMemoryStatus_v1() - cdef nvmlUnrepairableMemoryStatus_v1_t *unrepairable_memory_status = (unrepairable_memory_status_py._get_ptr()) with nogil: - __status__ = nvmlDeviceGetUnrepairableMemoryFlag_v1(device, unrepairable_memory_status) + __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpu_instance, ptr) check_status(__status__) - return unrepairable_memory_status_py + return pCreatablePlacementInfo -cpdef device_read_prm_counters_v1(intptr_t device, intptr_t counter_list): - """Read a list of GPU PRM Counters. + +cpdef object device_get_vgpu_type_creatable_placements(intptr_t device, unsigned int vgpu_type_id, unsigned int mode): + """Query the creatable vGPU placement ID of the vGPU type within a GPU instance. Args: - device (intptr_t): Identifer of target GPU device. - counter_list (intptr_t): Structure holding the input parameters as well as the retrieved counter values. + device (Device): The identifier of the target device. + vgpu_type_id (unsigned int): The vGPU type ID. + mode (unsigned int): The placement mode. 0: Heterogeneous, 1: Homogeneous. - .. seealso:: `nvmlDeviceReadPRMCounters_v1` + Returns: + VgpuPlacementList_v2: The vGPU placement list structure. """ - with nogil: - __status__ = nvmlDeviceReadPRMCounters_v1(device, counter_list) - check_status(__status__) + cdef VgpuPlacementList_v2 pPlacementList = VgpuPlacementList_v2() + cdef nvmlVgpuPlacementList_v2_t *ptr = pPlacementList._get_ptr() -cpdef device_set_rusd_settings_v1(intptr_t device, intptr_t settings): - """Set Read-only user shared data (RUSD) settings for GPU. Requires root/admin permissions. + # Get size of array + with nogil: + ptr.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24) + ptr.count = 0 + ptr.placementIds = NULL + ptr.mode = mode + __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(device, vgpu_type_id, ptr) + check_status_size(__status__) - Args: - device (intptr_t): The identifier of the target device. - settings (intptr_t): Reference to nvmlRusdSettings_t struct. + if ptr.count == 0: + return pPlacementList + + cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c") + pPlacementList.placement_ids = placementIds - .. 
seealso:: `nvmlDeviceSetRusdSettings_v1` - """ with nogil: - __status__ = nvmlDeviceSetRusdSettings_v1(device, settings) + __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(device, vgpu_type_id, ptr) check_status(__status__) + return pPlacementList -cpdef object system_get_topology_gpu_set(unsigned int cpuNumber): - """Retrieve the set of GPUs that have a CPU affinity with the given CPU number + +cpdef object vgpu_instance_get_metadata(unsigned int vgpu_instance): + """Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its + associated VM such as the currently installed NVIDIA guest driver version, together with host driver version and + an opaque data section containing internal state. Args: - cpuNumber (unsigned int): The CPU number + vgpu_instance (unsigned int): The identifier of the target vGPU instance. Returns: - array: An array of device handles for GPUs found with affinity to cpuNumber + VgpuMetadata: Metadata. """ - cdef unsigned int[1] count = [0] + cdef VgpuMetadata vgpuMetadata = VgpuMetadata() + cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuMetadata_t)] + cdef nvmlVgpuMetadata_t *ptr = vgpuMetadata._get_ptr() + with nogil: - __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, count, NULL) + __status__ = nvmlVgpuInstanceGetMetadata(vgpu_instance, ptr, bufferSize) check_status_size(__status__) - if count[0] == 0: - return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] - cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") - with nogil: - __status__ = nvmlSystemGetTopologyGpuSet(cpuNumber, count, deviceArray.data) - check_status(__status__) - return deviceArray + return vgpuMetadata -cpdef str system_get_driver_branch(): - """Retrieves the driver branch of the NVIDIA driver installed on the system. + +cpdef object device_get_vgpu_metadata(intptr_t device): + """Returns a vGPU metadata structure for the physical GPU indicated by device. The structure contains + information about the GPU and the currently installed NVIDIA host driver version that's controlling it, + together with an opaque data section containing internal state. + + Args: + device (Device): The identifier of the target device. Returns: - str: driver branch. + VgpuPgpuMetadata: Metadata. """ - cdef nvmlSystemDriverBranchInfo_t info - # Calculation copied from the macro NVML_STRUCT_VERSION in nvml.h - # Needs to be updated if the version of the nvmlSystemDriverBranchInfo_t - # struct changes in the future. - info.version = sizeof(nvmlSystemDriverBranchInfo_v1_t) | (1 << 24) - cdef unsigned int length = 80 + cdef VgpuPgpuMetadata pgpuMetadata = VgpuPgpuMetadata() + cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuPgpuMetadata_t)] + cdef nvmlVgpuPgpuMetadata_t *ptr = pgpuMetadata._get_ptr() + with nogil: - __status__ = nvmlSystemGetDriverBranch(&info, length) - check_status(__status__) - return cpython.PyUnicode_FromString(info.branch) + __status__ = nvmlDeviceGetVgpuMetadata(device, ptr, bufferSize) + check_status_size(__status__) + return pgpuMetadata -cpdef object unit_get_devices(intptr_t unit): - """Retrieves the set of GPU devices that are attached to the specified unit. 
+ +cpdef object get_vgpu_compatibility(VgpuMetadata vgpu_metadata, VgpuPgpuMetadata pgpu_metadata): + """Takes a vGPU instance metadata structure read from vgpu_instance_get_metadata() and a vGPU metadata structure + for a physical GPU read from device_get_vgpu_metadata, and returns compatibility information of the vGPU instance + and the physical GPU. Args: - unit (Unit): The identifier of the target unit. + vgpu_metadata (VgpuMetadata): The vGPU instance metadata. + pgpu_metadata (VgpuPgpuMetadata): The physical GPU metadata. Returns: - array: An array of device handles for GPUs attached to the unit. + VgpuPgpuCompatibility: Compatibility information. """ - cdef unsigned int[1] deviceCount = [0] - with nogil: - __status__ = nvmlUnitGetDevices(unit, deviceCount, NULL) - check_status_size(__status__) - if deviceCount[0] == 0: - return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] - cdef view.array deviceArray = view.array(shape=(deviceCount[0],), itemsize=sizeof(intptr_t), format="P", mode="c") + cdef VgpuPgpuCompatibility compatibilityInfo = VgpuPgpuCompatibility() + cdef nvmlVgpuPgpuCompatibility_t *ptr = compatibilityInfo._get_ptr() + cdef nvmlVgpuMetadata_t *vgpu_metadata_ptr = vgpu_metadata._get_ptr() + cdef nvmlVgpuPgpuMetadata_t *pgpu_metadata_ptr = pgpu_metadata._get_ptr() + with nogil: - __status__ = nvmlUnitGetDevices(unit, deviceCount, deviceArray.data) + __status__ = nvmlGetVgpuCompatibility(vgpu_metadata_ptr, pgpu_metadata_ptr, ptr) check_status(__status__) - return deviceArray + return compatibilityInfo -cpdef object device_get_topology_nearest_gpus(intptr_t device, unsigned int level): - """Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level - Args: - device (Device): The identifier of the first device - level (GpuTopologyLevel): The level to search for other GPUs +cpdef tuple get_vgpu_version(): + """Query the ranges of supported vGPU versions. Returns: - array: An array of device handles for GPUs found at level + tuple: A tuple of (VgpuVersion supported, VgpuVersion current). """ - cdef unsigned int[1] count = [0] - with nogil: - __status__ = nvmlDeviceGetTopologyNearestGpus( - device, - level, - count, - NULL - ) - check_status_size(__status__) - if count[0] == 0: - return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] - cdef view.array deviceArray = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") + cdef VgpuVersion supported = VgpuVersion() + cdef nvmlVgpuVersion_t *supported_ptr = supported._get_ptr() + cdef VgpuVersion current = VgpuVersion() + cdef nvmlVgpuVersion_t *current_ptr = current._get_ptr() + with nogil: - __status__ = nvmlDeviceGetTopologyNearestGpus( - device, - level, - count, - deviceArray.data - ) + __status__ = nvmlGetVgpuVersion(supported_ptr, current_ptr) + check_status(__status__) - return deviceArray + return (supported, current) -cpdef object device_get_temperature_v(intptr_t device, nvmlTemperatureSensors_t sensorType): - """Retrieves the current temperature readings (in degrees C) for the given device. +cpdef object device_get_vgpu_instances_utilization_info(intptr_t device): + """ + Retrieves recent utilization for vGPU instances running on a physical GPU (device). Args: - device (intptr_t): Target device identifier. + device (Device): The identifier of the target device. Returns: - nvmlTemperature_v1_t: Structure specifying the sensor type (input) and retrieved temperature value (output). - - .. 
seealso:: `nvmlDeviceGetTemperatureV` + VgpuInstancesUtilizationInfo_v1: The vGPU instances utilization information structure. """ - cdef nvmlTemperature_v1_t[1] temperature - temperature[0].version = sizeof(nvmlTemperature_v1_t) | (1 << 24) - temperature[0].sensorType = sensorType + cdef VgpuInstancesUtilizationInfo_v1 vgpuUtilInfo = VgpuInstancesUtilizationInfo_v1() + cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr = vgpuUtilInfo._get_ptr() with nogil: - __status__ = nvmlDeviceGetTemperatureV(device, temperature) - check_status(__status__) - return temperature.temperature - + ptr.version = sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t) | (1 << 24) + ptr.vgpuInstanceCount = 0 + ptr.vgpuUtilArray = NULL + __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(device, ptr) + check_status_size(__status__) -cpdef object device_get_supported_performance_states(intptr_t device): - """Get all supported Performance States (P-States) for the device. + if ptr.vgpuInstanceCount == 0: + return vgpuUtilInfo - Args: - device (Device): The identifier of the target device. - """ - cdef int size = 16 # NVML_MAX_GPU_PERF_STATES - cdef view.array pstates = view.array(shape=(size,), itemsize=sizeof(unsigned int), format="I", mode="c") + cdef VgpuInstanceUtilizationInfo_v1 vgpuUtilArray = VgpuInstanceUtilizationInfo_v1(ptr.vgpuInstanceCount) + vgpuUtilInfo.vgpu_util_array = vgpuUtilArray - # The header says "size is the size of the pstates array in bytes". - # The size of an enum in C is implementation-defined, so we multiply by `sizeof(nvmlPstates_t)` here. with nogil: - __status__ = nvmlDeviceGetSupportedPerformanceStates( - device, - pstates.data, - size * sizeof(nvmlPstates_t) - ) + __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(device, ptr) check_status(__status__) - return pstates + return vgpuUtilInfo -cpdef object device_get_running_process_detail_list(intptr_t device, unsigned int mode): - """Get information about running processes on a device for input context - Args: - device (Device): The device handle or MIG handle - mode (unsigned int): The process mode (Compute/Graphics/MPSCompute) +cpdef object device_get_vgpu_processes_utilization_info(intptr_t device, unsigned int last_seen_time_stamp): """ + Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device). - cdef ProcessDetailList_v1 plist = ProcessDetailList_v1() - cdef nvmlProcessDetailList_v1_t *ptr = plist._get_ptr() + Args: + device (Device): The identifier of the target device. + + Returns: + VgpuProcessesUtilizationInfo: The vGPU processes utilization information structure. 
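For context, a hedged sketch (not part of the patch) of how the vGPU metadata calls added above compose; handle and vGPU-instance acquisition are outside this hunk.

    # vgpu_instance would come from device_get_active_vgpus(); handle is a
    # physical GPU handle; nvml is the cuda.bindings.nvml module (assumed).
    vgpu_md = nvml.vgpu_instance_get_metadata(vgpu_instance)
    pgpu_md = nvml.device_get_vgpu_metadata(handle)

    # Compare guest and host metadata to decide whether this vGPU can run on
    # this physical GPU; the returned wrapper mirrors nvmlVgpuPgpuCompatibility_t.
    compat = nvml.get_vgpu_compatibility(vgpu_md, pgpu_md)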
+ """ + cdef VgpuProcessesUtilizationInfo_v1 vgpuProcUtilInfo = VgpuProcessesUtilizationInfo_v1() + cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr = vgpuProcUtilInfo._get_ptr() - # Get size of array with nogil: - ptr.version = sizeof(nvmlProcessDetailList_v1_t) | (1 << 24) - ptr.mode = mode - ptr.numProcArrayEntries = 0 - ptr.procArray = NULL - __status__ = nvmlDeviceGetRunningProcessDetailList(device, ptr) + ptr.version = sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t) | (1 << 24) + ptr.vgpuProcessCount = 0 + ptr.vgpuProcUtilArray = NULL + ptr.lastSeenTimeStamp = last_seen_time_stamp + __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(device, ptr) check_status_size(__status__) - if ptr.numProcArrayEntries == 0: - return plist + if ptr.vgpuProcessCount == 0: + return vgpuProcUtilInfo - procArray = ProcessDetail_v1(ptr.numProcArrayEntries) - plist.proc_array = procArray + cdef VgpuProcessUtilizationInfo_v1 vgpuProcUtilArray = VgpuProcessUtilizationInfo_v1(ptr.vgpuProcessCount) + vgpuProcUtilInfo.vgpu_proc_util_array = vgpuProcUtilArray with nogil: - __status__ = nvmlDeviceGetRunningProcessDetailList(device, ptr) + __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(device, ptr) check_status(__status__) + return vgpuProcUtilInfo -cpdef object device_get_samples(intptr_t device, int type, unsigned long long last_seen_time_stamp): - """Gets recent samples for the GPU. + +cpdef object device_get_gpu_instances(intptr_t device, unsigned int profile_id): + """Get GPU instances for given profile ID. Args: - device (intptr_t): The identifier for the target device. - type (SamplingType): Type of sampling event. - last_seen_time_stamp (unsigned long long): Return only samples with timestamp greater than last_seen_time_stamp. + device (Device): The identifier of the target device. + profile_id (unsigned int): The GPU instance profile ID. See device_get_gpu_instance_profile_info(). - .. seealso:: `nvmlDeviceGetSamples` + Returns: + array: An array of GPU instance handles. """ - cdef unsigned int[1] sample_count = [0] - cdef unsigned int[1] sample_val_type = [0] + cdef unsigned int[1] count = [0] with nogil: - __status__ = nvmlDeviceGetSamples(device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, sample_count, NULL) + __status__ = nvmlDeviceGetGpuInstances(device, profile_id, NULL, count) check_status_size(__status__) - cdef Sample samples = Sample(sample_count[0]) - cdef nvmlSample_t *samples_ptr = samples._get_ptr() - if sample_count[0] == 0: - return samples + + if count[0] == 0: + view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] + + cdef view.array gpuInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") with nogil: - __status__ = nvmlDeviceGetSamples(device, <_SamplingType>type, last_seen_time_stamp, <_ValueType*>sample_val_type, sample_count, samples_ptr) + __status__ = nvmlDeviceGetGpuInstances(device, profile_id, gpuInstances.data, count) check_status(__status__) - return (sample_val_type[0], samples) + return gpuInstances -cpdef object device_get_retired_pages_v2(intptr_t device, int cause): - """Returns the list of retired pages by source, including pages that are pending retirement + +cpdef object gpu_instance_get_compute_instances(intptr_t gpu_instance, unsigned int profile_id): + """Get Compute instances for given profile ID. Args: - device (Device): The identifier of the target device. - cause (PageRetirementCause): Filter page addresses by cause of retirement. 
+ gpu_instance (GpuInstance): The identifier of the target GPU Instance. + profile_id (unsigned int): The Compute instance profile ID. Returns: - tuple: A tuple of two arrays (addresses, timestamps). + array: An array of Compute instance handles. """ - cdef unsigned int[1] page_count = [0] + cdef unsigned int[1] count = [0] with nogil: - __status__ = nvmlDeviceGetRetiredPages_v2(device, <_PageRetirementCause>cause, page_count, NULL, NULL) + __status__ = nvmlGpuInstanceGetComputeInstances(gpu_instance, profile_id, NULL, count) check_status_size(__status__) - if page_count[0] == 0: - return ( - view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0], - view.array(shape=(1,), itemsize=sizeof(unsigned long long), format="Q", mode="c")[:0] - ) - cdef view.array addresses = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c") - cdef view.array timestamps = view.array(shape=(page_count[0],), itemsize=sizeof(unsigned long long), format="Q", mode="c") + + if count[0] == 0: + return view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] + + cdef view.array computeInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") with nogil: - __status__ = nvmlDeviceGetRetiredPages_v2(device, <_PageRetirementCause>cause, page_count, addresses.data, timestamps.data) + __status__ = nvmlGpuInstanceGetComputeInstances(gpu_instance, profile_id, computeInstances.data, count) check_status(__status__) - return (addresses, timestamps) + + return computeInstances -cpdef object device_get_processes_utilization_info(intptr_t device, unsigned long long last_seen_time_stamp): - """Retrieves the recent utilization and process ID for all running processes +cpdef object device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t device): + """Retrieves the counts of SRAM unique uncorrected ECC errors Args: device (Device): The identifier of the target device. - last_seen_time_stamp (unsigned long long): Timestamp in microseconds. Set it to 0 to read utilization based - on all the samples maintained by the driver's internal sample buffer. Set to a timeStamp retrieved from - a previous query to read utilization since the previous query. Returns: - ProcessesUtilizationInfo_v1: The processes utilization information structure. + EccSramUniqueUncorrectedErrorCounts_v1: The ECC SRAM unique uncorrected error counts structure. 
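A sketch (not part of the patch) of enumerating MIG handles with the two calls above. The profile IDs are placeholders; in practice they would come from the GPU-instance and compute-instance profile-info queries, and `nvml`/`handle` are as in the earlier sketches.

    # gi_profile_id / ci_profile_id are placeholder profile IDs (assumptions).
    gpu_instances = nvml.device_get_gpu_instances(handle, gi_profile_id)
    for gi in gpu_instances:
        compute_instances = nvml.gpu_instance_get_compute_instances(gi, ci_profile_id)
        print(hex(gi), len(compute_instances))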
""" - cdef ProcessesUtilizationInfo_v1 procesesUtilInfo = ProcessesUtilizationInfo_v1() - cdef nvmlProcessesUtilizationInfo_t *ptr = procesesUtilInfo._get_ptr() - # Get size of array + cdef EccSramUniqueUncorrectedErrorCounts_v1 errorCounts = EccSramUniqueUncorrectedErrorCounts_v1() + cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr = errorCounts._get_ptr() + with nogil: - ptr.version = sizeof(nvmlProcessesUtilizationInfo_v1_t) | (1 << 24) - ptr.processSamplesCount = 0 - ptr.lastSeenTimeStamp = last_seen_time_stamp - ptr.procUtilArray = NULL - __status__ = nvmlDeviceGetProcessesUtilizationInfo( - device, ptr - ) + ptr.version = sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) | (1 << 24) + ptr.entryCount = 0 + ptr.entries = NULL + __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(device, ptr) check_status_size(__status__) - if ptr.processSamplesCount == 0: - return procesesUtilInfo + cdef EccSramUniqueUncorrectedErrorEntry_v1 entries = EccSramUniqueUncorrectedErrorEntry_v1(ptr.entryCount) + errorCounts.entries = entries - cdef ProcessUtilizationInfo_v1 procUtilArray = ProcessUtilizationInfo_v1(ptr.processSamplesCount) - procesesUtilInfo.proc_util_array = procUtilArray + if ptr.entryCount == 0: + return errorCounts with nogil: - __status__ = nvmlDeviceGetProcessesUtilizationInfo( - device, ptr - ) + __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(device, ptr) check_status(__status__) - return procesesUtilInfo + return errorCounts -cpdef device_set_hostname_v1(intptr_t device, str hostname): - """Set the hostname for the device. +cpdef object device_get_gpu_fabric_info_v(intptr_t device): + """Versioned wrapper around nvmlDeviceGetGpuFabricInfo that accepts a versioned ``nvmlGpuFabricInfo_v2_t`` or later output structure. Args: - device (Device): The identifier of the target device. - hostname (str): The new hostname to set. + device (intptr_t): The identifier of the target device. + + Returns: + nvmlGpuFabricInfo_v3_t: Information about GPU fabric state. + + .. seealso:: `nvmlDeviceGetGpuFabricInfoV` """ - cdef bytes = cpython.PyUnicode_AsASCIIString(hostname) - if len(bytes) > 64: - raise ValueError("hostname must 64 characters or less") + cdef GpuFabricInfo_v3 gpu_fabric_info_v3_py + cdef GpuFabricInfo_v2 gpu_fabric_info_v2_py + cdef nvmlGpuFabricInfoV_t *gpu_fabric_info + if CUDA_VERSION >= 13000: + gpu_fabric_info_v3_py = GpuFabricInfo_v3() + gpu_fabric_info = (gpu_fabric_info_v3_py._get_ptr()) + gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24) + with nogil: + __status__ = nvmlDeviceGetGpuFabricInfoV(device, gpu_fabric_info) + check_status(__status__) + return gpu_fabric_info_v3_py - cdef nvmlHostname_v1_t hostname_struct - memcpy(hostname_struct.value, cpython.PyBytes_AsString(bytes), len(bytes)) + else: + gpu_fabric_info_v2_py = GpuFabricInfo_v2() + gpu_fabric_info = (gpu_fabric_info_v2_py._get_ptr()) + gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v2_t) | (2 << 24) + with nogil: + __status__ = nvmlDeviceGetGpuFabricInfoV(device, gpu_fabric_info) + check_status(__status__) + return gpu_fabric_info_v2_py - with nogil: - __status__ = nvmlDeviceSetHostname_v1(device, &hostname_struct) - check_status(__status__) + +cpdef object device_get_platform_info(intptr_t device): + """Get platform information of this device. + + Args: + device (intptr_t): The identifier of the target device. + + Returns: + nvmlPlatformInfo_v2_t: Pointer to the caller-provided structure of nvmlPlatformInfo_t. + + .. 
seealso:: `nvmlDeviceGetPlatformInfo` + """ + cdef PlatformInfo_v1 platform_info_v1_py + cdef PlatformInfo_v2 platform_info_v2_py + cdef nvmlPlatformInfo_t *platform_info + + if CUDA_VERSION >= 13000: + platform_info_v2_py = PlatformInfo_v2() + platform_info = (platform_info_v2_py._get_ptr()) + platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24) + with nogil: + __status__ = nvmlDeviceGetPlatformInfo(device, platform_info) + check_status(__status__) + return platform_info_v2_py + + else: + platform_info_v1_py = PlatformInfo_v1() + platform_info = (platform_info_v1_py._get_ptr()) + platform_info.version = sizeof(nvmlPlatformInfo_v1_t) | (1 << 24) + with nogil: + __status__ = nvmlDeviceGetPlatformInfo(device, platform_info) + check_status(__status__) + return platform_info_v1_py -cpdef str device_get_hostname_v1(intptr_t device): - """Get the hostname for the device. +cpdef object device_get_nvlink_info(intptr_t device): + """Query NVLINK information associated with this device. Args: - device (Device): The identifier of the target device. + device (intptr_t): The identifier of the target device. Returns: - str: Hostname of the device. + nvmlNvLinkInfo_v2_t: Reference to ``nvmlNvLinkInfo_t``. + + .. seealso:: `nvmlDeviceGetNvLinkInfo` """ - cdef nvmlHostname_v1_t hostname - with nogil: - __status__ = nvmlDeviceGetHostname_v1(device, &hostname) - check_status(__status__) - return cpython.PyUnicode_FromString(hostname.value) + cdef NvLinkInfo_v1 info_v1_py + cdef NvLinkInfo_v2 info_v2_py + cdef nvmlNvLinkInfo_t *info + if CUDA_VERSION >= 13000: + info_v2_py = NvLinkInfo_v2() + info = (info_v2_py._get_ptr()) + info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24) + with nogil: + __status__ = nvmlDeviceGetNvLinkInfo(device, info) + check_status(__status__) + return info_v2_py -cdef FieldValue _cast_field_values(values): - if isinstance(values, FieldValue): - return values - cdef FieldValue values_ - cdef unsigned int valuesCount = len(values) - values_ = FieldValue(valuesCount) - for i, v in enumerate(values): - if isinstance(v, tuple): - if len(v) != 2: - raise ValueError("FieldValue tuple must be of length 2") - if not isinstance(v[0], int) or not isinstance(v[1], int): - raise ValueError("FieldValue tuple elements must be integers") - values_[i].field_id = v[0] - values_[i].scope_id = v[1] - elif isinstance(v, int): - values_[i].field_id = v - else: - raise ValueError("Each entry must be an integer field ID, or a tuple of (field ID, scope ID)") - return values_ + else: + info_v1_py = NvLinkInfo_v1() + info = (info_v1_py._get_ptr()) + info.version = sizeof(nvmlNvLinkInfo_v1_t) | (1 << 24) + with nogil: + __status__ = nvmlDeviceGetNvLinkInfo(device, info) + check_status(__status__) + return info_v1_py -cpdef object device_get_field_values(intptr_t device, values): - """Request values for a list of fields for a device. This API allows multiple fields to be queried at once. If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs will be populated from a single call rather than making a driver call for each fieldId. +cpdef intptr_t system_event_set_create(): + """Create an empty set of system events. 
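Throughout these wrappers the struct `version` field is filled with the same arithmetic as NVML's NVML_STRUCT_VERSION macro: the low 24 bits carry the struct size and the top byte carries the API version. A small illustrative decoding; the size value used here is made up, not a real sizeof.

    def nvml_struct_version(struct_size: int, ver: int) -> int:
        # Equivalent of the sizeof(struct) | (ver << 24) expressions in the Cython code.
        return struct_size | (ver << 24)

    v = nvml_struct_version(0x58, 2)
    assert v >> 24 == 2           # version byte
    assert v & 0xFFFFFF == 0x58   # struct size in bytes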
Event set should be freed by ``nvmlSystemEventSetFree``.""" + cdef nvmlSystemEventSetCreateRequest_v1_t[1] request + with nogil: + request[0].version = sizeof(nvmlSystemEventSetCreateRequest_v1_t) | (1 << 24) + __status__ = nvmlSystemEventSetCreate(request) + check_status(__status__) + return (request[0].set) - Args: - device (intptr_t): The device handle of the GPU to request field values for. - values (FieldValue): Array of FieldValue specifying what to retrieve. - .. seealso:: `nvmlDeviceGetFieldValues` - """ - cdef FieldValue values_ = _cast_field_values(values) - cdef nvmlFieldValue_t *ptr = values_._get_ptr() - cdef unsigned int valuesCount = len(values) +cpdef system_event_set_free(intptr_t event_set): + """Frees an event set.""" + cdef nvmlSystemEventSetFreeRequest_v1_t[1] request + request[0].set = event_set with nogil: - __status__ = nvmlDeviceGetFieldValues(device, valuesCount, ptr) + request[0].version = sizeof(nvmlSystemEventSetFreeRequest_v1_t) | (1 << 24) + __status__ = nvmlSystemEventSetFree(request) check_status(__status__) - values_._data.resize((valuesCount,)) - return values_ - -cpdef object device_clear_field_values(intptr_t device, values): - """Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once. +cpdef system_register_events(unsigned long long event_types, intptr_t event_set): + """Starts recording of events on system and add the events to specified ``nvmlSystemEventSet_t``. Args: - device (Device): The device handle of the GPU to request field values for - values (FieldValue): FieldValue instance to hold field values. Each value's fieldId must be populated - prior to this call + event_types (unsigned long long): Bitmask of nvmlSystemEventType_t values representing the events to register. + event_set (intptr_t): The system event set handle. """ - cdef FieldValue values_ = _cast_field_values(values) - cdef nvmlFieldValue_t *ptr = values_._get_ptr() - cdef unsigned int valuesCount = len(values) - + cdef nvmlSystemRegisterEventRequest_v1_t[1] request + request[0].set = event_set + request[0].eventTypes = event_types with nogil: - __status__ = nvmlDeviceClearFieldValues(device, valuesCount, ptr) + request[0].version = sizeof(nvmlSystemRegisterEventRequest_v1_t) | (1 << 24) + __status__ = nvmlSystemRegisterEvents(request) check_status(__status__) -cpdef object device_get_supported_vgpus(intptr_t device): - """Retrieve the supported vGPU types on a physical GPU (device). +cpdef object system_event_set_wait(intptr_t event_set, unsigned int timeout_ms, unsigned int buffer_size): + """Waits for events to occur on the system event set. Args: - device (Device): The identifier of the target device. + event_set (intptr_t): The system event set handle. + timeout_ms (unsigned int): The maximum amount of time in milliseconds to wait for an event. + buffer_size (unsigned int): The size of the event buffer. Returns: - array: An array of supported vGPU type IDs. + SystemEvent: The system event that occurred. 
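A hedged sketch (not part of the patch) of the system-event lifecycle built from the four wrappers added here. The event-type bitmask is a placeholder for values from nvmlSystemEventType_t, and the returned SystemEventData_v1 wrapper is assumed to be iterable.

    # nvml: the cuda.bindings.nvml module (assumed).
    event_set = nvml.system_event_set_create()
    try:
        nvml.system_register_events(event_type_mask, event_set)  # placeholder mask
        # Wait up to 1000 ms for events, with room for 16 entries.
        events = nvml.system_event_set_wait(event_set, 1000, 16)
        for event in events:
            print(event)
    finally:
        nvml.system_event_set_free(event_set)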
""" - cdef unsigned int[1] vgpuCount = [0] - with nogil: - __status__ = nvmlDeviceGetSupportedVgpus(device, vgpuCount, NULL) - check_status_size(__status__) - if vgpuCount[0] == 0: - return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0] - cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c") + cdef nvmlSystemEventSetWaitRequest_v1_t[1] request + cdef SystemEventData_v1 event_data = SystemEventData_v1(buffer_size) + request[0].timeoutms = timeout_ms + request[0].set = event_set + request[0].data = (event_data._get_ptr()) + request[0].dataSize = buffer_size with nogil: - __status__ = nvmlDeviceGetSupportedVgpus(device, vgpuCount, vgpuTypeIds.data) + request[0].version = sizeof(nvmlSystemEventSetWaitRequest_v1_t) | (1 << 24) + __status__ = nvmlSystemEventSetWait(request) check_status(__status__) - return vgpuTypeIds + event_data._data.resize((request[0].numEvent,)) + return event_data -cpdef object device_get_creatable_vgpus(intptr_t device): - """Retrieve the currently creatable vGPU types on a physical GPU (device). +cpdef unsigned int device_get_fan_speed_rpm(intptr_t device, unsigned int fan): + """Retrieves the intended operating speed in rotations per minute (RPM) of the device's specified fan. Args: - device (Device): The identifier of the target device. + device (intptr_t): The identifier of the target device. + fan (unsigned int): The index of the fan to query. Returns: - array: An array of createable vGPU type IDs. + rpm (unsigned int): The fan speed in RPM. + + .. seealso:: `nvmlDeviceGetFanSpeedRPM` """ - cdef unsigned int[1] vgpuCount = [0] - with nogil: - __status__ = nvmlDeviceGetCreatableVgpus(device, vgpuCount, NULL) - check_status_size(__status__) - if vgpuCount[0] == 0: - return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0] - cdef view.array vgpuTypeIds = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c") + cdef nvmlFanSpeedInfo_v1_t[1] fan_speed + fan_speed[0].version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24) + fan_speed[0].fan = fan with nogil: - __status__ = nvmlDeviceGetCreatableVgpus(device, vgpuCount, vgpuTypeIds.data) + __status__ = nvmlDeviceGetFanSpeedRPM(device, fan_speed) check_status(__status__) - return vgpuTypeIds + return fan_speed[0].speed -cpdef object device_get_active_vgpus(intptr_t device): - """Retrieve the active vGPU instances on a device. +cpdef int device_get_margin_temperature(intptr_t device): + """Retrieves the thermal margin temperature (distance to nearest slowdown threshold). Args: - device (Device): The identifier of the target device. + device (intptr_t): The identifier of the target device. Returns: - array: An array of active vGPU instance IDs. + margin_temperature (int): The margin temperature value. + + .. 
seealso:: `nvmlDeviceGetMarginTemperature` """ - cdef unsigned int[1] vgpuCount = [0] - with nogil: - __status__ = nvmlDeviceGetActiveVgpus(device, vgpuCount, NULL) - check_status_size(__status__) - if vgpuCount[0] == 0: - return view.array(shape=(1,), itemsize=sizeof(unsigned int), format="I", mode="c")[:0] - cdef view.array vgpuInstances = view.array(shape=(deviceCount[0],), itemsize=sizeof(unsigned int), format="I", mode="c") + cdef nvmlMarginTemperature_v1_t[1] margin_temp_info + margin_temp_info[0].version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24) with nogil: - __status__ = nvmlDeviceGetActiveVgpus(device, vgpuCount, vgpuInstances.data) + __status__ = nvmlDeviceGetMarginTemperature(device, margin_temp_info) check_status(__status__) - return vgpuInstances + return margin_temp_info[0].marginTemperature -cpdef str vgpu_instance_get_vm_id(unsigned int vgpu_instance): - """Retrieve the VM ID associated with a vGPU instance. +cpdef object device_get_clock_offsets(intptr_t device, nvmlClockType_t clock_type, nvmlPstates_t pstate): + """Retrieve min, max and current clock offset of some clock domain for a given PState. Args: - vgpu_instance (unsigned int): The identifier of the target vGPU instance. + device (intptr_t): The identifier of the target device. Returns: - tuple[str, VgpuVmIdType]: A tuple of (id, id_type). + nvmlClockOffset_v1_t: Structure specifying the clock type (input) and the pstate (input) retrieved clock offset value (output), min clock offset (output) and max clock offset (output). + + .. seealso:: `nvmlDeviceGetClockOffsets` """ - cdef unsigned int size = 80 - cdef char[80] vmId - cdef nvmlVgpuVmIdType_t[1] vmIdType + cdef ClockOffset_v1 info_py = ClockOffset_v1() + cdef nvmlClockOffset_v1_t *info = (info_py._get_ptr()) + info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24) + info.type = clock_type + info.pstate = pstate with nogil: - __status__ = nvmlVgpuInstanceGetVmID(vgpu_instance, vmId, size, vmIdType) + __status__ = nvmlDeviceGetClockOffsets(device, info) check_status(__status__) - return (cpython.PyUnicode_FromString(vmId), vmIdType[0]) + return info_py -cpdef object gpu_instance_get_creatable_vgpus(intptr_t gpu_instance): - """Query the currently creatable vGPU types on a specific GPU Instance. +cpdef object device_get_vgpu_type_supported_placements(intptr_t device, unsigned int vgpu_type_id, unsigned int mode): + """Query the supported vGPU placement ID of the vGPU type. Args: - gpu_instance (GpuInstance): The identifier of the target GPU Instance. + device (intptr_t): Identifier of the target device. + vgpu_type_id (unsigned int): Handle to vGPU type. The vGPU type ID. + mode (unsigned int): The placement mode. 0: Heterogeneous, 1: Homogeneous. Returns: - VgpuTypeIdInfo_v1: The vGPU type ID information structure. - """ - - cdef VgpuTypeIdInfo_v1 pVgpus = VgpuTypeIdInfo_v1() - cdef nvmlVgpuTypeIdInfo_v1_t *ptr = pVgpus._get_ptr() + nvmlVgpuPlacementList_v2_t: Pointer to the vGPU placement structure ``nvmlVgpuPlacementList_t``. - # Get size of array + .. 
seealso:: `nvmlDeviceGetVgpuTypeSupportedPlacements` + """ + cdef VgpuPlacementList_v2 p_placement_list_py = VgpuPlacementList_v2() + cdef nvmlVgpuPlacementList_t *p_placement_list = (p_placement_list_py._get_ptr()) with nogil: - ptr.version = sizeof(nvmlVgpuTypeIdInfo_v1_t) | (1 << 24) - ptr.vgpuCount = 0 - ptr.vgpuTypeIds = NULL - __status__ = nvmlGpuInstanceGetCreatableVgpus(gpu_instance, ptr) + p_placement_list.count = 0 + p_placement_list.placementIds = NULL + p_placement_list.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24) + __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(device, vgpu_type_id, p_placement_list) check_status_size(__status__) - if ptr.vgpuCount == 0: - return pVgpus + if p_placement_list.count == 0: + return p_placement_list_py - cdef view.array vgpuTypeIds = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c") - pVgpus.vgpu_type_ids = vgpuTypeIds + cdef view.array placement_ids = view.array(shape=(p_placement_list.count,), itemsize=sizeof(unsigned int), format="I", mode="c") + p_placement_list_py.placement_ids = placement_ids with nogil: - __status__ = nvmlGpuInstanceGetCreatableVgpus(gpu_instance, ptr) + __status__ = nvmlDeviceGetVgpuTypeSupportedPlacements(device, vgpu_type_id, p_placement_list) check_status(__status__) - return pVgpus + return p_placement_list_py -cpdef object gpu_instance_get_active_vgpus(intptr_t gpu_instance): - """Retrieve the active vGPU instances within a GPU instance. +cpdef unsigned int vgpu_instance_get_placement_id(unsigned int vgpu_instance): + """Query the placement ID of active vGPU instance. Args: - gpu_instance (GpuInstance): The identifier of the target GPU Instance. - - Returns: - ActiveVgpuInstanceInfo: The vGPU instance ID information structure. - """ - cdef ActiveVgpuInstanceInfo_v1 activeVgpuInfo = ActiveVgpuInstanceInfo_v1() - cdef nvmlActiveVgpuInstanceInfo_v1_t *ptr = activeVgpuInfo._get_ptr() - - with nogil: - ptr.version = sizeof(nvmlActiveVgpuInstanceInfo_v1_t) | (1 << 24) - ptr.vgpuCount = 0 - ptr.vgpuInstances = NULL - __status__ = nvmlGpuInstanceGetActiveVgpus(gpu_instance, ptr) - check_status_size(__status__) - - if ptr.vgpuCount == 0: - return activeVgpuInfo + vgpu_instance (unsigned int): Identifier of the target vGPU instance. - cdef view.array vgpuInstances = view.array(shape=(ptr.vgpuCount,), itemsize=sizeof(unsigned int), format="I", mode="c") - activeVgpuInfo.vgpu_instances = vgpuInstances + Returns: + unsigned int: The placement ID + .. seealso:: `nvmlVgpuInstanceGetPlacementId` + """ + cdef nvmlVgpuPlacementId_t[1] p_placement + p_placement[0].version = sizeof(nvmlVgpuPlacementId_v1_t) | (1 << 24) with nogil: - __status__ = nvmlGpuInstanceGetActiveVgpus(gpu_instance, ptr) + __status__ = nvmlVgpuInstanceGetPlacementId(vgpu_instance, p_placement) check_status(__status__) - - return activeVgpuInfo + return p_placement[0].placementId -cpdef object gpu_instance_get_vgpu_type_creatable_placements(intptr_t gpu_instance, unsigned int vgpu_type_id): - """Query the creatable vGPU placement ID of the vGPU type within a GPU instance. +cpdef object device_get_capabilities(intptr_t device): + """Get device capabilities. Args: - gpu_instance (GpuInstance): The identifier of the target GPU Instance. - vgpu_type_id (unsigned int): The vGPU type ID. + device (intptr_t): The identifier of the target device. Returns: - VgpuPlacementList_v2: The vGPU placement list structure. + nvmlDeviceCapabilities_v1_t: Returns GPU's capabilities. + + .. 
seealso:: `nvmlDeviceGetCapabilities` """ + cdef nvmlDeviceCapabilities_t[1] caps + caps[0].version = sizeof(nvmlDeviceCapabilities_v1_t) | (1 << 24) + with nogil: + __status__ = nvmlDeviceGetCapabilities(device, caps) + check_status(__status__) + return caps[0].capMask - cdef VgpuCreatablePlacementInfo_v1 pCreatablePlacementInfo = VgpuCreatablePlacementInfo_v1() - cdef nvmlVgpuCreatablePlacementInfo_v1_t *ptr = pCreatablePlacementInfo._get_ptr() - # Get size of array - with nogil: - ptr.version = sizeof(nvmlVgpuCreatablePlacementInfo_v1_t) | (1 << 24) - ptr.count = 0 - ptr.placementIds = NULL - __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpu_instance, ptr) - check_status_size(__status__) +cpdef object device_get_conf_compute_gpu_attestation_report(intptr_t device, char[32] nonce): + """Get Conf Computing GPU attestation report. - if ptr.count == 0: - return pCreatablePlacementInfo + Args: + device (intptr_t): The identifier of the target device. - cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c") - pCreatablePlacementInfo.placement_ids = placementIds + Returns: + nvmlConfComputeGpuAttestationReport_t: Reference in which to return the gpu attestation report. + .. seealso:: `nvmlDeviceGetConfComputeGpuAttestationReport` + """ + cdef ConfComputeGpuAttestationReport gpu_atst_report_py = ConfComputeGpuAttestationReport() + cdef nvmlConfComputeGpuAttestationReport_t *gpu_atst_report = (gpu_atst_report_py._get_ptr()) with nogil: - __status__ = nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpu_instance, ptr) + memcpy(gpu_atst_report.nonce, nonce, 32) + __status__ = nvmlDeviceGetConfComputeGpuAttestationReport(device, gpu_atst_report) check_status(__status__) - - return pCreatablePlacementInfo + return gpu_atst_report_py -cpdef object device_get_vgpu_type_creatable_placements(intptr_t device, unsigned int vgpu_type_id, unsigned int mode): - """Query the creatable vGPU placement ID of the vGPU type within a GPU instance. +cpdef tuple device_get_dram_encryption_mode(intptr_t device): + """Retrieves the current and pending DRAM Encryption modes for the device. Args: - device (Device): The identifier of the target device. - vgpu_type_id (unsigned int): The vGPU type ID. - mode (unsigned int): The placement mode. 0: Heterogeneous, 1: Homogeneous. + device (intptr_t): The identifier of the target device. Returns: - VgpuPlacementList_v2: The vGPU placement list structure. - """ + A 2-tuple containing: - cdef VgpuPlacementList_v2 pPlacementList = VgpuPlacementList_v2() - cdef nvmlVgpuPlacementList_v2_t *ptr = pPlacementList._get_ptr() + - nvmlEnableState_t: Reference in which to return the current DRAM Encryption mode. + - nvmlEnableState_t: Reference in which to return the pending DRAM Encryption mode. - # Get size of array + .. 
seealso:: `nvmlDeviceGetDramEncryptionMode`
+    """
+    cdef nvmlDramEncryptionInfo_t current
+    cdef nvmlDramEncryptionInfo_t pending
     with nogil:
-        ptr.version = sizeof(nvmlVgpuPlacementList_v2_t) | (2 << 24)
-        ptr.count = 0
-        ptr.placementIds = NULL
-        ptr.mode = mode
-        __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(device, vgpu_type_id, ptr)
-    check_status_size(__status__)
+        current.version = sizeof(nvmlDramEncryptionInfo_t) | (1 << 24)
+        pending.version = sizeof(nvmlDramEncryptionInfo_t) | (1 << 24)
+        __status__ = nvmlDeviceGetDramEncryptionMode(device, &current, &pending)
+    check_status(__status__)
+    return (current.encryptionState, pending.encryptionState)
-    if ptr.count == 0:
-        return pPlacementList
-    cdef view.array placementIds = view.array(shape=(ptr.count,), itemsize=sizeof(unsigned int), format="I", mode="c")
-    pPlacementList.placement_ids = placementIds
+cpdef device_set_dram_encryption_mode(intptr_t device, int dram_encryption):
+    """Set the DRAM Encryption mode for the device.
+
+    Args:
+        device (intptr_t): The identifier of the target device.
+        dram_encryption (nvmlEnableState_t): The target DRAM Encryption mode.
+    .. seealso:: `nvmlDeviceSetDramEncryptionMode`
+    """
+    cdef nvmlDramEncryptionInfo_t[1] encryption
     with nogil:
-        __status__ = nvmlDeviceGetVgpuTypeCreatablePlacements(device, vgpu_type_id, ptr)
+        encryption[0].version = sizeof(nvmlDramEncryptionInfo_t) | (1 << 24)
+        encryption[0].encryptionState = dram_encryption
+        __status__ = nvmlDeviceSetDramEncryptionMode(device, encryption)
     check_status(__status__)
-    return pPlacementList
-
-cpdef object vgpu_instance_get_metadata(unsigned int vgpu_instance):
-    """Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its
-    associated VM such as the currently installed NVIDIA guest driver version, together with host driver version and
-    an opaque data section containing internal state.
+cpdef object device_get_gpu_instance_profile_info_by_id_v(intptr_t device, unsigned int profile_id):
+    """GPU instance profile query function that accepts profile ID, instead of profile name. It accepts a versioned ``nvmlGpuInstanceProfileInfo_v3_t`` or later output structure.
     Args:
-        vgpu_instance (unsigned int): The identifier of the target vGPU instance.
+        device (intptr_t): The identifier of the target device.
+        profile_id (unsigned int): One of the profile IDs.
     Returns:
-        VgpuMetadata: Metadata.
-    """
-    cdef VgpuMetadata vgpuMetadata = VgpuMetadata()
-    cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuMetadata_t)]
-    cdef nvmlVgpuMetadata_t *ptr = vgpuMetadata._get_ptr()
+        nvmlGpuInstanceProfileInfo_v3_t: Returns detailed profile information.
+    .. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoByIdV`
+    """
+    cdef GpuInstanceProfileInfo_v3 info_py = GpuInstanceProfileInfo_v3()
+    cdef nvmlGpuInstanceProfileInfo_v3_t *info = (info_py._get_ptr())
+    info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24)
     with nogil:
-        __status__ = nvmlVgpuInstanceGetMetadata(vgpu_instance, ptr, bufferSize)
-    check_status_size(__status__)
-
-    return vgpuMetadata
+        __status__ = nvmlDeviceGetGpuInstanceProfileInfoByIdV(device, profile_id, info)
+    check_status(__status__)
+    return info_py
-cpdef object device_get_vgpu_metadata(intptr_t device):
-    """Returns a vGPU metadata structure for the physical GPU indicated by device.
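# Illustrative usage sketch (editorial example, not part of this patch) for the
# DRAM-encryption bindings added above. It assumes `device` is an NVML device
# handle obtained elsewhere (for example from the test suite's `all_devices`
# fixture) and that NVML has already been initialized.
from cuda.bindings import _nvml as nvml

def reapply_dram_encryption_state(device):
    # (current, pending) encryption states are returned as plain ints.
    current, pending = nvml.device_get_dram_encryption_mode(device)
    try:
        # Write the current state back; a real caller would pass the desired
        # nvmlEnableState_t value instead.
        nvml.device_set_dram_encryption_mode(device, current)
    except nvml.NoPermissionError:
        pass  # changing the mode typically requires admin privileges
    return current, pending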
The structure contains - information about the GPU and the currently installed NVIDIA host driver version that's controlling it, - together with an opaque data section containing internal state. +cpdef object device_get_gpu_instance_profile_info_v(intptr_t device, unsigned int profile): + """Versioned wrapper around ``nvmlDeviceGetGpuInstanceProfileInfo`` that accepts a versioned ``nvmlGpuInstanceProfileInfo_v3_t`` or later output structure. Args: - device (Device): The identifier of the target device. + device (intptr_t): The identifier of the target device. + profile (unsigned int): One of the NVML_GPU_INSTANCE_PROFILE_*. Returns: - VgpuPgpuMetadata: Metadata. - """ - cdef VgpuPgpuMetadata pgpuMetadata = VgpuPgpuMetadata() - cdef unsigned int[1] bufferSize = [sizeof(nvmlVgpuPgpuMetadata_t)] - cdef nvmlVgpuPgpuMetadata_t *ptr = pgpuMetadata._get_ptr() + nvmlGpuInstanceProfileInfo_v3_t: Returns detailed profile information. + .. seealso:: `nvmlDeviceGetGpuInstanceProfileInfoV` + """ + cdef GpuInstanceProfileInfo_v3 info_py = GpuInstanceProfileInfo_v3() + cdef nvmlGpuInstanceProfileInfo_v3_t *info = (info_py._get_ptr()) + info.version = sizeof(nvmlGpuInstanceProfileInfo_v3_t) | (3 << 24) with nogil: - __status__ = nvmlDeviceGetVgpuMetadata(device, ptr, bufferSize) - check_status_size(__status__) - - return pgpuMetadata + __status__ = nvmlDeviceGetGpuInstanceProfileInfoV(device, profile, info) + check_status(__status__) + return info_py -cpdef object get_vgpu_compatibility(VgpuMetadata vgpu_metadata, VgpuPgpuMetadata pgpu_metadata): - """Takes a vGPU instance metadata structure read from vgpu_instance_get_metadata() and a vGPU metadata structure - for a physical GPU read from device_get_vgpu_metadata, and returns compatibility information of the vGPU instance - and the physical GPU. +cpdef intptr_t device_get_handle_by_uuidv(int type, bytes uuid) except? 0: + """Acquire the handle for a particular device, based on its globally unique immutable UUID (in either ASCII or binary format) associated with each device. See ``nvmlUUID_v1_t`` for more information on the UUID struct. The caller must set the appropriate version prior to calling this API. Args: - vgpu_metadata (VgpuMetadata): The vGPU instance metadata. - pgpu_metadata (VgpuPgpuMetadata): The physical GPU metadata. + type (UUIDType): The format of the UUID being provided (ASCII or binary). + uuid (intptr_t): The UUID of the target GPU or MIG instance. Returns: - VgpuPgpuCompatibility: Compatibility information. + intptr_t: Reference in which to return the device handle or MIG device handle. + + .. 
seealso:: `nvmlDeviceGetHandleByUUIDV` """ - cdef VgpuPgpuCompatibility compatibilityInfo = VgpuPgpuCompatibility() - cdef nvmlVgpuPgpuCompatibility_t *ptr = compatibilityInfo._get_ptr() - cdef nvmlVgpuMetadata_t *vgpu_metadata_ptr = vgpu_metadata._get_ptr() - cdef nvmlVgpuPgpuMetadata_t *pgpu_metadata_ptr = pgpu_metadata._get_ptr() + cdef Device device + cdef nvmlUUID_t[1] uuid_struct + cdef int NVML_DEVICE_UUID_ASCII_LEN = 41 + cdef int NVML_DEVICE_UUID_BINARY_LEN = 16 + cdef char *uuid_ptr = cpython.PyBytes_AsString(uuid) + + if type == UUIDType.ASCII: + if len(uuid) != NVML_DEVICE_UUID_ASCII_LEN - 1: + raise ValueError(f"UUID ASCII string must be {NVML_DEVICE_UUID_ASCII_LEN - 1} bytes long") + memcpy((uuid_struct[0].value.str), uuid_ptr, NVML_DEVICE_UUID_ASCII_LEN) + elif type == UUIDType.BINARY: + if len(uuid) != NVML_DEVICE_UUID_BINARY_LEN - 1: + raise ValueError(f"UUID binary string must be {NVML_DEVICE_UUID_BINARY_LEN - 1} bytes long") + memcpy((uuid_struct[0].value.bytes), uuid_ptr, NVML_DEVICE_UUID_BINARY_LEN) + else: + raise ValueError("Invalid UUID format specified") with nogil: - __status__ = nvmlGetVgpuCompatibility(vgpu_metadata_ptr, pgpu_metadata_ptr, ptr) + uuid_struct[0].version = sizeof(nvmlUUID_v1_t) | (1 << 24) + uuid_struct[0].type = type + __status__ = nvmlDeviceGetHandleByUUIDV(uuid_struct, &device) check_status(__status__) + return device - return compatibilityInfo +cpdef unsigned long long device_get_pdi(intptr_t device): + """Retrieves the Per Device Identifier (PDI) associated with this device. -cpdef tuple get_vgpu_version(): - """Query the ranges of supported vGPU versions. + Args: + device (intptr_t): The identifier of the target device. Returns: - tuple: A tuple of (VgpuVersion supported, VgpuVersion current). - """ - cdef VgpuVersion supported = VgpuVersion() - cdef nvmlVgpuVersion_t *supported_ptr = supported._get_ptr() - cdef VgpuVersion current = VgpuVersion() - cdef nvmlVgpuVersion_t *current_ptr = current._get_ptr() + unsigned long long: The GPU PDI. + .. seealso:: `nvmlDeviceGetPdi` + """ + cdef nvmlPdi_v1_t[1] pdi + pdi[0].version = sizeof(nvmlPdi_v1_t) | (1 << 24) with nogil: - __status__ = nvmlGetVgpuVersion(supported_ptr, current_ptr) - + __status__ = nvmlDeviceGetPdi(device, pdi) check_status(__status__) - return (supported, current) + return pdi[0].value -cpdef object device_get_vgpu_instances_utilization_info(intptr_t device): - """ - Retrieves recent utilization for vGPU instances running on a physical GPU (device). +cpdef str device_get_performance_modes(intptr_t device): + """Retrieves a performance mode string with all the performance modes defined for this device along with their associated GPU Clock and Memory Clock values. Not all tokens will be reported on all GPUs, and additional tokens may be added in the future. For backwards compatibility we still provide nvclock and memclock; those are the same as nvclockmin and memclockmin. Args: - device (Device): The identifier of the target device. + device (intptr_t): The identifier of the target device. Returns: - VgpuInstancesUtilizationInfo_v1: The vGPU instances utilization information structure. - """ - cdef VgpuInstancesUtilizationInfo_v1 vgpuUtilInfo = VgpuInstancesUtilizationInfo_v1() - cdef nvmlVgpuInstancesUtilizationInfo_v1_t *ptr = vgpuUtilInfo._get_ptr() + str: The performance level string. + .. 
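# Illustrative usage sketch (editorial example, not part of this patch):
# round-tripping a device handle through the UUID-based lookup added above.
# Assumes `device` is a valid NVML device handle obtained elsewhere;
# `device_get_uuid` returns the 40-character "GPU-..." ASCII form, which is
# what the ASCII path here expects.
from cuda.bindings import _nvml as nvml

def reacquire_by_uuid(device):
    uuid = nvml.device_get_uuid(device)
    handle = nvml.device_get_handle_by_uuidv(nvml.UUIDType.ASCII, uuid.encode("ascii"))
    assert handle == device  # same physical device, same handle value
    return handle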
seealso:: `nvmlDeviceGetPerformanceModes` + """ + cdef nvmlDevicePerfModes_t[1] perf_modes + perf_modes[0].version = sizeof(nvmlDevicePerfModes_v1_t) | (1 << 24) with nogil: - ptr.version = sizeof(nvmlVgpuInstancesUtilizationInfo_v1_t) | (1 << 24) - ptr.vgpuInstanceCount = 0 - ptr.vgpuUtilArray = NULL - __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(device, ptr) - check_status_size(__status__) + __status__ = nvmlDeviceGetPerformanceModes(device, perf_modes) + check_status(__status__) + return cpython.PyUnicode_FromString(perf_modes[0].str) - if ptr.vgpuInstanceCount == 0: - return vgpuUtilInfo - cdef VgpuInstanceUtilizationInfo_v1 vgpuUtilArray = VgpuInstanceUtilizationInfo_v1(ptr.vgpuInstanceCount) - vgpuUtilInfo.vgpu_util_array = vgpuUtilArray +cpdef unsigned int device_get_unrepairable_memory_flag_v1(intptr_t device): + """Get the unrepairable memory flag for a given GPU. + + Args: + device (intptr_t): The identifier of the target device. + + Returns: + unsigned int: unrepairable memory status + .. seealso:: `nvmlDeviceGetUnrepairableMemoryFlag_v1` + """ + cdef nvmlUnrepairableMemoryStatus_v1_t[1] unrepairable_memory_status with nogil: - __status__ = nvmlDeviceGetVgpuInstancesUtilizationInfo(device, ptr) + __status__ = nvmlDeviceGetUnrepairableMemoryFlag_v1(device, unrepairable_memory_status) check_status(__status__) - - return vgpuUtilInfo + return unrepairable_memory_status.bUnrepairableMemory -cpdef object device_get_vgpu_processes_utilization_info(intptr_t device, unsigned int last_seen_time_stamp): - """ - Retrieves recent utilization for processes running on vGPU instances on a physical GPU (device). +cpdef unsigned int device_get_vgpu_heterogeneous_mode(intptr_t device): + """Get the vGPU heterogeneous mode for the device. Args: - device (Device): The identifier of the target device. + device (intptr_t): The identifier of the target device. Returns: - VgpuProcessesUtilizationInfo: The vGPU processes utilization information structure. - """ - cdef VgpuProcessesUtilizationInfo_v1 vgpuProcUtilInfo = VgpuProcessesUtilizationInfo_v1() - cdef nvmlVgpuProcessesUtilizationInfo_v1_t *ptr = vgpuProcUtilInfo._get_ptr() + unsigned int: The mode + .. seealso:: `nvmlDeviceGetVgpuHeterogeneousMode` + """ + cdef nvmlVgpuHeterogeneousMode_t[1] heterogeneous_mode with nogil: - ptr.version = sizeof(nvmlVgpuProcessesUtilizationInfo_v1_t) | (1 << 24) - ptr.vgpuProcessCount = 0 - ptr.vgpuProcUtilArray = NULL - ptr.lastSeenTimeStamp = last_seen_time_stamp - __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(device, ptr) - check_status_size(__status__) + heterogeneous_mode[0].version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24) + __status__ = nvmlDeviceGetVgpuHeterogeneousMode(device, heterogeneous_mode) + check_status(__status__) + return heterogeneous_mode[0].mode - if ptr.vgpuProcessCount == 0: - return vgpuProcUtilInfo - cdef VgpuProcessUtilizationInfo_v1 vgpuProcUtilArray = VgpuProcessUtilizationInfo_v1(ptr.vgpuProcessCount) - vgpuProcUtilInfo.vgpu_proc_util_array = vgpuProcUtilArray +cpdef device_set_vgpu_heterogeneous_mode(intptr_t device, int mode): + """Enable or disable vGPU heterogeneous mode for the device. + + Args: + device (intptr_t): Identifier of the target device. + heterogeneous_mode (unsigned int): mode + .. 
seealso:: `nvmlDeviceSetVgpuHeterogeneousMode` + """ + cdef nvmlVgpuHeterogeneousMode_t[1] heterogeneous_mode with nogil: - __status__ = nvmlDeviceGetVgpuProcessesUtilizationInfo(device, ptr) + heterogeneous_mode[0].version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24) + heterogeneous_mode[0].mode = mode + __status__ = nvmlDeviceSetVgpuHeterogeneousMode(device, heterogeneous_mode) check_status(__status__) - return vgpuProcUtilInfo - -cpdef object device_get_gpu_instances(intptr_t device, unsigned int profile_id): - """Get GPU instances for given profile ID. +cpdef object gpu_instance_get_vgpu_heterogeneous_mode(intptr_t gpu_instance): + """Get the vGPU heterogeneous mode for the GPU instance. Args: - device (Device): The identifier of the target device. - profile_id (unsigned int): The GPU instance profile ID. See device_get_gpu_instance_profile_info(). + gpu_instance (intptr_t): The GPU instance handle. Returns: - array: An array of GPU instance handles. + unsigned int: the mode + + .. seealso:: `nvmlGpuInstanceGetVgpuHeterogeneousMode` """ - cdef unsigned int[1] count = [0] + cdef nvmlVgpuHeterogeneousMode_t[1] heterogeneous_mode with nogil: - __status__ = nvmlDeviceGetGpuInstances(device, profile_id, NULL, count) - check_status_size(__status__) + heterogeneous_mode[0].version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24) + __status__ = nvmlGpuInstanceGetVgpuHeterogeneousMode(gpu_instance, heterogeneous_mode) + check_status(__status__) + return heterogeneous_mode[0].mode - if count[0] == 0: - view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] - cdef view.array gpuInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") +cpdef gpu_instance_set_vgpu_heterogeneous_mode(intptr_t gpu_instance, unsigned int mode): + """Enable or disable vGPU heterogeneous mode for the GPU instance. + + Args: + gpu_instance (intptr_t): The GPU instance handle. + mode (unsigned int): The mode + + .. seealso:: `nvmlGpuInstanceSetVgpuHeterogeneousMode` + """ + cdef nvmlVgpuHeterogeneousMode_t[1] heterogeneous_mode with nogil: - __status__ = nvmlDeviceGetGpuInstances(device, profile_id, gpuInstances.data, count) + heterogeneous_mode[0].version = sizeof(nvmlVgpuHeterogeneousMode_v1_t) | (1 << 24) + heterogeneous_mode[0].mode = mode + __status__ = nvmlGpuInstanceSetVgpuHeterogeneousMode(gpu_instance, heterogeneous_mode) check_status(__status__) - return gpuInstances - -cpdef object gpu_instance_get_compute_instances(intptr_t gpu_instance, unsigned int profile_id): - """Get Compute instances for given profile ID. +cpdef tuple device_get_vgpu_utilization(intptr_t device, unsigned long long last_seen_time_stamp): + """Retrieves current utilization for vGPUs on a physical GPU (device). Args: - gpu_instance (GpuInstance): The identifier of the target GPU Instance. - profile_id (unsigned int): The Compute instance profile ID. + device (intptr_t): The identifier for the target device. + last_seen_time_stamp (unsigned long long): Return only samples with timestamp greater than last_seen_time_stamp. Returns: - array: An array of Compute instance handles. + A 2-tuple containing: + + - samples: Returned sample values. + - utilizationSamples: Utilization samples. + + .. 
seealso:: `nvmlDeviceGetVgpuUtilization` """ - cdef unsigned int[1] count = [0] + cdef unsigned int vgpu_instance_samples_count with nogil: - __status__ = nvmlGpuInstanceGetComputeInstances(gpu_instance, profile_id, NULL, count) + __status__ = nvmlDeviceGetVgpuUtilization( + device, + last_seen_time_stamp, + NULL, + &vgpu_instance_samples_count, + NULL + ) check_status_size(__status__) - if count[0] == 0: - view.array(shape=(1,), itemsize=sizeof(intptr_t), format="P", mode="c")[:0] + if vgpu_instance_samples_count == 0: + return ( + view.array(shape=(1,), itemsize=sizeof(int), format="I", mode="c")[:0], + VgpuInstanceUtilizationSample(0) + ) + + cdef view.array arr = view.array(shape=(vgpu_instance_samples_count,), itemsize=sizeof(int), format="I", mode="c") + cdef VgpuInstanceUtilizationSample utilization_samples_py = VgpuInstanceUtilizationSample(vgpu_instance_samples_count) + cdef nvmlVgpuInstanceUtilizationSample_t *ptr = utilization_samples_py._get_ptr() - cdef view.array computeInstances = view.array(shape=(count[0],), itemsize=sizeof(intptr_t), format="P", mode="c") with nogil: - __status__ = nvmlGpuInstanceGetComputeInstances(gpu_instance, profile_id, computeInstances.data, count) + __status__ = nvmlDeviceGetVgpuUtilization( + device, + last_seen_time_stamp, + arr.data, + &vgpu_instance_samples_count, + ptr + ) check_status(__status__) - return computeInstances + return (arr, utilization_samples_py) -cpdef object device_get_sram_unique_uncorrected_ecc_error_counts(intptr_t device): - """Retrieves the counts of SRAM unique uncorrected ECC errors +cpdef object device_read_prm_counters_v1(intptr_t device, PRMCounter_v1 counters): + """Read a list of GPU PRM Counters. Args: - device (Device): The identifier of the target device. + device (intptr_t): Identifer of target GPU device. + counters (PRMCounter_v1): Array holding the input parameters as well as the retrieved counter values. - Returns: - EccSramUniqueUncorrectedErrorCounts_v1: The ECC SRAM unique uncorrected error counts structure. + .. seealso:: `nvmlDeviceReadPRMCounters_v1` """ + # Unlike in the raw C API, counter_list here is an PRMCounter_v1 + # AUTO_LOWPP_ARRAY, and we need to wrap it in a nvmlPRMCounterList_v1_t. 
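# Illustrative usage sketch (editorial example, not part of this patch) for the
# device-level vGPU heterogeneous-mode getter/setter added a little earlier.
# Assumes `device` is a valid NVML device handle on a vGPU-capable host; the
# set call requires admin privileges.
from cuda.bindings import _nvml as nvml

def ensure_vgpu_heterogeneous_mode(device, desired_mode):
    mode = nvml.device_get_vgpu_heterogeneous_mode(device)
    if mode != desired_mode:
        try:
            nvml.device_set_vgpu_heterogeneous_mode(device, desired_mode)
        except nvml.NoPermissionError:
            return mode  # leave the mode unchanged without privileges
    return nvml.device_get_vgpu_heterogeneous_mode(device)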
- cdef EccSramUniqueUncorrectedErrorCounts_v1 errorCounts = EccSramUniqueUncorrectedErrorCounts_v1() - cdef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t *ptr = errorCounts._get_ptr() + cdef nvmlPRMCounterList_v1_t[1] counter_list + counter_list[0].numCounters = len(counters) + counter_list[0].counters = counters._get_ptr() with nogil: - ptr.version = sizeof(nvmlEccSramUniqueUncorrectedErrorCounts_v1_t) | (1 << 24) - ptr.entryCount = 0 - ptr.entries = NULL - __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(device, ptr) - check_status_size(__status__) + __status__ = nvmlDeviceReadPRMCounters_v1(device, counter_list) + check_status(__status__) - cdef EccSramUniqueUncorrectedErrorEntry_v1 entries = EccSramUniqueUncorrectedErrorEntry_v1(ptr.entryCount) - errorCounts.entries = entries + return counters - if ptr.entryCount == 0: - return errorCounts - with nogil: - __status__ = nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(device, ptr) - check_status(__status__) +ctypedef union __nvmlPRMTLV_v1_value_t: + char[496] inData + char[496] outData - return errorCounts +ctypedef struct __nvmlPRMTLV_v1_t: + unsigned dataSize + unsigned status + __nvmlPRMTLV_v1_value_t value -cpdef object device_get_gpu_fabric_info_v(intptr_t device): - """Versioned wrapper around nvmlDeviceGetGpuFabricInfo that accepts a versioned ``nvmlGpuFabricInfo_v2_t`` or later output structure. + +cpdef tuple device_read_write_prm_v1(intptr_t device, bytes in_data): + """Read or write a GPU PRM register. The input is assumed to be in TLV format in network byte order. Args: - device (intptr_t): The identifier of the target device. + device (intptr_t): Identifer of target GPU device. + in_data (bytes): The input data for the PRM register. Returns: - nvmlGpuFabricInfo_v3_t: Information about GPU fabric state. + A 2-tuple containing: - .. seealso:: `nvmlDeviceGetGpuFabricInfoV` + - unsigned int: Status of the PRM operation. + - bytes: Output data in TLV format. + + .. 
seealso:: `nvmlDeviceReadWritePRM_v1`
     """
-    cdef GpuFabricInfo_v3 gpu_fabric_info_v3_py
-    cdef GpuFabricInfo_v2 gpu_fabric_info_v2_py
-    cdef nvmlGpuFabricInfoV_t *gpu_fabric_info
-    if CUDA_VERSION >= 13000:
-        gpu_fabric_info_v3_py = GpuFabricInfo_v3()
-        gpu_fabric_info = (gpu_fabric_info_v3_py._get_ptr())
-        gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v3_t) | (3 << 24)
-        with nogil:
-            __status__ = nvmlDeviceGetGpuFabricInfoV(device, gpu_fabric_info)
-        check_status(__status__)
-        return gpu_fabric_info_v3_py
+    cdef int NVML_PRM_DATA_MAX_SIZE = 496
+    cdef __nvmlPRMTLV_v1_t buffer
+    cdef int in_data_size = len(in_data)
-    else:
-        gpu_fabric_info_v2_py = GpuFabricInfo_v2()
-        gpu_fabric_info = (gpu_fabric_info_v2_py._get_ptr())
-        gpu_fabric_info.version = sizeof(nvmlGpuFabricInfo_v2_t) | (2 << 24)
-        with nogil:
-            __status__ = nvmlDeviceGetGpuFabricInfoV(device, gpu_fabric_info)
-        check_status(__status__)
-        return gpu_fabric_info_v2_py
+    if in_data_size > NVML_PRM_DATA_MAX_SIZE - 1:
+        raise ValueError(f"Input data size exceeds maximum allowed size of {NVML_PRM_DATA_MAX_SIZE - 1} bytes")
+    cdef char *in_data_ptr = cpython.PyBytes_AsString(in_data)
+
+    with nogil:
+        memcpy((buffer.value.inData), in_data_ptr, in_data_size)
+        buffer.dataSize = in_data_size
+        __status__ = nvmlDeviceReadWritePRM_v1(device, &buffer)
+    check_status(__status__)
+
+    cdef bytes out_data = cpython.PyBytes_FromStringAndSize(buffer.value.outData, buffer.dataSize)
+    cdef unsigned int status = buffer.status
+
+    return (status, out_data)
-cpdef object device_get_platform_info(intptr_t device):
-    """Get platform information of this device.
+
+cpdef device_set_nvlink_device_low_power_threshold(intptr_t device, unsigned int threshold):
+    """Set NvLink Low Power Threshold for device.
     Args:
         device (intptr_t): The identifier of the target device.
+        threshold (unsigned int): The low power threshold to be set.
-    Returns:
-        nvmlPlatformInfo_v2_t: Pointer to the caller-provided structure of nvmlPlatformInfo_t.
-
-    .. seealso:: `nvmlDeviceGetPlatformInfo`
+    .. seealso:: `nvmlDeviceSetNvLinkDeviceLowPowerThreshold`
     """
-    cdef PlatformInfo_v1 platform_info_v1_py
-    cdef PlatformInfo_v2 platform_info_v2_py
-    cdef nvmlPlatformInfo_t *platform_info
-
-    if CUDA_VERSION >= 13000:
-        platform_info_v2_py = PlatformInfo_v2()
-        platform_info = (platform_info_v2_py._get_ptr())
-        platform_info.version = sizeof(nvmlPlatformInfo_v2_t) | (2 << 24)
-        with nogil:
-            __status__ = nvmlDeviceGetPlatformInfo(device, platform_info)
-        check_status(__status__)
-        return platform_info_v2_py
+    cdef nvmlNvLinkPowerThres_t[1] info
-    else:
-        platform_info_v1_py = PlatformInfo_v1()
-        platform_info = (platform_info_v1_py._get_ptr())
-        platform_info.version = sizeof(nvmlPlatformInfo_v1_t) | (1 << 24)
-        with nogil:
-            __status__ = nvmlDeviceGetPlatformInfo(device, platform_info)
-        check_status(__status__)
-        return platform_info_v1_py
+    with nogil:
+        info[0].lowPwrThreshold = threshold
+        __status__ = nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info)
+    check_status(__status__)
-cpdef object device_get_nvlink_info(intptr_t device):
-    """Query NVLINK information associated with this device.
+cpdef unsigned int device_set_power_management_limit_v2(intptr_t device, int power_scope, unsigned int power_value_mw):
+    """Set new power limit of this device.
     Args:
         device (intptr_t): The identifier of the target device.
+        power_scope (PowerScope): Device type
+        power_value_mw (unsigned int): Power value to retrieve or set in milliwatts
-    Returns:
-        nvmlNvLinkInfo_v2_t: Reference to ``nvmlNvLinkInfo_t``.
-    .. 
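# Illustrative usage sketch (editorial example, not part of this patch) for the
# two PRM bindings above. Assumes `device` is a valid handle on hardware that
# exposes PRM; the 9-byte payload is a placeholder, not a well-formed TLV
# record, so the read/write call is expected to fail on real systems (see the
# accompanying tests).
from cuda.bindings import _nvml as nvml

def probe_prm(device):
    counters = nvml.PRMCounter_v1(5)  # room for five counter entries
    # A real caller would fill in the input fields of each entry first; this
    # call may also raise on hardware without PRM support.
    nvml.device_read_prm_counters_v1(device, counters)
    try:
        status, out_data = nvml.device_read_write_prm_v1(device, b"012345678")
    except (nvml.NoPermissionError, nvml.UnknownError):
        status, out_data = None, b""
    return counters, status, out_data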
seealso:: `nvmlDeviceGetNvLinkInfo` + .. seealso:: `nvmlDeviceSetPowerManagementLimit_v2` """ - cdef NvLinkInfo_v1 info_v1_py - cdef NvLinkInfo_v2 info_v2_py - cdef nvmlNvLinkInfo_t *info - - if CUDA_VERSION >= 13000: - info_v2_py = NvLinkInfo_v2() - info = (info_v2_py._get_ptr()) - info.version = sizeof(nvmlNvLinkInfo_v2_t) | (2 << 24) - with nogil: - __status__ = nvmlDeviceGetNvLinkInfo(device, info) - check_status(__status__) - return info_v2_py - - else: - info_v1_py = NvLinkInfo_v1() - info = (info_v1_py._get_ptr()) - info.version = sizeof(nvmlNvLinkInfo_v1_t) | (1 << 24) - with nogil: - __status__ = nvmlDeviceGetNvLinkInfo(device, info) - check_status(__status__) - return info_v1_py - + cdef nvmlPowerValue_v2_t[1] power_value -cpdef intptr_t system_event_set_create(): - """Create an empty set of system events. Event set should be freed by ``nvmlSystemEventSetFree``.""" - cdef nvmlSystemEventSetCreateRequest_v1_t[1] request with nogil: - request[0].version = sizeof(nvmlSystemEventSetCreateRequest_v1_t) | (1 << 24) - __status__ = nvmlSystemEventSetCreate(request) + power_value[0].version = sizeof(nvmlPowerValue_v2_t) | (2 << 24) + power_value[0].powerScope = power_scope + power_value[0].powerValueMw = power_value_mw + __status__ = nvmlDeviceSetPowerManagementLimit_v2(device, power_value) check_status(__status__) - return (request[0].set) -cpdef system_event_set_free(intptr_t event_set): - """Frees an event set.""" - cdef nvmlSystemEventSetFreeRequest_v1_t[1] request - request[0].set = event_set +cpdef device_set_rusd_settings_v1(intptr_t device, unsigned long long poll_mask): + """Set Read-only user shared data (RUSD) settings for GPU. Requires root/admin permissions. + + Args: + device (intptr_t): The identifier of the target device. + poll_mask (unsigned long long): Bitmask of polling data. 0 value means the GPU's RUSD polling mask is cleared + + .. seealso:: `nvmlDeviceSetRusdSettings_v1` + """ + cdef nvmlRusdSettings_v1_t[1] settings with nogil: - request[0].version = sizeof(nvmlSystemEventSetFreeRequest_v1_t) | (1 << 24) - __status__ = nvmlSystemEventSetFree(request) + settings[0].version = sizeof(nvmlRusdSettings_v1_t) | (1 << 24) + settings[0].pollMask = poll_mask + __status__ = nvmlDeviceSetRusdSettings_v1(device, settings) check_status(__status__) -cpdef system_register_events(unsigned long long event_types, intptr_t event_set): - """Starts recording of events on system and add the events to specified ``nvmlSystemEventSet_t``. +cpdef device_set_temperature_threshold(intptr_t device, int threshold_type, int temp): + """Sets the temperature threshold for the GPU with the specified threshold type in degrees C. Args: - event_types (unsigned long long): Bitmask of nvmlSystemEventType_t values representing the events to register. - event_set (intptr_t): The system event set handle. + device (intptr_t): The identifier of the target device. + threshold_type (TemperatureThresholds): The type of threshold value to be set. + temp (int): The value to be set. + + .. 
seealso:: `nvmlDeviceSetTemperatureThreshold` """ - cdef nvmlSystemRegisterEventRequest_v1_t[1] request - request[0].set = event_set - request[0].eventTypes = event_types with nogil: - request[0].version = sizeof(nvmlSystemRegisterEventRequest_v1_t) | (1 << 24) - __status__ = nvmlSystemRegisterEvents(request) + __status__ = nvmlDeviceSetTemperatureThreshold(device, <_TemperatureThresholds>threshold_type, &temp) check_status(__status__) -cpdef object system_event_set_wait(intptr_t event_set, unsigned int timeout_ms, unsigned int buffer_size): - """Waits for events to occur on the system event set. - - Args: - event_set (intptr_t): The system event set handle. - timeout_ms (unsigned int): The maximum amount of time in milliseconds to wait for an event. - buffer_size (unsigned int): The size of the event buffer. +cpdef unsigned long long system_get_conf_compute_key_rotation_threshold_info(): + """Get Conf Computing key rotation threshold detail. Returns: - SystemEvent: The system event that occurred. + unsigned long long: The key rotation threshold data. + + .. seealso:: `nvmlSystemGetConfComputeKeyRotationThresholdInfo` """ - cdef nvmlSystemEventSetWaitRequest_v1_t[1] request - cdef SystemEventData_v1 event_data = SystemEventData_v1(buffer_size) - request[0].timeoutms = timeout_ms - request[0].set = event_set - request[0].data = (event_data._get_ptr()) - request[0].dataSize = buffer_size + cdef nvmlConfComputeGetKeyRotationThresholdInfo_t[1] key_rotation_thr_info with nogil: - request[0].version = sizeof(nvmlSystemEventSetWaitRequest_v1_t) | (1 << 24) - __status__ = nvmlSystemEventSetWait(request) + key_rotation_thr_info[0].version = sizeof(nvmlConfComputeGetKeyRotationThresholdInfo_v1_t) | (1 << 24) + __status__ = nvmlSystemGetConfComputeKeyRotationThresholdInfo(key_rotation_thr_info) check_status(__status__) - event_data._data.resize((request[0].numEvent,)) - return event_data + return key_rotation_thr_info[0].attackerAdvantage -cpdef unsigned int device_get_fan_speed_rpm(intptr_t device, unsigned int fan): - """Retrieves the intended operating speed in rotations per minute (RPM) of the device's specified fan. +cpdef system_set_conf_compute_key_rotation_threshold_info(unsigned long long max_attacker_advantage): + """Set Conf Computing key rotation threshold. Args: - device (intptr_t): The identifier of the target device. - fan (unsigned int): The index of the fan to query. - - Returns: - rpm (unsigned int): The fan speed in RPM. + max_attacker_advantage (unsigned long long): The key rotation threshold data. - .. seealso:: `nvmlDeviceGetFanSpeedRPM` + .. seealso:: `nvmlSystemSetConfComputeKeyRotationThresholdInfo` """ - cdef nvmlFanSpeedInfo_v1_t[1] fan_speed - fan_speed[0].version = sizeof(nvmlFanSpeedInfo_v1_t) | (1 << 24) - fan_speed[0].fan = fan + cdef nvmlConfComputeSetKeyRotationThresholdInfo_t[1] key_rotation_thr_info with nogil: - __status__ = nvmlDeviceGetFanSpeedRPM(device, fan_speed) + key_rotation_thr_info[0].version = sizeof(nvmlConfComputeSetKeyRotationThresholdInfo_v1_t) | (1 << 24) + key_rotation_thr_info[0].maxAttackerAdvantage = max_attacker_advantage + __status__ = nvmlSystemSetConfComputeKeyRotationThresholdInfo(key_rotation_thr_info) check_status(__status__) - return fan_speed[0].speed -cpdef int device_get_margin_temperature(intptr_t device): - """Retrieves the thermal margin temperature (distance to nearest slowdown threshold). 
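# Illustrative usage sketch (editorial example, not part of this patch) for the
# versioned power-limit setter added above. A cautious pattern is to read the
# current limit and write the same value back. Assumes `device` is a valid
# handle and that `device_get_power_management_limit` reports the GPU-scope
# limit in milliwatts.
from cuda.bindings import _nvml as nvml

def rewrite_power_limit(device):
    limit_mw = nvml.device_get_power_management_limit(device)
    try:
        nvml.device_set_power_management_limit_v2(device, nvml.PowerScope.GPU, limit_mw)
    except (nvml.NoPermissionError, nvml.InvalidArgumentError):
        pass  # no privileges, or the scope/value is rejected on this device
    return limit_mw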
+cpdef unsigned long long vgpu_instance_get_runtime_state_size(unsigned int vgpu_instance): + """Retrieve the currently used runtime state size of the vGPU instance. Args: - device (intptr_t): The identifier of the target device. + vgpu_instance (unsigned int): Identifier of the target vGPU instance. Returns: - margin_temperature (int): The margin temperature value. + unsigned long long: Runtime state size of the vGPU instance. - .. seealso:: `nvmlDeviceGetMarginTemperature` + .. seealso:: `nvmlVgpuInstanceGetRuntimeStateSize` """ - cdef nvmlMarginTemperature_v1_t[1] margin_temp_info - margin_temp_info[0].version = sizeof(nvmlMarginTemperature_v1_t) | (1 << 24) + cdef nvmlVgpuRuntimeState_t[1] p_state with nogil: - __status__ = nvmlDeviceGetMarginTemperature(device, margin_temp_info) + p_state[0].version = sizeof(nvmlVgpuRuntimeState_v1_t) | (1 << 24) + __status__ = nvmlVgpuInstanceGetRuntimeStateSize(vgpu_instance, p_state) check_status(__status__) - return margin_temp_info[0].marginTemperature + return p_state[0].size -cpdef object device_get_clock_offsets(intptr_t device, nvmlClockType_t clock_type, nvmlPstates_t pstate): - """Retrieve min, max and current clock offset of some clock domain for a given PState. +cpdef unsigned int vgpu_type_get_max_instances_per_gpu_instance(unsigned int vgpu_type_id): + """Retrieve the maximum number of vGPU instances per GPU instance for given vGPU type. Args: - device (intptr_t): The identifier of the target device. + vgpu_type_id (VgpuTypeId): Handle to vGPU type. Returns: - nvmlClockOffset_v1_t: Structure specifying the clock type (input) and the pstate (input) retrieved clock offset value (output), min clock offset (output) and max clock offset (output). + unsigned int: Maximum number of vGPU instances per GPU instance - .. seealso:: `nvmlDeviceGetClockOffsets` + .. seealso:: `nvmlVgpuTypeGetMaxInstancesPerGpuInstance` """ - cdef ClockOffset_v1 info_py = ClockOffset_v1() - cdef nvmlClockOffset_v1_t *info = (info_py._get_ptr()) - info.version = sizeof(nvmlClockOffset_v1_t) | (1 << 24) - info.type = clock_type - info.pstate = pstate + cdef nvmlVgpuTypeMaxInstance_t[1] max_instance with nogil: - __status__ = nvmlDeviceGetClockOffsets(device, info) + max_instance[0].version = sizeof(nvmlVgpuTypeMaxInstance_v1_t) | (1 << 24) + max_instance[0].vgpuTypeId = vgpu_type_id + __status__ = nvmlVgpuTypeGetMaxInstancesPerGpuInstance(max_instance) check_status(__status__) - return info_py + return max_instance[0].maxInstancePerGI diff --git a/cuda_bindings/cuda/bindings/cy_nvml.pxd b/cuda_bindings/cuda/bindings/cy_nvml.pxd index 6b302c4cdc..7a34112b6a 100644 --- a/cuda_bindings/cuda/bindings/cy_nvml.pxd +++ b/cuda_bindings/cuda/bindings/cy_nvml.pxd @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE # @@ -1927,7 +1927,6 @@ cdef nvmlReturn_t nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t devi cdef nvmlReturn_t nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, nvmlFanControlPolicy_t policy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int* temp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil @@ -2066,13 +2065,7 @@ cdef nvmlReturn_t nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned i cdef nvmlReturn_t nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, nvmlDevice_t* migDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil -cdef nvmlReturn_t nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDevicePowerSmoothingUpdatePresetProfileParam(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil cdef nvmlReturn_t nvmlDevicePowerSmoothingSetState(nvmlDevice_t device, nvmlPowerSmoothingState_t* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil diff --git a/cuda_bindings/cuda/bindings/cy_nvml.pyx b/cuda_bindings/cuda/bindings/cy_nvml.pyx index 
961b3a9208..addcc7e5a3 100644 --- a/cuda_bindings/cuda/bindings/cy_nvml.pyx +++ b/cuda_bindings/cuda/bindings/cy_nvml.pyx @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE # @@ -759,10 +759,6 @@ cdef nvmlReturn_t nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTem return _nvml._nvmlDeviceSetTemperatureThreshold(device, thresholdType, temp) -cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - return _nvml._nvmlDeviceSetPowerManagementLimit(device, limit) - - cdef nvmlReturn_t nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: return _nvml._nvmlDeviceSetGpuOperationMode(device, mode) @@ -1315,34 +1311,10 @@ cdef nvmlReturn_t nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migD return _nvml._nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice, device) -cdef nvmlReturn_t nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - return _nvml._nvmlGpmSampleGet(device, gpmSample) - - -cdef nvmlReturn_t nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - return _nvml._nvmlGpmMigSampleGet(device, gpuInstanceId, gpmSample) - - -cdef nvmlReturn_t nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - return _nvml._nvmlGpmQueryDeviceSupport(device, gpmSupport) - - -cdef nvmlReturn_t nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - return _nvml._nvmlGpmQueryIfStreamingEnabled(device, state) - - -cdef nvmlReturn_t nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - return _nvml._nvmlGpmSetStreamingEnabled(device, state) - - cdef nvmlReturn_t nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: return _nvml._nvmlDeviceGetCapabilities(device, caps) -cdef nvmlReturn_t nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: - return _nvml._nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requestedProfiles) - - cdef nvmlReturn_t nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil: return _nvml._nvmlDevicePowerSmoothingActivatePresetProfile(device, profile) diff --git a/cuda_bindings/tests/nvml/test_device.py b/cuda_bindings/tests/nvml/test_device.py new file mode 100644 index 0000000000..91e5ec91b8 --- /dev/null +++ b/cuda_bindings/tests/nvml/test_device.py @@ -0,0 +1,159 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE + + +import cuda.bindings.runtime as cudart +import pytest +from cuda.bindings import _nvml as nvml + +from .conftest import unsupported_before + + +def isSuccess(err): + return err == cudart.cudaError_t.cudaSuccess + + +def assertSuccess(err): + assert isSuccess(err) + + +def driverVersionLessThan(target): + err, version = cudart.cudaDriverGetVersion() + assertSuccess(err) + return version < target + + +def test_device_capabilities(all_devices): + for device in all_devices: + capabilities = nvml.device_get_capabilities(device) + assert isinstance(capabilities, int) + + +def test_clk_mon_status_t(): + obj = nvml.ClkMonStatus() + assert len(obj.clk_mon_list) == 0 + assert not hasattr(obj, "clk_mon_list_size") + + +def test_current_clock_freqs(all_devices): + for device in all_devices: + clk_freqs = nvml.device_get_current_clock_freqs(device) + assert isinstance(clk_freqs, nvml.DeviceCurrentClockFreqs_v1) + assert isinstance(clk_freqs.str, str) + + +def test_grid_licensable_features(all_devices): + for device in all_devices: + features = nvml.device_get_grid_licensable_features_v4(device) + assert isinstance(features, nvml.GridLicensableFeatures) + # #define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 + assert len(features.grid_licensable_features) <= 3 + assert not hasattr(features, "licensable_features_count") + + for feature in features.grid_licensable_features: + nvml.GridLicenseFeatureCode(feature.feature_code) + assert isinstance(feature.feature_state, int) + assert isinstance(feature.license_info, str) + assert isinstance(feature.product_name, str) + assert isinstance(feature.feature_enabled, int) + nvml.GridLicenseExpiry(feature.license_expiry) + + +def test_get_handle_by_uuidv(all_devices): + for device in all_devices: + uuid = nvml.device_get_uuid(device) + new_handle = nvml.device_get_handle_by_uuidv(nvml.UUIDType.ASCII, uuid.encode("ascii")) + assert new_handle == device + + +def test_get_nv_link_supported_bw_modes(all_devices): + for device in all_devices: + with unsupported_before(device, None): + modes = nvml.device_get_nvlink_supported_bw_modes(device) + assert isinstance(modes, nvml.NvLinkSupportedBWModes) + # #define NVML_NVLINK_TOTAL_SUPPORTED_BW_MODES 23 + assert len(modes.supported_bw_modes) <= 23 + assert not hasattr(modes, "total_bw_modes") + + for mode in modes.bw_modes: + assert isinstance(mode, int) + + +def test_device_get_pdi(all_devices): + for device in all_devices: + pdi = nvml.device_get_pdi(device) + assert isinstance(pdi, int) + + +def test_device_get_performance_modes(all_devices): + for device in all_devices: + modes = nvml.device_get_performance_modes(device) + assert isinstance(modes, str) + + +@pytest.mark.skipif(driverVersionLessThan(13010), reason="Introduced in 13.1") +def test_device_get_unrepairable_memory_flag(all_devices): + for device in all_devices: + status = nvml.device_get_unrepairable_memory_flag_v1(device) + assert isinstance(status, int) + + +def test_device_vgpu_get_heterogeneous_mode(all_devices): + for device in all_devices: + with unsupported_before(device, None): + mode = nvml.device_get_vgpu_heterogeneous_mode(device) + assert isinstance(mode, int) + + +def test_read_prm_counters(all_devices): + for device in all_devices: + counters = nvml.PRMCounter_v1(5) + with unsupported_before(device, None): + read_counters = nvml.device_read_prm_counters_v1(device, counters) + assert counters is read_counters + assert len(read_counters) == 5 + + +def test_read_write_prm(all_devices): + 
for device in all_devices: + with unsupported_before(device, nvml.DeviceArch.BLACKWELL): + result = nvml.device_read_write_prm_v1(device, b"012345678") + assert isinstance(result, tuple) + assert isinstance(result[0], int) + assert isinstance(result[1], bytes) + + +def test_nvlink_low_power_threshold(all_devices): + for device in all_devices: + with unsupported_before(device, nvml.DeviceArch.HOPPER): + nvml.device_set_nvlink_device_low_power_threshold(device, 0) + + +def test_get_power_management_limit(all_devices): + for device in all_devices: + # Docs say supported on KEPLER or later + with unsupported_before(device, None): + limit = nvml.device_get_power_management_limit(device) + + +def test_set_power_management_limit(all_devices): + for device in all_devices: + with unsupported_before(device, nvml.DeviceArch.KEPLER): + try: + nvml.device_set_power_management_limit_v2(device, nvml.PowerScope.GPU, 10000) + except nvml.NoPermissionError: + pytest.skip("No permission to set power management limit") + + +def test_set_temperature_threshold(all_devices): + for device in all_devices: + with unsupported_before(device, nvml.DeviceArch.MAXWELL): + temp = nvml.device_get_temperature_threshold( + device, nvml.TemperatureThresholds.TEMPERATURE_THRESHOLD_SHUTDOWN + ) + try: + nvml.device_set_temperature_threshold( + device, nvml.TemperatureThresholds.TEMPERATURE_THRESHOLD_SHUTDOWN, temp + ) + except nvml.NoPermissionError: + pytest.skip("No permission to set temperature threshold") diff --git a/cuda_bindings/tests/nvml/test_gpu.py b/cuda_bindings/tests/nvml/test_gpu.py index 79c478a407..1493a21354 100644 --- a/cuda_bindings/tests/nvml/test_gpu.py +++ b/cuda_bindings/tests/nvml/test_gpu.py @@ -1,6 +1,7 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE +import numpy as np import pytest from cuda.bindings import _nvml as nvml @@ -35,3 +36,47 @@ def test_gpu_get_platform_info(all_devices): platform_info = nvml.device_get_platform_info(device) assert isinstance(platform_info, nvml.PlatformInfo_v2) + + +# TODO: Test APIs related to GPU instances, which require specific hardware and root + +# def test_gpu_instance(all_devices): +# for device in all_devices: +# # Requires root +# gpu_instance = nvml.device_create_gpu_instance(device, nvml.GpuInstanceProfile.PROFILE_1_SLICE) + + +def test_conf_compute_attestation_report_t(all_devices): + report = nvml.ConfComputeGpuAttestationReport() + assert not hasattr(report, "attestation_report_size") + assert len(report.attestation_report) == 0 + assert not hasattr(report, "cec_attestation_report_size") + assert len(report.cec_attestation_report) == 0 + assert len(report.nonce) == 32 + assert report.nonce.dtype == np.uint8 + + +def test_gpu_conf_compute_attestation_report(all_devices): + for device in all_devices: + # Documentation says AMPERE or newer + with unsupported_before(device, None): + report = nvml.device_get_conf_compute_gpu_attestation_report(device, nonce=b"12345678") + + assert isinstance(report, nvml.ComputeGpuAttestationReport) + + +def test_conf_compute_gpu_certificate_t(): + cert = nvml.ConfComputeGpuCertificate() + assert not hasattr(cert, "cert_chain_size") + assert len(cert.cert_chain) == 0 + assert not hasattr(cert, "attestation_cert_chain_size") + assert len(cert.attestation_cert_chain) == 0 + + +def test_conf_compute_gpu_certificate(all_devices): + for device in all_devices: + # Documentation says AMPERE or newer + with unsupported_before(device, None): + cert = nvml.device_get_conf_compute_gpu_certificate(device) + + assert isinstance(cert, nvml.ComputeGpuCertificate) diff --git a/cuda_bindings/tests/nvml/test_pci.py b/cuda_bindings/tests/nvml/test_pci.py new file mode 100644 index 0000000000..4d99401940 --- /dev/null +++ b/cuda_bindings/tests/nvml/test_pci.py @@ -0,0 +1,33 @@ +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE + + +from cuda.bindings import _nvml as nvml + +from .conftest import unsupported_before + + +def test_discover_gpus(all_devices): + for device in all_devices: + pci_info = nvml.device_get_pci_info_v3(device) + # Docs say this should be supported on PASCAL and later + with unsupported_before(device, None): + nvml.device_discover_gpus(pci_info.ptr) + + +def test_bridge_chip_hierarchy_t(): + hierarchy = nvml.BridgeChipHierarchy() + assert len(hierarchy.bridge_chip_info) == 0 + assert not hasattr(hierarchy, "bridge_count") + assert isinstance(hierarchy.bridge_chip_info, nvml.BridgeChipInfo) + + +def test_bridge_chip_info(all_devices): + for device in all_devices: + with unsupported_before(device, None): + info = nvml.device_get_bridge_chip_info(device) + assert isinstance(info, nvml.BridgeChipHierarchy) + for entry in info.bridge_chip_info: + assert isinstance(entry, nvml.BridgeChipInfo) + assert isinstance(entry.type, int) + assert isinstance(entry.fw_version, int) diff --git a/cuda_core/tests/system/test_system_device.py b/cuda_core/tests/system/test_system_device.py index ced59c2522..c27cdcd80a 100644 --- a/cuda_core/tests/system/test_system_device.py +++ b/cuda_core/tests/system/test_system_device.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 From b11d828dd37483b2e54f860428b1ff1f6993085a Mon Sep 17 00:00:00 2001 From: Michael Droettboom Date: Thu, 22 Jan 2026 13:03:02 -0500 Subject: [PATCH 2/6] Minor changes based on Copilot's comments --- cuda_bindings/tests/nvml/test_device.py | 4 ++-- cuda_bindings/tests/nvml/test_gpu.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cuda_bindings/tests/nvml/test_device.py b/cuda_bindings/tests/nvml/test_device.py index 91e5ec91b8..d75d0eca6f 100644 --- a/cuda_bindings/tests/nvml/test_device.py +++ b/cuda_bindings/tests/nvml/test_device.py @@ -72,7 +72,7 @@ def test_get_nv_link_supported_bw_modes(all_devices): modes = nvml.device_get_nvlink_supported_bw_modes(device) assert isinstance(modes, nvml.NvLinkSupportedBWModes) # #define NVML_NVLINK_TOTAL_SUPPORTED_BW_MODES 23 - assert len(modes.supported_bw_modes) <= 23 + assert len(modes.bw_modes) <= 23 assert not hasattr(modes, "total_bw_modes") for mode in modes.bw_modes: @@ -133,7 +133,7 @@ def test_get_power_management_limit(all_devices): for device in all_devices: # Docs say supported on KEPLER or later with unsupported_before(device, None): - limit = nvml.device_get_power_management_limit(device) + nvml.device_get_power_management_limit(device) def test_set_power_management_limit(all_devices): diff --git a/cuda_bindings/tests/nvml/test_gpu.py b/cuda_bindings/tests/nvml/test_gpu.py index 1493a21354..e14c67ac06 100644 --- a/cuda_bindings/tests/nvml/test_gpu.py +++ b/cuda_bindings/tests/nvml/test_gpu.py @@ -62,7 +62,7 @@ def test_gpu_conf_compute_attestation_report(all_devices): with unsupported_before(device, None): report = nvml.device_get_conf_compute_gpu_attestation_report(device, nonce=b"12345678") - assert isinstance(report, nvml.ComputeGpuAttestationReport) + assert isinstance(report, nvml.ConfComputeGpuAttestationReport) def test_conf_compute_gpu_certificate_t(): @@ -79,4 +79,4 @@ def test_conf_compute_gpu_certificate(all_devices): with unsupported_before(device, None): cert = 
nvml.device_get_conf_compute_gpu_certificate(device) - assert isinstance(cert, nvml.ComputeGpuCertificate) + assert isinstance(cert, nvml.ConfComputeGpuCertificate) From 12999447a94db8a039c00af46a31e8994b7ca910 Mon Sep 17 00:00:00 2001 From: Michael Droettboom Date: Thu, 22 Jan 2026 13:15:09 -0500 Subject: [PATCH 3/6] Fix tests, having run on more hardware --- cuda_bindings/tests/nvml/test_device.py | 16 ++++++++++++---- cuda_bindings/tests/nvml/test_gpu.py | 6 ++++-- cuda_bindings/tests/nvml/test_pci.py | 4 +++- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/cuda_bindings/tests/nvml/test_device.py b/cuda_bindings/tests/nvml/test_device.py index d75d0eca6f..1bdd2911f7 100644 --- a/cuda_bindings/tests/nvml/test_device.py +++ b/cuda_bindings/tests/nvml/test_device.py @@ -117,7 +117,10 @@ def test_read_prm_counters(all_devices): def test_read_write_prm(all_devices): for device in all_devices: with unsupported_before(device, nvml.DeviceArch.BLACKWELL): - result = nvml.device_read_write_prm_v1(device, b"012345678") + try: + result = nvml.device_read_write_prm_v1(device, b"012345678") + except nvml.NoPermissionError: + pytest.skip("No permission to read/write PRM") assert isinstance(result, tuple) assert isinstance(result[0], int) assert isinstance(result[1], bytes) @@ -125,7 +128,8 @@ def test_read_write_prm(all_devices): def test_nvlink_low_power_threshold(all_devices): for device in all_devices: - with unsupported_before(device, nvml.DeviceArch.HOPPER): + # Docs say supported on HOPPER or newer + with unsupported_before(device, None): nvml.device_set_nvlink_device_low_power_threshold(device, 0) @@ -143,17 +147,21 @@ def test_set_power_management_limit(all_devices): nvml.device_set_power_management_limit_v2(device, nvml.PowerScope.GPU, 10000) except nvml.NoPermissionError: pytest.skip("No permission to set power management limit") + except nvml.InvalidArgumentError: + pytest.skip("Invalid argument when setting power management limit -- probably unsupported") def test_set_temperature_threshold(all_devices): for device in all_devices: with unsupported_before(device, nvml.DeviceArch.MAXWELL): temp = nvml.device_get_temperature_threshold( - device, nvml.TemperatureThresholds.TEMPERATURE_THRESHOLD_SHUTDOWN + device, nvml.TemperatureThresholds.TEMPERATURE_THRESHOLD_ACOUSTIC_CURR ) try: nvml.device_set_temperature_threshold( - device, nvml.TemperatureThresholds.TEMPERATURE_THRESHOLD_SHUTDOWN, temp + device, nvml.TemperatureThresholds.TEMPERATURE_THRESHOLD_ACOUSTIC_CURR, temp ) except nvml.NoPermissionError: pytest.skip("No permission to set temperature threshold") + except nvml.InvalidArgumentError: + pytest.skip("Invalid argument when setting temperature threshold -- this is probably the temp type") diff --git a/cuda_bindings/tests/nvml/test_gpu.py b/cuda_bindings/tests/nvml/test_gpu.py index e14c67ac06..39f37331d4 100644 --- a/cuda_bindings/tests/nvml/test_gpu.py +++ b/cuda_bindings/tests/nvml/test_gpu.py @@ -59,7 +59,8 @@ def test_conf_compute_attestation_report_t(all_devices): def test_gpu_conf_compute_attestation_report(all_devices): for device in all_devices: # Documentation says AMPERE or newer - with unsupported_before(device, None): + with unsupported_before(device, None), pytest.raises(nvml.UnknownError): + # The nonce string is nonsensical, so if this "works", we expect an UnknownError report = nvml.device_get_conf_compute_gpu_attestation_report(device, nonce=b"12345678") assert isinstance(report, nvml.ConfComputeGpuAttestationReport) @@ -76,7 +77,8 @@ def 
test_conf_compute_gpu_certificate_t(): def test_conf_compute_gpu_certificate(all_devices): for device in all_devices: # Documentation says AMPERE or newer - with unsupported_before(device, None): + with unsupported_before(device, None), pytest.raises(nvml.UnknownError): + # This is expected to fail if the device doesn't have a proper certificate cert = nvml.device_get_conf_compute_gpu_certificate(device) assert isinstance(cert, nvml.ConfComputeGpuCertificate) diff --git a/cuda_bindings/tests/nvml/test_pci.py b/cuda_bindings/tests/nvml/test_pci.py index 4d99401940..3e57f0267c 100644 --- a/cuda_bindings/tests/nvml/test_pci.py +++ b/cuda_bindings/tests/nvml/test_pci.py @@ -2,6 +2,8 @@ # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE +import contextlib + from cuda.bindings import _nvml as nvml from .conftest import unsupported_before @@ -11,7 +13,7 @@ def test_discover_gpus(all_devices): for device in all_devices: pci_info = nvml.device_get_pci_info_v3(device) # Docs say this should be supported on PASCAL and later - with unsupported_before(device, None): + with unsupported_before(device, None), contextlib.suppress(nvml.OperatingSystemError): nvml.device_discover_gpus(pci_info.ptr) From 6fc9e772ed5c1df8c86c8f9c10ea28b9e6a123f7 Mon Sep 17 00:00:00 2001 From: Michael Droettboom Date: Thu, 22 Jan 2026 13:53:54 -0500 Subject: [PATCH 4/6] Further fix tests --- cuda_bindings/tests/nvml/test_gpu.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/cuda_bindings/tests/nvml/test_gpu.py b/cuda_bindings/tests/nvml/test_gpu.py index 39f37331d4..52c5dc52fb 100644 --- a/cuda_bindings/tests/nvml/test_gpu.py +++ b/cuda_bindings/tests/nvml/test_gpu.py @@ -61,9 +61,7 @@ def test_gpu_conf_compute_attestation_report(all_devices): # Documentation says AMPERE or newer with unsupported_before(device, None), pytest.raises(nvml.UnknownError): # The nonce string is nonsensical, so if this "works", we expect an UnknownError - report = nvml.device_get_conf_compute_gpu_attestation_report(device, nonce=b"12345678") - - assert isinstance(report, nvml.ConfComputeGpuAttestationReport) + nvml.device_get_conf_compute_gpu_attestation_report(device, nonce=b"12345678") def test_conf_compute_gpu_certificate_t(): @@ -79,6 +77,4 @@ def test_conf_compute_gpu_certificate(all_devices): # Documentation says AMPERE or newer with unsupported_before(device, None), pytest.raises(nvml.UnknownError): # This is expected to fail if the device doesn't have a proper certificate - cert = nvml.device_get_conf_compute_gpu_certificate(device) - - assert isinstance(cert, nvml.ConfComputeGpuCertificate) + nvml.device_get_conf_compute_gpu_certificate(device) From 3566c1de346b939abbcd044d3fe96878de033c27 Mon Sep 17 00:00:00 2001 From: Michael Droettboom Date: Thu, 22 Jan 2026 15:35:42 -0500 Subject: [PATCH 5/6] Fix tests on even more hardware --- cuda_bindings/tests/nvml/test_device.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cuda_bindings/tests/nvml/test_device.py b/cuda_bindings/tests/nvml/test_device.py index 1bdd2911f7..2b92c64584 100644 --- a/cuda_bindings/tests/nvml/test_device.py +++ b/cuda_bindings/tests/nvml/test_device.py @@ -153,7 +153,8 @@ def test_set_power_management_limit(all_devices): def test_set_temperature_threshold(all_devices): for device in all_devices: - with unsupported_before(device, nvml.DeviceArch.MAXWELL): + # Docs say supported on MAXWELL or newer + with unsupported_before(device, None): temp = nvml.device_get_temperature_threshold( device, 
nvml.TemperatureThresholds.TEMPERATURE_THRESHOLD_ACOUSTIC_CURR ) From a75e4c36d2234752c5403b23c8894687e1488fe9 Mon Sep 17 00:00:00 2001 From: Michael Droettboom Date: Fri, 23 Jan 2026 09:40:36 -0500 Subject: [PATCH 6/6] More robust testing for Windows --- cuda_bindings/tests/nvml/test_device.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cuda_bindings/tests/nvml/test_device.py b/cuda_bindings/tests/nvml/test_device.py index 2b92c64584..a3f1dda265 100644 --- a/cuda_bindings/tests/nvml/test_device.py +++ b/cuda_bindings/tests/nvml/test_device.py @@ -105,6 +105,7 @@ def test_device_vgpu_get_heterogeneous_mode(all_devices): assert isinstance(mode, int) +@pytest.mark.skipif(driverVersionLessThan(13010), reason="Introduced in 13.1") def test_read_prm_counters(all_devices): for device in all_devices: counters = nvml.PRMCounter_v1(5) @@ -116,7 +117,8 @@ def test_read_prm_counters(all_devices): def test_read_write_prm(all_devices): for device in all_devices: - with unsupported_before(device, nvml.DeviceArch.BLACKWELL): + # Docs say supported in BLACKWELL or later + with unsupported_before(device, None): try: result = nvml.device_read_write_prm_v1(device, b"012345678") except nvml.NoPermissionError: