8 changes: 5 additions & 3 deletions .pre-commit-config.yaml
@@ -12,12 +12,14 @@ ci:
 skip: [bandit]
 submodules: false

+# Please update the rev: SHAs below with this command:
+# pre-commit autoupdate --freeze
 repos:
 - repo: https://github.com/astral-sh/ruff-pre-commit
-rev: 971923581912ef60a6b70dbf0c3e9a39563c9d47 #v0.11.4
+rev: 0b19ef1fd6ad680ed7752d6daba883ce1265a6de # frozen: v0.12.2
 hooks:
 - id: ruff
-args: [--fix, --show-fixes]
+args: [--fix, --show-fixes, --target-version=py39]
 - id: ruff-format

 - repo: local
@@ -30,7 +32,7 @@ repos:
 - https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl

 - repo: https://github.com/PyCQA/bandit
-rev: 8ff25e07e487f143571cc305e56dd0253c60bc7b #v1.8.3
+rev: 2d0b675b04c80ae42277e10500db06a0a37bae17 # frozen: 1.8.6
 hooks:
 - id: bandit
 args:
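Side note on the `--target-version=py39` argument added above: it pins ruff to the project's minimum supported Python, which matters for the `Optional[X]` to `X | None` rewrites in the files below. A minimal sketch of the mechanism (not taken from this repo, and assuming the affected modules enable postponed annotation evaluation, which these hunks do not show):

```python
# Minimal sketch (not from this repo): why the `X | None` spelling used in this PR
# can be safe on Python 3.9. With postponed evaluation of annotations (PEP 563),
# the PEP 604 union below is stored as a string and never evaluated at
# class-creation time, so it does not need Python 3.10's runtime `|` on types.
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class Options:
    upload_stream: object | None = None  # placeholder type; evaluated lazily


# The annotation survives as plain text, even on 3.9:
print(Options.__annotations__["upload_stream"])  # -> "object | None"
```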
8 changes: 4 additions & 4 deletions cuda_core/cuda/core/experimental/_graph.py
@@ -6,7 +6,7 @@

 import weakref
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING

 if TYPE_CHECKING:
 from cuda.core.experimental._stream import Stream
@@ -112,7 +112,7 @@ class GraphCompleteOptions:
 """

 auto_free_on_launch: bool = False
-upload_stream: Optional[Stream] = None
+upload_stream: Stream | None = None
 device_launch: bool = False
 use_node_priority: bool = False

@@ -262,7 +262,7 @@ def end_building(self) -> GraphBuilder:
 self._building_ended = True
 return self

-def complete(self, options: Optional[GraphCompleteOptions] = None) -> Graph:
+def complete(self, options: GraphCompleteOptions | None = None) -> Graph:
 """Completes the graph builder and returns the built :obj:`~_graph.Graph` object.

 Parameters
@@ -324,7 +324,7 @@ def complete(self, options: Optional[GraphCompleteOptions] = None) -> Graph:
 raise RuntimeError(f"Graph instantiation failed with unexpected error code: {params.result_out}")
 return graph

-def debug_dot_print(self, path, options: Optional[GraphDebugPrintOptions] = None):
+def debug_dot_print(self, path, options: GraphDebugPrintOptions | None = None):
 """Generates a DOT debug file for the graph builder.

 Parameters
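For illustration, a hedged sketch of calling the updated `complete()` signature; the `builder` argument stands in for a `GraphBuilder` obtained and populated through the cuda.core APIs, which this diff does not show:

```python
# Hypothetical usage sketch; not taken from the repository's tests or docs.
from cuda.core.experimental._graph import Graph, GraphBuilder, GraphCompleteOptions


def finalize(builder: GraphBuilder) -> Graph:
    # upload_stream is left at its None default.
    options = GraphCompleteOptions(auto_free_on_launch=True)
    # end_building() returns the builder, so the calls can be chained;
    # complete() also accepts no argument (options defaults to None).
    return builder.end_building().complete(options)
```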
44 changes: 22 additions & 22 deletions cuda_core/cuda/core/experimental/_linker.py
@@ -8,7 +8,7 @@
 import weakref
 from contextlib import contextmanager
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, List, Tuple, Union
 from warnings import warn

 if TYPE_CHECKING:
@@ -163,27 +163,27 @@ class LinkerOptions:
 Default: False.
 """

-name: Optional[str] = "<default linker>"
-arch: Optional[str] = None
-max_register_count: Optional[int] = None
-time: Optional[bool] = None
-verbose: Optional[bool] = None
-link_time_optimization: Optional[bool] = None
-ptx: Optional[bool] = None
-optimization_level: Optional[int] = None
-debug: Optional[bool] = None
-lineinfo: Optional[bool] = None
-ftz: Optional[bool] = None
-prec_div: Optional[bool] = None
-prec_sqrt: Optional[bool] = None
-fma: Optional[bool] = None
-kernels_used: Optional[Union[str, Tuple[str], List[str]]] = None
-variables_used: Optional[Union[str, Tuple[str], List[str]]] = None
-optimize_unused_variables: Optional[bool] = None
-ptxas_options: Optional[Union[str, Tuple[str], List[str]]] = None
-split_compile: Optional[int] = None
-split_compile_extended: Optional[int] = None
-no_cache: Optional[bool] = None
+name: str | None = "<default linker>"
+arch: str | None = None
+max_register_count: int | None = None
+time: bool | None = None
+verbose: bool | None = None
+link_time_optimization: bool | None = None
+ptx: bool | None = None
+optimization_level: int | None = None
+debug: bool | None = None
+lineinfo: bool | None = None
+ftz: bool | None = None
+prec_div: bool | None = None
+prec_sqrt: bool | None = None
+fma: bool | None = None
+kernels_used: Union[str, Tuple[str], List[str]] | None = None
+variables_used: Union[str, Tuple[str], List[str]] | None = None
+optimize_unused_variables: bool | None = None
+ptxas_options: Union[str, Tuple[str], List[str]] | None = None
+split_compile: int | None = None
+split_compile_extended: int | None = None
+no_cache: bool | None = None

 def __post_init__(self):
 _lazy_init()
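As a hedged illustration of the annotations above (not from the repo): the sequence-typed fields still accept a single string, a tuple, or a list, while the scalar fields simply default to `None`:

```python
# Illustrative sketch only; the field names come from the dataclass above, but the
# values (arch, ptxas flag, kernel name) are made up for the example. Constructing
# the options may require a working CUDA toolchain at runtime.
from cuda.core.experimental._linker import LinkerOptions


def make_link_options() -> LinkerOptions:
    return LinkerOptions(
        arch="sm_90",                 # hypothetical target architecture
        link_time_optimization=True,
        ptxas_options=["-v"],         # a single str or a tuple would also type-check
        kernels_used=("my_kernel",),  # hypothetical kernel name
    )
```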
14 changes: 7 additions & 7 deletions cuda_core/cuda/core/experimental/_memory.py
@@ -6,7 +6,7 @@

 import abc
 import weakref
-from typing import Optional, Tuple, TypeVar, Union
+from typing import Tuple, TypeVar, Union

 from cuda.core.experimental._dlpack import DLDeviceType, make_py_capsule
 from cuda.core.experimental._stream import Stream, default_stream
@@ -55,7 +55,7 @@ def __new__(self, *args, **kwargs):
 raise RuntimeError("Buffer objects cannot be instantiated directly. Please use MemoryResource APIs.")

 @classmethod
-def _init(cls, ptr: DevicePointerT, size: int, mr: Optional[MemoryResource] = None):
+def _init(cls, ptr: DevicePointerT, size: int, mr: MemoryResource | None = None):
 self = super().__new__(cls)
 self._mnff = Buffer._MembersNeededForFinalize(self, ptr, size, mr)
 return self
@@ -168,10 +168,10 @@ def copy_from(self, src: Buffer, *, stream: Stream):
 def __dlpack__(
 self,
 *,
-stream: Optional[int] = None,
-max_version: Optional[Tuple[int, int]] = None,
-dl_device: Optional[Tuple[int, int]] = None,
-copy: Optional[bool] = None,
+stream: int | None = None,
+max_version: Tuple[int, int] | None = None,
+dl_device: Tuple[int, int] | None = None,
+copy: bool | None = None,
 ) -> PyCapsule:
 # Note: we ignore the stream argument entirely (as if it is -1).
 # It is the user's responsibility to maintain stream order.
@@ -211,7 +211,7 @@ def __release_buffer__(self, buffer: memoryview, /):
 raise NotImplementedError("WIP: Buffer.__release_buffer__ hasn't been implemented yet.")

 @staticmethod
-def from_handle(ptr: DevicePointerT, size: int, mr: Optional[MemoryResource] = None) -> Buffer:
+def from_handle(ptr: DevicePointerT, size: int, mr: MemoryResource | None = None) -> Buffer:
 """Create a new :class:`Buffer` object from a pointer.

 Parameters
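A small, hypothetical consumer-side sketch of the keyword-only `__dlpack__` signature above; `buf` must be a real `Buffer` allocated through a memory resource, which is outside this diff:

```python
# Hypothetical sketch; nothing here is taken from the repository's examples.
from cuda.core.experimental._memory import Buffer


def export_versioned_capsule(buf: Buffer):
    # All keyword arguments default to None; max_version follows the DLPack
    # Python protocol convention of a (major, minor) tuple.
    return buf.__dlpack__(max_version=(1, 0))
```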
84 changes: 42 additions & 42 deletions cuda_core/cuda/core/experimental/_program.py
@@ -6,7 +6,7 @@

 import weakref
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, List, Tuple, Union
 from warnings import warn

 if TYPE_CHECKING:
@@ -182,47 +182,47 @@ class ProgramOptions:
 Default: False
 """

-name: Optional[str] = "<default program>"
-arch: Optional[str] = None
-relocatable_device_code: Optional[bool] = None
-extensible_whole_program: Optional[bool] = None
-debug: Optional[bool] = None
-lineinfo: Optional[bool] = None
-device_code_optimize: Optional[bool] = None
-ptxas_options: Optional[Union[str, List[str], Tuple[str]]] = None
-max_register_count: Optional[int] = None
-ftz: Optional[bool] = None
-prec_sqrt: Optional[bool] = None
-prec_div: Optional[bool] = None
-fma: Optional[bool] = None
-use_fast_math: Optional[bool] = None
-extra_device_vectorization: Optional[bool] = None
-link_time_optimization: Optional[bool] = None
-gen_opt_lto: Optional[bool] = None
-define_macro: Optional[
-Union[str, Tuple[str, str], List[Union[str, Tuple[str, str]]], Tuple[Union[str, Tuple[str, str]]]]
-] = None
-undefine_macro: Optional[Union[str, List[str], Tuple[str]]] = None
-include_path: Optional[Union[str, List[str], Tuple[str]]] = None
-pre_include: Optional[Union[str, List[str], Tuple[str]]] = None
-no_source_include: Optional[bool] = None
-std: Optional[str] = None
-builtin_move_forward: Optional[bool] = None
-builtin_initializer_list: Optional[bool] = None
-disable_warnings: Optional[bool] = None
-restrict: Optional[bool] = None
-device_as_default_execution_space: Optional[bool] = None
-device_int128: Optional[bool] = None
-optimization_info: Optional[str] = None
-no_display_error_number: Optional[bool] = None
-diag_error: Optional[Union[int, List[int], Tuple[int]]] = None
-diag_suppress: Optional[Union[int, List[int], Tuple[int]]] = None
-diag_warn: Optional[Union[int, List[int], Tuple[int]]] = None
-brief_diagnostics: Optional[bool] = None
-time: Optional[str] = None
-split_compile: Optional[int] = None
-fdevice_syntax_only: Optional[bool] = None
-minimal: Optional[bool] = None
+name: str | None = "<default program>"
+arch: str | None = None
+relocatable_device_code: bool | None = None
+extensible_whole_program: bool | None = None
+debug: bool | None = None
+lineinfo: bool | None = None
+device_code_optimize: bool | None = None
+ptxas_options: Union[str, List[str], Tuple[str]] | None = None
+max_register_count: int | None = None
+ftz: bool | None = None
+prec_sqrt: bool | None = None
+prec_div: bool | None = None
+fma: bool | None = None
+use_fast_math: bool | None = None
+extra_device_vectorization: bool | None = None
+link_time_optimization: bool | None = None
+gen_opt_lto: bool | None = None
+define_macro: (
+Union[str, Tuple[str, str], List[Union[str, Tuple[str, str]]], Tuple[Union[str, Tuple[str, str]]]] | None
+) = None
+undefine_macro: Union[str, List[str], Tuple[str]] | None = None
+include_path: Union[str, List[str], Tuple[str]] | None = None
+pre_include: Union[str, List[str], Tuple[str]] | None = None
+no_source_include: bool | None = None
+std: str | None = None
+builtin_move_forward: bool | None = None
+builtin_initializer_list: bool | None = None
+disable_warnings: bool | None = None
+restrict: bool | None = None
+device_as_default_execution_space: bool | None = None
+device_int128: bool | None = None
+optimization_info: str | None = None
+no_display_error_number: bool | None = None
+diag_error: Union[int, List[int], Tuple[int]] | None = None
+diag_suppress: Union[int, List[int], Tuple[int]] | None = None
+diag_warn: Union[int, List[int], Tuple[int]] | None = None
+brief_diagnostics: bool | None = None
+time: str | None = None
+split_compile: int | None = None
+fdevice_syntax_only: bool | None = None
+minimal: bool | None = None

 def __post_init__(self):
 self._name = self.name.encode()
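Finally, a hedged sketch of the forms admitted by the `define_macro` annotation above: a bare name, a `(name, value)` pair, or a list mixing both. The macro names and the `std` value are illustrative only:

```python
# Illustrative only; constructing the options may require a CUDA toolchain at runtime.
from cuda.core.experimental._program import ProgramOptions


def make_program_options() -> ProgramOptions:
    return ProgramOptions(
        std="c++17",                                           # hypothetical choice
        define_macro=["USE_FAST_PATH", ("TILE_SIZE", "128")],  # name and (name, value) pair
    )
```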